Compare commits

..

100 Commits

Author SHA1 Message Date
Alex Crawford
ae3676096c coreos-cloudinit: bump to v1.2.1 2015-01-21 14:29:25 -08:00
Alex Crawford
a548b557ed doc: add coreos-ssh-import-github-users 2015-01-21 14:28:43 -08:00
Alex Crawford
a9c132a706 coreos-cloudinit: bump to v1.2.0+git 2015-01-20 14:42:48 -08:00
Alex Crawford
c3c4b86a3b coreos-cloudinit: bump to v1.2.0 2015-01-20 14:42:18 -08:00
Alex Crawford
44142ff8af Merge pull request #301 from crawford/github
config: add support for multiple github users
2015-01-20 14:26:38 -08:00
Alex Crawford
e9529ede44 config: add support for multiple github users 2015-01-20 13:45:52 -08:00
Alex Crawford
4b5b801171 Merge pull request #295 from crawford/rules
config/validate: add some sanity checks
2015-01-15 16:25:37 -08:00
Alex Crawford
551cbb1e5d config/validate: add rule for file encoding 2015-01-15 15:01:44 -08:00
Alex Crawford
3c93938f8a decode: refactor file decoding into config package 2015-01-15 15:01:44 -08:00
Alex Crawford
f61c08c246 config/validate: add rule for coreos.write_files 2015-01-15 15:01:44 -08:00
Alex Crawford
571903cec6 config/validate: add rule for etcd discovery token 2015-01-15 15:01:44 -08:00
Alex Crawford
bdbd1930ed config/validate: add rule for file paths 2015-01-15 15:01:44 -08:00
Alex Crawford
cc75a943ba Merge pull request #300 from crawford/float
validate: allow promotion of int to float64
2015-01-15 13:17:44 -08:00
Alex Crawford
fc77ba6355 validate: allow promotion of int to float64 2015-01-14 17:54:01 -08:00
Eugene Yakubovich
7cfa0df7c4 Merge pull request #299 from eyakubovich/master
config: document and update flannel config values
2015-01-13 15:55:42 -08:00
Eugene Yakubovich
58f0dadaf9 config: document and update flannel config values
Fixes #297
2015-01-13 15:51:47 -08:00
Michael Marineau
1ab530f157 Merge pull request #293 from realestate-com-au/master
select first available hostname returned by EC2 metadata.
2015-01-05 12:47:27 -08:00
Kevin Yung
13e4b77130 ec2: allow space-separated hostnames in metadata
AWS hostname metadata will return space-separated hostname and domain
names when DHCPOptionSet is using multiple domain names. This patch
caters for this scenario.
2015-01-05 16:01:57 +11:00
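The selection logic described in the two messages above amounts to taking the first whitespace-separated token from the metadata value. A minimal sketch, assuming a hypothetical helper name (the actual datasource code may be structured differently):

```go
package main

import (
	"fmt"
	"strings"
)

// firstHostname returns the first whitespace-separated token from an EC2
// "hostname" metadata value, which can carry several names when the
// DHCPOptionSet defines multiple domain names. Hypothetical helper name;
// the real datasource implementation may differ.
func firstHostname(metadata string) string {
	fields := strings.Fields(metadata)
	if len(fields) == 0 {
		return ""
	}
	return fields[0]
}

func main() {
	fmt.Println(firstHostname("ip-10-0-0-1.ec2.internal example.com example.org"))
	// ip-10-0-0-1.ec2.internal
}
```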
Alex Crawford
54c62cbb70 coreos-cloudinit: bump to v1.1.0+git 2014-12-30 16:52:10 +01:00
Alex Crawford
c8e864fef5 coreos-cloudinit: bump to v1.1.0 2014-12-30 16:51:24 +01:00
Alex Crawford
60a3377e7c Merge pull request #290 from crawford/yaml
Improved YAML parsing
2014-12-30 16:24:12 +01:00
Alex Crawford
5527f09778 config: fix parsing of file permissions
This reintroduces the braindead '744' syntax for file permissions. Even
though this number isn't octal, it is assumed by convention to be. In
order to pull this off, coerceNodes() was introduced to try to
counteract the type inference that occurs during the yaml
unmarshalling. The config is unmarshalled twice: once into an empty
interface and once into the CloudConfig structure. The two resulting
node structures are combined together. The nodes from the CloudConfig
process replace those from the interface{} when the types of the two
nodes are compatible. For example, with the input `0744`, yaml
interprets that as the integer 484 giving us the nodes '0744'(string)
and 484(int). Because the types string and int are compatible, we opt to
take the string node instead of the integer.
2014-12-30 16:20:21 +01:00
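A minimal sketch of the coercion rule described above, with hypothetical types and a deliberately reduced notion of "compatible" (the real coerceNodes() operates on full YAML node trees):

```go
package main

import "fmt"

// node is a stand-in for the YAML parse-tree nodes discussed above.
type node struct {
	value interface{}
}

// coerce keeps the node from the interface{} pass unless the CloudConfig
// pass produced a string where the interface{} pass produced an int, in
// which case the string wins (so '0744' is preferred over 484). This only
// illustrates the rule in the commit message, not the real coerceNodes().
func coerce(untyped, typed node) node {
	_, untypedIsInt := untyped.value.(int)
	_, typedIsString := typed.value.(string)
	if untypedIsInt && typedIsString {
		return typed
	}
	return untyped
}

func main() {
	fmt.Println(coerce(node{484}, node{"0744"}).value) // 0744
}
```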
Alex Crawford
54a64454b9 validate: fix printing for non-string values 2014-12-30 16:20:21 +01:00
Alex Crawford
0e70d4f01f config: add validity check for file permissions 2014-12-30 16:20:21 +01:00
Alex Crawford
af8e590575 config: change valid tag to use regexp
A regular expression is much more useful than a list of strings.
2014-12-30 16:20:21 +01:00
Alex Crawford
40d943fb7a reboot-strategy: remove the 'false' value
Since we no longer do a two-stage unmarshal, the 'false' value will no
longer be necessary.
2014-12-30 16:20:21 +01:00
Alex Crawford
248536a5cd config: use a YAML transform to normalize keys
This removes the problematic two-pass unmarshalling.
2014-12-30 16:20:21 +01:00
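The transform in question is visible later in this diff: NewCloudConfig sets yaml.UnmarshalMappingKeyTransform on the vendored coreos/yaml fork before unmarshalling, so hyphenated and underscored keys collapse to one canonical form. A condensed, self-contained sketch (the CloudConfig stand-in here is deliberately minimal):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/coreos/yaml"
)

// Minimal stand-in for the CloudConfig struct defined in the config package.
type config struct {
	WriteFiles []struct {
		Path string `yaml:"path"`
	} `yaml:"write_files"`
}

func main() {
	// Rewrite every mapping key before unmarshalling, replacing the old
	// two-pass normalization.
	yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
		return strings.Replace(nameIn, "-", "_", -1)
	}

	var c config
	if err := yaml.Unmarshal([]byte("write-files:\n  - path: /tmp/hyphen\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.WriteFiles[0].Path) // /tmp/hyphen
}
```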
Alex Crawford
4ed1d03c97 godeps: bump github.com/coreos/yaml 2014-12-30 16:20:20 +01:00
Alex Crawford
057ab37364 config: separate the CoreOS type from CloudConfig
Renamed Coreos to CoreOS while I was at it.
2014-12-30 16:20:20 +01:00
Alex Crawford
182241c8d3 config: clean up and remove some tests
Small modification to make these align with our test-table-style tests.
Also removed TestCloudConfigInvalidKeys since it hasn't been a useful
test since d3294bcb86.
2014-12-30 16:19:00 +01:00
Michael Marineau
edced59fa6 Merge pull request #281 from thommay/flannel_env_file
Create an environment file for flannel
2014-12-29 15:07:08 -08:00
Thom May
9be836df31 Create an environment file for flannel
Rather than using a systemd overlay, allow docker to load the
environment file. This is due to coreos/coreos-overlay#1002
2014-12-29 10:27:22 +00:00
Jonathan Boulle
4e54447b8e Merge pull request #286 from jonboulle/master
Godeps: switch to coreos/yaml
2014-12-20 15:43:55 -08:00
Jonathan Boulle
999c38b09b Godeps: switch to coreos/yaml 2014-12-20 15:31:02 -08:00
Alex Crawford
06d13de5c3 coreos-cloudinit: bump to v1.0.2+git 2014-12-12 17:38:28 -08:00
Alex Crawford
5b0903d162 coreos-cloudinit: bump to v1.0.2 2014-12-12 17:37:39 -08:00
Alex Crawford
10669be7c0 Merge pull request #284 from crawford/travis
test: disable Travis sudo capability
2014-12-12 17:28:47 -08:00
Alex Crawford
2edae741e1 test: disable Travis sudo capability 2014-12-12 16:46:18 -08:00
Alex Crawford
ea90e553d1 Merge pull request #282 from crawford/network
network: write network units with user units
2014-12-12 15:12:50 -08:00
Alex Crawford
b0cfd86902 network: write network units with user units
This allows us to test the network unit generation as well as remove
some special-cased code.
2014-12-12 15:08:03 -08:00
Alex Crawford
565a9540c9 Merge pull request #283 from crawford/validate
validate: empty user_data is also valid
2014-12-12 15:05:51 -08:00
Alex Crawford
fd10e27b99 validate: empty user_data is also valid 2014-12-12 14:49:42 -08:00
Michael Marineau
39763d772c Merge pull request #280 from marineam/go1.4
travis: Add Go 1.4 as a test target
2014-12-11 16:34:00 -08:00
Michael Marineau
ee69b77bfb travis: Add Go 1.4 as a test target 2014-12-11 15:29:36 -08:00
Jonathan Boulle
353444e56d Merge pull request #279 from cnelson/write_files_encoding_support
Add support for the encoding key to write_files
2014-12-09 17:37:34 -08:00
cnelson
112ba1e31f Added support for the encoding key in write_files
Supported encodings are base64, gzip, and base64 encoded gzip
2014-12-09 17:35:33 -08:00
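A rough sketch of the decode step implied by that commit, assuming a hypothetical decodeContent helper (the real code lives in the config package and may be structured differently):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

// decodeContent is a hypothetical stand-in for the decoding added by this
// change: it handles the three cases named above (base64, gzip, and
// base64-encoded gzip).
func decodeContent(content, encoding string) ([]byte, error) {
	switch encoding {
	case "":
		return []byte(content), nil
	case "b64", "base64":
		return base64.StdEncoding.DecodeString(content)
	case "gz", "gzip":
		return gunzip([]byte(content))
	case "gz+b64", "gzip+base64":
		raw, err := base64.StdEncoding.DecodeString(content)
		if err != nil {
			return nil, err
		}
		return gunzip(raw)
	}
	return nil, fmt.Errorf("unsupported encoding %q", encoding)
}

func gunzip(data []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}

func main() {
	out, err := decodeContent("UGFjayBteSBib3g=", "base64")
	fmt.Println(string(out), err) // Pack my box <nil>
}
```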
cnelson
9c3cd9e69c bumped version of yaml.v1 2014-12-09 07:49:59 -08:00
Alex Crawford
685d8317bc Merge pull request #275 from mwhooker/master
Enable configuration of locksmithd
2014-12-05 14:09:40 -08:00
Matthew Hooker
f42d102b26 Also fix wording of Flannel section 2014-12-04 23:55:26 -08:00
Matthew Hooker
c944e9ef94 Enable configuration of locksmithd 2014-12-04 23:53:31 -08:00
Alex Crawford
f10d6e8bef coreos-cloudinit: bump to v1.0.1+git 2014-12-04 16:35:20 -08:00
Alex Crawford
f3f3af79fd coreos-cloudinit: bump to v1.0.1 2014-12-04 16:34:57 -08:00
Alex Crawford
0e63aa0f6b Merge pull request #276 from crawford/networkd
initialize: restart networkd before other units
2014-12-04 16:33:02 -08:00
Alex Crawford
b254e17e89 Merge pull request #263 from robszumski/docs-validator
docs: link to validator
2014-12-04 16:28:21 -08:00
Alex Crawford
5c059b66f0 initialize: restart networkd before other units 2014-12-04 15:25:44 -08:00
Alex Crawford
c628bef666 Merge pull request #273 from crawford/networkd
initialize: only restart networkd once per config
2014-12-02 12:54:27 -08:00
Alex Crawford
2270db3f7a initialize: only restart networkd once per config
Regression introduced by 9fcf338bf3.

Networkd was erroneously being restarted once per network unit. It
should only restart once for the entire config.
2014-12-02 12:46:35 -08:00
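Purely as an illustration of the fix (names and structure are hypothetical, not the real initialize code): the restart is tracked as a single flag while units are processed and issued once at the end.

```go
package main

import (
	"fmt"
	"strings"
)

// processUnits writes network units as they are encountered but records the
// networkd restart as a single flag, so it happens once per config.
func processUnits(units []string) {
	restartNetworkd := false
	for _, name := range units {
		fmt.Println("writing", name)
		if strings.HasSuffix(name, ".network") {
			restartNetworkd = true // remember, but do not restart yet
		}
	}
	if restartNetworkd {
		fmt.Println("restarting systemd-networkd.service (once)")
	}
}

func main() {
	processUnits([]string{"10-static.network", "20-dhcp.network", "docker.service"})
}
```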
Alex Crawford
d0d467813d Merge pull request #251 from Vladimiroff/master
metadata: Populate CloudSigma's IPs properly
2014-12-01 14:52:11 -08:00
Alex Crawford
123f111efe coreos-cloudinit: bump to 1.0.0+git 2014-11-26 14:19:29 -08:00
Alex Crawford
521ecfdab5 coreos-cloudinit: bump to 1.0.0 2014-11-26 14:19:13 -08:00
Alex Crawford
6d0fdf1a47 Merge pull request #268 from crawford/dropins
drop-in: add support for drop-ins
2014-11-26 14:14:49 -08:00
Alex Crawford
ffc54b028c drop-in: add support for drop-ins
This allows a list of drop-ins for a unit to be declared inline within a
cloud-config. For example:

  #cloud-config
  coreos:
    units:
      - name: docker.service
        drop-ins:
          - name: 50-insecure-registry.conf
            content: |
              [Service]
              Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
2014-11-26 14:09:35 -08:00
Alex Crawford
420f7cf202 system: clean up TestPlaceUnit() 2014-11-26 10:32:43 -08:00
Alex Crawford
624df676d0 config/unit: move Type() and Group() out of config 2014-11-26 10:32:43 -08:00
Alex Crawford
75ed8dacf9 initialize: clean up TestProcessUnits() 2014-11-26 10:32:43 -08:00
Alex Crawford
dcaabe4d4a system: clean up UnitManager interface 2014-11-26 10:32:43 -08:00
Alex Crawford
92c57423ba Merge pull request #269 from crawford/valid
validate: promote invalid values to an error
2014-11-26 10:32:27 -08:00
Alex Crawford
7447e133c9 validate: promote invalid values to an error 2014-11-26 10:29:09 -08:00
Eugene Yakubovich
4e466c12da Merge pull request #267 from thommay/flannel_unit
the flannel service is called flanneld
2014-11-25 12:30:58 -08:00
Thom May
333468dba3 the flannel service is called flanneld 2014-11-25 14:00:53 +00:00
Alex Crawford
55c3a793ad coreos-cloudinit: bump to 0.11.4+git 2014-11-21 20:11:54 -08:00
Alex Crawford
eca51031c8 coreos-cloudinit: bump to 0.11.4 2014-11-21 20:11:37 -08:00
Alex Crawford
19522bcb82 Merge pull request #266 from crawford/config
config: update configs to match etcd, fleet, and flannel
2014-11-21 20:10:34 -08:00
Alex Crawford
62248ea33d config/fleet: fix configs
Added EtcdKeyPrefix and fixed the types of EngineReconcileInterval and EtcdRequestTimeout.
2014-11-21 16:57:00 -08:00
Alex Crawford
d2a19cc86d config/flannel: correct - vs _ 2014-11-21 16:57:00 -08:00
Alex Crawford
08131ffab1 config/etcd: fix configs
This new table is pulled from the etcd codebase rather than the docs...

Added:
 GraphiteHost
 PeerElectionTimeout
 PeerHeartbeatInterval
 PeerKeyFile
 RetryInterval
 SnapshotCount
 StrTrace
 VeryVeryVerbose

Fixed types:
 ClusterActiveSize
 ClusterRemoveDelay
 ClusterSyncInterval
 HTTPReadTimeout
 HTTPWriteTimeout
 MaxResultBuffer
 MaxRetryAttempts
 Snapshot
 Verbose
 VeryVerbose

Renamed:
 Cors

Removed:
 MaxClusterSize
 CPUProfileFile
2014-11-21 16:57:00 -08:00
Alex Crawford
4a0019c669 config: add support for float64 2014-11-21 16:13:49 -08:00
Alex Crawford
3275ead1ec coreos-cloudinit: bump to 0.11.3+git 2014-11-21 12:25:26 -08:00
Alex Crawford
32b6a55724 coreos-cloudinit: bump to 0.11.3 2014-11-21 12:25:04 -08:00
Alex Crawford
6c43644369 Merge pull request #265 from crawford/update
config/update: add "off" as a valid strategy
2014-11-21 12:22:45 -08:00
Alex Crawford
e6593d49e6 config/update: add "off" as a valid strategy
It was assumed that the user would specify the reboot strategy as an
unquoted value. In the case that they turn off updates, `off` is
interpreted as a boolean and the normalization pass converts that to
`false`. In the event that the user uses `"off"`, it's interpreted as a
string and not modified.
2014-11-21 10:41:03 -08:00
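The coercion described here can be seen directly with the yaml fork vendored in this change set: unquoted off hits the YAML 1.1 boolean table (see resolve.go later in this diff) and becomes false, while the quoted form stays a string.

```go
package main

import (
	"fmt"

	"github.com/coreos/yaml"
)

func main() {
	var unquoted, quoted map[string]interface{}
	if err := yaml.Unmarshal([]byte("reboot-strategy: off"), &unquoted); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte(`reboot-strategy: "off"`), &quoted); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", unquoted["reboot-strategy"], unquoted["reboot-strategy"]) // bool false
	fmt.Printf("%T %v\n", quoted["reboot-strategy"], quoted["reboot-strategy"])     // string off
}
```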
Alex Crawford
ab752b239f coreos-cloudinit: bump to 0.11.2+git 2014-11-20 11:29:25 -08:00
Alex Crawford
0742e4d357 coreos-cloudinit: bump to 0.11.2 2014-11-20 11:29:12 -08:00
Alex Crawford
78f586ec9e Merge pull request #262 from crawford/permissions
config: fix parsing of file permissions
2014-11-20 11:28:11 -08:00
Alex Crawford
6f91b76d79 docs: correct type of permissions 2014-11-20 11:14:44 -08:00
Alex Crawford
5c80ccacc4 config: fix parsing of file permissions
The file permissions can be specified (unfortunately) as a string or an
octal integer. During the normalization step, every field is
unmarshalled into an interface{}. String types are kept intact but
integers are converted to decimal integers. If the raw config
represented the permissions as an octal, it would be converted to
decimal _before_ it was saved to RawFilePermissions. Permissions() would
then try to convert it again, assuming it was an octal. The new behavior
doesn't assume the radix of the number, allowing decimal and octal
input.
2014-11-20 11:14:44 -08:00
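A sketch of the radix-agnostic parse described above (the real method belongs to the File type in the config package; this stand-alone version only illustrates the strconv behaviour): base 0 lets a leading zero select octal, so both the decimal form produced by normalization and an explicit octal string yield the same mode.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// filePermissions parses the raw permissions value without assuming a radix,
// so "0744" (octal, via the leading zero) and "484" (decimal) both map to
// mode 0744. Illustrative only.
func filePermissions(raw string) (os.FileMode, error) {
	v, err := strconv.ParseUint(raw, 0, 32) // base 0: infer radix from the prefix
	if err != nil {
		return 0, err
	}
	return os.FileMode(v), nil
}

func main() {
	for _, raw := range []string{"0744", "484"} {
		mode, err := filePermissions(raw)
		fmt.Printf("%s -> %o %v\n", raw, mode, err) // both print 744
	}
}
```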
Rob Szumski
44fdf95d99 docs: mention validate flag 2014-11-20 11:12:31 -08:00
Rob Szumski
0a62614eec docs: link to validator 2014-11-20 10:58:57 -08:00
Alex Crawford
97758b343b coreos-cloudinit: bump to 0.11.1+git 2014-11-18 12:14:34 -08:00
Alex Crawford
fb6f52b360 coreos-cloudinit: bump to 0.11.1 2014-11-18 12:14:29 -08:00
Alex Crawford
786cd2a539 Merge pull request #259 from crawford/hyphen
config/validate: disable - vs _ message for now
2014-11-18 12:12:26 -08:00
Alex Crawford
45793f1254 config/validate: disable - vs _ message for now 2014-11-18 12:11:50 -08:00
Alex Crawford
b621756d92 Merge pull request #258 from crawford/header
config/validate: fix line number for header check
2014-11-18 12:11:35 -08:00
Alex Crawford
a5b5c700a6 config/validate: fix line number for header check 2014-11-18 12:02:23 -08:00
Kiril Vladimirov
ea95920f31 fix(datasource/CloudSigma): Make sure DHCP has run 2014-11-17 15:35:10 +02:00
Alex Crawford
d7602f3c08 Merge pull request #244 from eyakubovich/master
flannel: added flannel support and helper to make dropins
2014-11-14 10:46:19 -08:00
Eugene Yakubovich
a20addd05e flannel: added flannel support and helper to make dropins
fleet, flannel, and etcd all generate dropins from config.
To reduce code duplication, factor out a helper to do that.
2014-11-14 10:45:23 -08:00
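An illustrative stand-in for that shared helper (hypothetical name and signature; the real helper works from the typed config structs): render a [Service] drop-in that exports prefixed environment variables, as shown in the documentation changes below.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// dropinContents renders a systemd drop-in exporting each config value as an
// environment variable under the given prefix (e.g. FLANNELD_ETCD_PREFIX).
func dropinContents(prefix string, vars map[string]string) string {
	keys := make([]string, 0, len(vars))
	for k := range vars {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic output

	lines := []string{"[Service]"}
	for _, k := range keys {
		env := prefix + "_" + strings.ToUpper(k)
		lines = append(lines, fmt.Sprintf("Environment=\"%s=%s\"", env, vars[k]))
	}
	return strings.Join(lines, "\n") + "\n"
}

func main() {
	fmt.Print(dropinContents("FLANNELD", map[string]string{"etcd_prefix": "/coreos.com/network2"}))
	// [Service]
	// Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
}
```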
Alex Crawford
d9d89a6fa0 coreos-cloudinit: bump to 0.11.0+git 2014-11-14 10:42:00 -08:00
Kiril Vladimirov
b6062f0644 fix(datasource/CloudSigma): Populate local IPv4 address properly 2014-10-23 15:03:23 +03:00
Kiril Vladimirov
c5fada6e69 fix(datasource/CloudSigma): Populate public IPv4 address properly 2014-10-23 13:21:49 +03:00
68 changed files with 2580 additions and 911 deletions

View File

@@ -1,11 +1,17 @@
language: go
go:
- 1.3
- 1.2
sudo: false
matrix:
include:
- go: 1.4
env: TOOLS_CMD=golang.org/x/tools/cmd
- go: 1.3
env: TOOLS_CMD=code.google.com/p/go.tools/cmd
- go: 1.2
env: TOOLS_CMD=code.google.com/p/go.tools/cmd
install:
- go get code.google.com/p/go.tools/cmd/cover
- go get code.google.com/p/go.tools/cmd/vet
- go get ${TOOLS_CMD}/cover
- go get ${TOOLS_CMD}/vet
script:
- ./test

View File

@@ -1,6 +1,8 @@
# Using Cloud-Config
CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime. Your cloud-config is processed during each boot.
CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.
Your cloud-config is processed during each boot. Invalid cloud-config won't be processed but will be logged in the journal. You can validate your cloud-config with the [CoreOS validator]({{site.url}}/validate) or by running `coreos-cloudinit -validate`.
## Configuration File
@@ -16,7 +18,7 @@ We've designed our implementation to allow the same cloud-config file to work ac
The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.
A cloud-config file should contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:
A cloud-config file must contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:
- `coreos`
- `ssh_authorized_keys`
@@ -46,13 +48,13 @@ If the platform environment supports the templating feature of coreos-cloudinit
#cloud-config
coreos:
etcd:
name: node001
# generate a new token for each unique cluster from https://discovery.etcd.io/new
discovery: https://discovery.etcd.io/<token>
# multi-region and multi-cloud deployments need to use $public_ipv4
addr: $public_ipv4:4001
peer-addr: $private_ipv4:7001
etcd:
name: node001
# generate a new token for each unique cluster from https://discovery.etcd.io/new
discovery: https://discovery.etcd.io/<token>
# multi-region and multi-cloud deployments need to use $public_ipv4
addr: $public_ipv4:4001
peer-addr: $private_ipv4:7001
```
...will generate a systemd unit drop-in like this:
@@ -66,7 +68,6 @@ Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
```
For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
@@ -80,9 +81,9 @@ The `coreos.fleet.*` parameters work very similarly to `coreos.etcd.*`, and allo
#cloud-config
coreos:
fleet:
public-ip: $public_ipv4
metadata: region=us-west
fleet:
public-ip: $public_ipv4
metadata: region=us-west
```
...will generate a systemd unit drop-in like this:
@@ -97,6 +98,63 @@ For more information on fleet configuration, see the [fleet documentation][fleet
[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration
#### flannel
The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd.*`
and `coreos.fleet.*`. They can be used to set environment variables for
flanneld. For example, the following cloud-config...
```yaml
#cloud-config
coreos:
flannel:
etcd_prefix: /coreos.com/network2
```
...will generate a systemd unit drop-in like so:
```
[Service]
Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
```
List of flannel configuration parameters:
- **etcd_endpoints**: Comma separated list of etcd endpoints
- **etcd_cafile**: Path to CA file used for TLS communication with etcd
- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
- **etcd_prefix**: Etcd prefix path to be used for flannel keys
- **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
- **subnet_file**: Path to flannel subnet file to write out
- **interface**: Interface (name or IP) that should be used for inter-host communication
[flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
#### locksmith
The `coreos.locksmith.*` parameters can be used to set environment variables
for locksmith. For example, the following cloud-config...
```yaml
#cloud-config
coreos:
locksmith:
endpoint: example.com:4001
```
...will generate a systemd unit drop-in like so:
```
[Service]
Environment="LOCKSMITHD_ENDPOINT=example.com:4001"
```
For the complete list of locksmith configuration parameters, see the [locksmith documentation][locksmith-readme].
[locksmith-readme]: https://github.com/coreos/locksmith/blob/master/README.md
#### update
The `coreos.update.*` parameters manipulate settings related to how CoreOS instances are updated.
@@ -135,6 +193,10 @@ Each item is an object with the following fields:
- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
- **drop-ins**: A list of unit drop-ins with the following fields:
- **name**: String representing unit's name. Required.
- **content**: Plaintext string representing entire file. Required.
**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
@@ -146,19 +208,34 @@ Write a unit to disk, automatically starting it.
#cloud-config
coreos:
units:
- name: docker-redis.service
command: start
content: |
[Unit]
Description=Redis container
Author=Me
After=docker.service
units:
- name: docker-redis.service
command: start
content: |
[Unit]
Description=Redis container
Author=Me
After=docker.service
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a redis_server
ExecStop=/usr/bin/docker stop -t 2 redis_server
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a redis_server
ExecStop=/usr/bin/docker stop -t 2 redis_server
```
Add the DOCKER_OPTS environment variable to docker.service.
```yaml
#cloud-config
coreos:
units:
- name: docker.service
drop-ins:
- name: 50-insecure-registry.conf
content: |
[Service]
Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
```
Start the built-in `etcd` and `fleet` services:
@@ -167,11 +244,11 @@ Start the built-in `etcd` and `fleet` services:
#cloud-config
coreos:
units:
- name: etcd.service
command: start
- name: fleet.service
command: start
units:
- name: etcd.service
command: start
- name: fleet.service
command: start
```
### ssh_authorized_keys
@@ -214,6 +291,7 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
- **no-user-group**: Boolean. Skip default group creation.
- **ssh-authorized-keys**: List of public SSH keys to authorize for this user
- **coreos-ssh-import-github**: Authorize SSH keys from Github user
- **coreos-ssh-import-github-users**: Authorize SSH keys from a list of Github users
- **coreos-ssh-import-url**: Authorize SSH keys imported from a url endpoint.
- **system**: Create the user as a system user. No home directory will be created.
- **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
@@ -303,11 +381,13 @@ Each item in the list may have the following keys:
- **path**: Absolute location on disk where contents should be written
- **content**: Data to write at the provided `path`
- **permissions**: String representing file permissions in octal notation (i.e. '0644')
- **permissions**: Integer representing file permissions, typically in octal notation (i.e. 0644)
- **owner**: User and group that should own the file written to disk. This is equivalent to the `<user>:<group>` argument to `chown <user>:<group> <path>`.
- **encoding**: Optional. The encoding of the data in content. If not specified this defaults to the yaml document encoding (usually utf-8). Supported encoding types are:
- **b64, base64**: Base64 encoded content
- **gz, gzip**: gzip encoded content, for use with the !!binary tag
- **gz+b64, gz+base64, gzip+b64, gzip+base64**: Base64 encoded gzip content
Explicitly not implemented is the **encoding** attribute.
The **content** field must represent exactly what should be written to disk.
```yaml
#cloud-config
@@ -322,6 +402,24 @@ write_files:
owner: root
content: |
Good news, everyone!
- path: /tmp/like_this
permissions: 0644
owner: root
encoding: gzip
content: !!binary |
H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
- path: /tmp/or_like_this
permissions: 0644
owner: root
encoding: gzip+base64
content: |
H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
- path: /tmp/todolist
permissions: 0644
owner: root
encoding: base64
content: |
UGFjayBteSBib3ggd2l0aCBmaXZlIGRvemVuIGxpcXVvciBqdWdz
```
### manage_etc_hosts

Godeps/Godeps.json generated
View File

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/coreos/coreos-cloudinit",
"GoVersion": "go1.3.1",
"GoVersion": "go1.3.3",
"Packages": [
"./..."
],
@@ -13,6 +13,10 @@
"ImportPath": "github.com/coreos/go-systemd/dbus",
"Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
},
{
"ImportPath": "github.com/coreos/yaml",
"Rev": "6b16a5714269b2f70720a45406b1babd947a17ef"
},
{
"ImportPath": "github.com/dotcloud/docker/pkg/netlink",
"Comment": "v0.11.1-359-g55d41c3e21e1",
@@ -25,10 +29,6 @@
{
"ImportPath": "github.com/tarm/goserial",
"Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
},
{
"ImportPath": "gopkg.in/yaml.v1",
"Rev": "feb4ca79644e8e7e39c06095246ee54b1282c118"
}
]
}

View File

@@ -1,3 +1,6 @@
Copyright (c) 2011-2014 - Canonical Inc.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3

View File

@@ -1,3 +1,6 @@
Note: This is a fork of https://github.com/go-yaml/yaml. The following README
doesn't necessarily apply to this fork.
# YAML support for the Go language
Introduction
@@ -12,10 +15,10 @@ C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package is almost compatible with YAML 1.1, including support for
anchors, tags, etc. There are still a few missing bits, such as document
merging, base-60 floats (huh?), and multi-document unmarshalling. These
features are not hard to add, and will be introduced as necessary.
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------

View File

@@ -1,6 +1,8 @@
package yaml
import (
"encoding/base64"
"fmt"
"reflect"
"strconv"
"time"
@@ -28,13 +30,15 @@ type node struct {
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
parser yaml_parser_t
event yaml_event_t
doc *node
transform transformString
}
func newParser(b []byte) *parser {
p := parser{}
func newParser(b []byte, t transformString) *parser {
p := parser{transform: t}
if !yaml_parser_initialize(&p.parser) {
panic("Failed to initialize YAML emitter")
}
@@ -63,7 +67,7 @@ func (p *parser) destroy() {
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
panic("Attempted to go past the end of stream. Corrupted value?")
fail("Attempted to go past the end of stream. Corrupted value?")
}
yaml_event_delete(&p.event)
}
@@ -89,7 +93,7 @@ func (p *parser) fail() {
} else {
msg = "Unknown problem parsing YAML content"
}
panic(where + msg)
fail(where + msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
@@ -114,10 +118,9 @@ func (p *parser) parse() *node {
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("Attempted to parse unknown event: " +
strconv.Itoa(int(p.event.typ)))
panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("Unreachable")
panic("unreachable")
}
func (p *parser) node(kind int) *node {
@@ -135,8 +138,7 @@ func (p *parser) document() *node {
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("Expected end of document event but got " +
strconv.Itoa(int(p.event.typ)))
panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
@@ -175,7 +177,10 @@ func (p *parser) mapping() *node {
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
key := p.parse()
key.value = p.transform(key.value)
value := p.parse()
n.children = append(n.children, key, value)
}
p.skip()
return n
@@ -218,7 +223,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
var arg interface{}
*out = reflect.ValueOf(&arg).Elem()
return func() {
*good = setter.SetYAML(tag, arg)
*good = setter.SetYAML(shortTag(tag), arg)
}
}
}
@@ -226,7 +231,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
for again {
again = false
setter, _ := (*out).Interface().(Setter)
if tag != "!!null" || setter != nil {
if tag != yaml_NULL_TAG || setter != nil {
if pv := (*out); pv.Kind() == reflect.Ptr {
if pv.IsNil() {
*out = reflect.New(pv.Type().Elem()).Elem()
@@ -242,7 +247,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
var arg interface{}
*out = reflect.ValueOf(&arg).Elem()
return func() {
*good = setter.SetYAML(tag, arg)
*good = setter.SetYAML(shortTag(tag), arg)
}
}
}
@@ -279,10 +284,10 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
panic("Unknown anchor '" + n.value + "' referenced")
fail("Unknown anchor '" + n.value + "' referenced")
}
if d.aliases[n.value] {
panic("Anchor '" + n.value + "' value contains itself")
fail("Anchor '" + n.value + "' value contains itself")
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
@@ -290,23 +295,50 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
var durationType = reflect.TypeOf(time.Duration(0))
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = "!!str"
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
fail("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if set := d.setter(tag, &out, &good); set != nil {
defer set()
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
good = true
return
}
switch out.Kind() {
case reflect.String:
if resolved != nil {
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
good = true
}
@@ -380,17 +412,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
good = true
}
case reflect.Ptr:
switch resolved.(type) {
case nil:
out.Set(reflect.Zero(out.Type()))
if out.Type().Elem() == reflect.TypeOf(resolved) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
default:
if out.Type().Elem() == reflect.TypeOf(resolved) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
}
return good
@@ -404,7 +430,7 @@ func settableValueOf(i interface{}) reflect.Value {
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
if set := d.setter("!!seq", &out, &good); set != nil {
if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil {
defer set()
}
var iface reflect.Value
@@ -433,7 +459,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
if set := d.setter("!!map", &out, &good); set != nil {
if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil {
defer set()
}
if out.Kind() == reflect.Struct {
@@ -465,6 +491,13 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
fail(fmt.Sprintf("invalid map key: %#v", k.Interface()))
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
@@ -511,28 +544,28 @@ func (d *decoder) merge(n *node, out reflect.Value) {
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
panic(wantMap)
fail(wantMap)
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children)-1; i >= 0; i-- {
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
panic(wantMap)
fail(wantMap)
}
} else if ni.kind != mappingNode {
panic(wantMap)
fail(wantMap)
}
d.unmarshal(ni, out)
}
default:
panic(wantMap)
fail(wantMap)
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}

View File

@@ -1,10 +1,11 @@
package yaml_test
import (
"github.com/coreos/yaml"
. "gopkg.in/check.v1"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
"math"
"reflect"
"strings"
"time"
)
@@ -316,7 +317,10 @@ var unmarshalTests = []struct {
map[string]*string{"foo": new(string)},
}, {
"foo: null",
map[string]string{},
map[string]string{"foo": ""},
}, {
"foo: null",
map[string]interface{}{"foo": nil},
},
// Ignored field
@@ -377,6 +381,24 @@ var unmarshalTests = []struct {
"a: <foo>",
map[string]string{"a": "<foo>"},
},
// Base 60 floats are obsolete and unsupported.
{
"a: 1:1\n",
map[string]string{"a": "1:1"},
},
// Binary data.
{
"a: !!binary gIGC\n",
map[string]string{"a": "\x80\x81\x82"},
}, {
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
map[string]string{"a": strings.Repeat("\x90", 54)},
}, {
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
map[string]string{"a": strings.Repeat("\x00", 52)},
},
}
type inlineB struct {
@@ -424,12 +446,15 @@ func (s *S) TestUnmarshalNaN(c *C) {
var unmarshalErrorTests = []struct {
data, error string
}{
{"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
{"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"},
{"v: [A,", "YAML error: line 1: did not find expected node content"},
{"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
{"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
{"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
{"value: -", "YAML error: block sequence entries are not allowed in this context"},
{"a: !!binary ==", "YAML error: !!binary value contains invalid base64 data"},
{"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`},
{"{{.}}", `YAML error: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
}
func (s *S) TestUnmarshalErrors(c *C) {
@@ -532,6 +557,23 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
c.Assert(m["ghi"].value, Equals, 3)
}
func (s *S) TestUnmarshalWithTransform(c *C) {
data := `{a_b: 1, c-d: 2, e-f_g: 3, h_i-j: 4}`
expect := map[string]int{
"a_b": 1,
"c_d": 2,
"e_f_g": 3,
"h_i_j": 4,
}
m := map[string]int{}
yaml.UnmarshalMappingKeyTransform = func(i string) string {
return strings.Replace(i, "-", "_", -1)
}
err := yaml.Unmarshal([]byte(data), m)
c.Assert(err, IsNil)
c.Assert(m, DeepEquals, expect)
}
// From http://yaml.org/type/merge.html
var mergeTests = `
anchors:
@@ -624,6 +666,30 @@ func (s *S) TestMergeStruct(c *C) {
}
}
var unmarshalNullTests = []func() interface{}{
func() interface{} { var v interface{}; v = "v"; return &v },
func() interface{} { var s = "s"; return &s },
func() interface{} { var s = "s"; sptr := &s; return &sptr },
func() interface{} { var i = 1; return &i },
func() interface{} { var i = 1; iptr := &i; return &iptr },
func() interface{} { m := map[string]int{"s": 1}; return &m },
func() interface{} { m := map[string]int{"s": 1}; return m },
}
func (s *S) TestUnmarshalNull(c *C) {
for _, test := range unmarshalNullTests {
item := test()
zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
err := yaml.Unmarshal([]byte("null"), item)
c.Assert(err, IsNil)
if reflect.TypeOf(item).Kind() == reflect.Map {
c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
} else {
c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
}
}
}
//var data []byte
//func init() {
// var err error

View File

@@ -973,8 +973,8 @@ func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
if bytes.HasPrefix(tag, tag_directive.prefix) {
emitter.tag_data.handle = tag_directive.handle
emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
return true
}
return true
}
emitter.tag_data.suffix = tag
return true
@@ -1279,6 +1279,9 @@ func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_
for k := 0; k < w; k++ {
octet := value[i]
i++
if !put(emitter, '%') {
return false
}
c := octet >> 4
if c < 10 {

View File

@@ -2,8 +2,10 @@ package yaml
import (
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
@@ -50,14 +52,19 @@ func (e *encoder) must(ok bool) {
if msg == "" {
msg = "Unknown problem generating YAML content"
}
panic(msg)
fail(msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
e.nilv()
return
}
var value interface{}
if getter, ok := in.Interface().(Getter); ok {
tag, value = getter.GetYAML()
tag = longTag(tag)
if value == nil {
e.nilv()
return
@@ -98,7 +105,7 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
case reflect.Bool:
e.boolv(tag, in)
default:
panic("Can't marshal type yet: " + in.Type().String())
panic("Can't marshal type: " + in.Type().String())
}
}
@@ -167,11 +174,46 @@ func (e *encoder) slicev(tag string, in reflect.Value) {
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
if rtag, _ := resolve("", s); rtag != "!!str" {
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
fail("explicitly tagged !!binary data must be base64-encoded")
} else {
fail("cannot marshal invalid UTF-8 data as " + shortTag(tag))
}
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
@@ -218,9 +260,6 @@ func (e *encoder) nilv() {
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
if !implicit {
style = yaml_PLAIN_SCALAR_STYLE
}
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}

View File

@@ -2,12 +2,13 @@ package yaml_test
import (
"fmt"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
. "gopkg.in/check.v1"
"math"
"strconv"
"strings"
"time"
"github.com/coreos/yaml"
. "gopkg.in/check.v1"
)
var marshalIntTest = 123
@@ -17,6 +18,9 @@ var marshalTests = []struct {
data string
}{
{
nil,
"null\n",
}, {
&struct{}{},
"{}\n",
}, {
@@ -87,7 +91,7 @@ var marshalTests = []struct {
"v:\n- A\n- B\n",
}, {
map[string][]string{"v": []string{"A", "B\nC"}},
"v:\n- A\n- 'B\n\n C'\n",
"v:\n- A\n- |-\n B\n C\n",
}, {
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
@@ -220,11 +224,39 @@ var marshalTests = []struct {
"a: 3s\n",
},
// Issue #24.
// Issue #24: bug in map merging logic.
{
map[string]string{"a": "<foo>"},
"a: <foo>\n",
},
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
// with old YAML 1.1 parsers.
{
map[string]string{"a": "1:1"},
"a: \"1:1\"\n",
},
// Binary data.
{
map[string]string{"a": "\x00"},
"a: \"\\0\"\n",
}, {
map[string]string{"a": "\x80\x81\x82"},
"a: !!binary gIGC\n",
}, {
map[string]string{"a": strings.Repeat("\x90", 54)},
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
}, {
map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}},
"a: !!binary gIGC\n",
},
// Escaping of tags.
{
map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}},
"a: !<foo%21bar> 1\n",
},
}
func (s *S) TestMarshal(c *C) {
@@ -238,20 +270,29 @@ func (s *S) TestMarshal(c *C) {
var marshalErrorTests = []struct {
value interface{}
error string
}{
{
&struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
`Duplicated key 'b' in struct struct \{ B int; .*`,
},
}
panic string
}{{
value: &struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
value: typeWithGetter{"!!binary", "\x80"},
error: "YAML error: explicitly tagged !!binary data must be base64-encoded",
}, {
value: typeWithGetter{"!!float", "\x80"},
error: `YAML error: cannot marshal invalid UTF-8 data as !!float`,
}}
func (s *S) TestMarshalErrors(c *C) {
for _, item := range marshalErrorTests {
_, err := yaml.Marshal(item.value)
c.Assert(err, ErrorMatches, item.error)
if item.panic != "" {
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
} else {
_, err := yaml.Marshal(item.value)
c.Assert(err, ErrorMatches, item.error)
}
}
}

Godeps/_workspace/src/github.com/coreos/yaml/resolve.go generated vendored Normal file
View File

@@ -0,0 +1,190 @@
package yaml
import (
"encoding/base64"
"fmt"
"math"
"strconv"
"strings"
"unicode/utf8"
)
// TODO: merge, timestamps, base 60 floats, omap.
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
}
fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parser, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, int(intv)
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
return yaml_INT_TAG, -int(intv)
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}

View File

@@ -10,23 +10,20 @@ import (
"errors"
"fmt"
"reflect"
"runtime"
"strings"
"sync"
)
type yamlError string
func fail(msg string) {
panic(yamlError(msg))
}
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(*reflect.ValueError); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New("YAML error: " + s)
} else if e, ok := r.(error); ok {
*err = e
if e, ok := r.(yamlError); ok {
*err = errors.New("YAML error: " + string(e))
} else {
panic(r)
}
@@ -78,7 +75,7 @@ type Getter interface {
// F int `yaml:"a,omitempty"`
// B int
// }
// var T t
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
@@ -87,11 +84,15 @@ type Getter interface {
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
p := newParser(in, UnmarshalMappingKeyTransform)
defer p.destroy()
node := p.parse()
if node != nil {
d.unmarshal(node, reflect.ValueOf(out))
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
return nil
}
@@ -145,6 +146,17 @@ func Marshal(in interface{}) (out []byte, err error) {
return
}
// UnmarshalMappingKeyTransform is a string transformation that is applied to
// each mapping key in a YAML document before it is unmarshalled. By default,
// UnmarshalMappingKeyTransform is an identity transform (no modification).
var UnmarshalMappingKeyTransform transformString = identityTransform
type transformString func(in string) (out string)
func identityTransform(in string) (out string) {
return in
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
@@ -174,12 +186,6 @@ type fieldInfo struct {
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
@@ -220,8 +226,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
@@ -229,6 +234,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
if inline {
switch field.Type.Kind() {
// TODO: Implement support for inline maps.
//case reflect.Map:
// if inlineMap >= 0 {
// return nil, errors.New("Multiple ,inline maps in struct " + st.String())
@@ -256,8 +262,8 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldsList = append(fieldsList, finfo)
}
default:
//panic("Option ,inline needs a struct value or map field")
panic("Option ,inline needs a struct value field")
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}

View File

@@ -294,6 +294,10 @@ const (
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.

View File

@@ -1,147 +0,0 @@
package yaml
import (
"math"
"strconv"
"strings"
)
// TODO: merge, timestamps, base 60 floats, omap.
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, "!!bool", []string{"y", "Y", "yes", "Yes", "YES"}},
{true, "!!bool", []string{"true", "True", "TRUE"}},
{true, "!!bool", []string{"on", "On", "ON"}},
{false, "!!bool", []string{"n", "N", "no", "No", "NO"}},
{false, "!!bool", []string{"false", "False", "FALSE"}},
{false, "!!bool", []string{"off", "Off", "OFF"}},
{nil, "!!null", []string{"~", "null", "Null", "NULL"}},
{math.NaN(), "!!float", []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", "!!merge", []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", "!!str", "!!bool", "!!int", "!!float", "!!null":
return true
}
return false
}
func resolve(tag string, in string) (rtag string, out interface{}) {
tag = shortTag(tag)
if !resolvableTag(tag) {
return tag, in
}
defer func() {
if tag != "" && tag != rtag {
panic("Can't decode " + rtag + " '" + in + "' as a " + tag)
}
}()
if in == "" {
return "!!null", nil
}
c := resolveTable[in[0]]
if c == 0 {
// It's a string for sure. Nothing to do.
return "!!str", in
}
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
switch c {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return "!!float", floatv
}
// XXX Handle base 60 floats here (WTF!)
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return "!!int", int(intv)
} else {
return "!!int", intv
}
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return "!!float", floatv
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
return "!!int", int(intv)
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
return "!!int", -int(intv)
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " +
string([]byte{c}) + " (with " + in + ")")
}
return "!!str", in
}

View File

@@ -19,9 +19,10 @@ package config
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
)
// CloudConfig encapsulates the entire cloud-config configuration file and maps
@@ -29,13 +30,7 @@ import (
// used for internal use) have the YAML tag '-' so that they aren't marshalled.
type CloudConfig struct {
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
Coreos struct {
Etcd Etcd `yaml:"etcd"`
Fleet Fleet `yaml:"fleet"`
OEM OEM `yaml:"oem"`
Update Update `yaml:"update"`
Units []Unit `yaml:"units"`
} `yaml:"coreos"`
CoreOS CoreOS `yaml:"coreos"`
WriteFiles []File `yaml:"write_files"`
Hostname string `yaml:"hostname"`
Users []User `yaml:"users"`
@@ -44,6 +39,16 @@ type CloudConfig struct {
NetworkConfig string `yaml:"-"`
}
type CoreOS struct {
Etcd Etcd `yaml:"etcd"`
Flannel Flannel `yaml:"flannel"`
Fleet Fleet `yaml:"fleet"`
Locksmith Locksmith `yaml:"locksmith"`
OEM OEM `yaml:"oem"`
Update Update `yaml:"update"`
Units []Unit `yaml:"units"`
}
func IsCloudConfig(userdata string) bool {
header := strings.SplitN(userdata, "\n", 2)[0]
@@ -59,15 +64,12 @@ func IsCloudConfig(userdata string) bool {
// string of YAML), returning any error encountered. It will ignore unknown
// fields but log encountering them.
func NewCloudConfig(contents string) (*CloudConfig, error) {
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
return strings.Replace(nameIn, "-", "_", -1)
}
var cfg CloudConfig
ncontents, err := normalizeConfig(contents)
if err != nil {
return &cfg, err
}
if err = yaml.Unmarshal(ncontents, &cfg); err != nil {
return &cfg, err
}
return &cfg, nil
err := yaml.Unmarshal([]byte(contents), &cfg)
return &cfg, err
}
func (cc CloudConfig) String() string {
@@ -90,7 +92,7 @@ func IsZero(c interface{}) bool {
type ErrorValid struct {
Value string
Valid []string
Valid string
Field string
}
@@ -124,16 +126,15 @@ func AssertValid(value reflect.Value, valid string) *ErrorValid {
if valid == "" || isZero(value) {
return nil
}
vs := fmt.Sprintf("%v", value.Interface())
valids := strings.Split(valid, ",")
for _, valid := range valids {
if vs == valid {
return nil
}
if m, _ := regexp.MatchString(valid, vs); m {
return nil
}
return &ErrorValid{
Value: vs,
Valid: valids,
Valid: valid,
}
}
@@ -155,31 +156,3 @@ func isZero(v reflect.Value) bool {
func isFieldExported(f reflect.StructField) bool {
return f.PkgPath == ""
}
func normalizeConfig(config string) ([]byte, error) {
var cfg map[interface{}]interface{}
if err := yaml.Unmarshal([]byte(config), &cfg); err != nil {
return nil, err
}
return yaml.Marshal(normalizeKeys(cfg))
}
func normalizeKeys(m map[interface{}]interface{}) map[interface{}]interface{} {
for k, v := range m {
if m, ok := m[k].(map[interface{}]interface{}); ok {
normalizeKeys(m)
}
if s, ok := m[k].([]interface{}); ok {
for _, e := range s {
if m, ok := e.(map[interface{}]interface{}); ok {
normalizeKeys(m)
}
}
}
delete(m, k)
m[strings.Replace(fmt.Sprint(k), "-", "_", -1)] = v
}
return m
}

View File

@@ -18,13 +18,67 @@ package config
import (
"reflect"
"regexp"
"strings"
"testing"
)
func TestNewCloudConfig(t *testing.T) {
tests := []struct {
contents string
config CloudConfig
}{
{},
{
contents: "#cloud-config\nwrite_files:\n - path: underscore",
config: CloudConfig{WriteFiles: []File{File{Path: "underscore"}}},
},
{
contents: "#cloud-config\nwrite-files:\n - path: hyphen",
config: CloudConfig{WriteFiles: []File{File{Path: "hyphen"}}},
},
{
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: off",
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "off"}}},
},
{
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: false",
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "false"}}},
},
{
contents: "#cloud-config\nwrite_files:\n - permissions: 0744",
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
},
{
contents: "#cloud-config\nwrite_files:\n - permissions: 744",
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
},
{
contents: "#cloud-config\nwrite_files:\n - permissions: '0744'",
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
},
{
contents: "#cloud-config\nwrite_files:\n - permissions: '744'",
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
},
}
for i, tt := range tests {
config, err := NewCloudConfig(tt.contents)
if err != nil {
t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
}
if !reflect.DeepEqual(&tt.config, config) {
t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config)
}
}
}
func TestIsZero(t *testing.T) {
for _, tt := range []struct {
c interface{}
tests := []struct {
c interface{}
empty bool
}{
{struct{}{}, true},
@@ -34,7 +88,9 @@ func TestIsZero(t *testing.T) {
{struct{ A string }{A: "hello"}, false},
{struct{ A int }{}, true},
{struct{ A int }{A: 1}, false},
} {
}
for _, tt := range tests {
if empty := IsZero(tt.c); tt.empty != empty {
t.Errorf("bad result (%q): want %t, got %t", tt.c, tt.empty, empty)
}
@@ -42,66 +98,68 @@ func TestIsZero(t *testing.T) {
}
func TestAssertStructValid(t *testing.T) {
for _, tt := range []struct {
c interface{}
tests := []struct {
c interface{}
err error
}{
{struct{}{}, nil},
{struct {
A, b string `valid:"1,2"`
A, b string `valid:"^1|2$"`
}{}, nil},
{struct {
A, b string `valid:"1,2"`
A, b string `valid:"^1|2$"`
}{A: "1", b: "2"}, nil},
{struct {
A, b string `valid:"1,2"`
A, b string `valid:"^1|2$"`
}{A: "1", b: "hello"}, nil},
{struct {
A, b string `valid:"1,2"`
}{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: []string{"1", "2"}}},
A, b string `valid:"^1|2$"`
}{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: "^1|2$"}},
{struct {
A, b int `valid:"1,2"`
A, b int `valid:"^1|2$"`
}{}, nil},
{struct {
A, b int `valid:"1,2"`
A, b int `valid:"^1|2$"`
}{A: 1, b: 2}, nil},
{struct {
A, b int `valid:"1,2"`
A, b int `valid:"^1|2$"`
}{A: 1, b: 9}, nil},
{struct {
A, b int `valid:"1,2"`
}{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: []string{"1", "2"}}},
} {
A, b int `valid:"^1|2$"`
}{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: "^1|2$"}},
}
for _, tt := range tests {
if err := AssertStructValid(tt.c); !reflect.DeepEqual(tt.err, err) {
t.Errorf("bad result (%q): want %q, got %q", tt.c, tt.err, err)
}
}
}
func TestCloudConfigInvalidKeys(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatalf("panic while instantiating CloudConfig with nil keys: %v", r)
}
}()
func TestConfigCompile(t *testing.T) {
tests := []interface{}{
Etcd{},
File{},
Flannel{},
Fleet{},
Locksmith{},
OEM{},
Unit{},
Update{},
}
for _, tt := range []struct {
contents string
}{
{"coreos:"},
{"ssh_authorized_keys:"},
{"ssh_authorized_keys:\n -"},
{"ssh_authorized_keys:\n - 0:"},
{"write_files:"},
{"write_files:\n -"},
{"write_files:\n - 0:"},
{"users:"},
{"users:\n -"},
{"users:\n - 0:"},
} {
_, err := NewCloudConfig(tt.contents)
if err != nil {
t.Fatalf("error instantiating CloudConfig with invalid keys: %v", err)
for _, tt := range tests {
ttt := reflect.TypeOf(tt)
for i := 0; i < ttt.NumField(); i++ {
ft := ttt.Field(i)
if !isFieldExported(ft) {
continue
}
if _, err := regexp.Compile(ft.Tag.Get("valid")); err != nil {
t.Errorf("bad regexp(%s.%s): want %v, got %s", ttt.Name(), ft.Name, nil, err)
}
}
}
}
@@ -136,7 +194,7 @@ hostname:
if cfg.Hostname != "foo" {
t.Fatalf("hostname not correctly set when invalid keys are present")
}
if cfg.Coreos.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
if cfg.CoreOS.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
t.Fatalf("etcd section not correctly set when invalid keys are present")
}
if len(cfg.WriteFiles) < 1 || cfg.WriteFiles[0].Content != "fun" || cfg.WriteFiles[0].Path != "/var/party" {
@@ -242,10 +300,10 @@ hostname: trontastic
}
}
if len(cfg.Coreos.Units) != 1 {
if len(cfg.CoreOS.Units) != 1 {
t.Error("Failed to parse correct number of units")
} else {
u := cfg.Coreos.Units[0]
u := cfg.CoreOS.Units[0]
expect := `[Match]
Name=eth47
@@ -261,19 +319,16 @@ Address=10.209.171.177/19
if u.Name != "50-eth0.network" {
t.Errorf("Unit has incorrect name %s", u.Name)
}
if u.Type() != "network" {
t.Errorf("Unit has incorrect type '%s'", u.Type())
}
}
if cfg.Coreos.OEM.ID != "rackspace" {
t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.Coreos.OEM.ID)
if cfg.CoreOS.OEM.ID != "rackspace" {
t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.CoreOS.OEM.ID)
}
if cfg.Hostname != "trontastic" {
t.Errorf("Failed to parse hostname")
}
if cfg.Coreos.Update.RebootStrategy != "reboot" {
if cfg.CoreOS.Update.RebootStrategy != "reboot" {
t.Errorf("Failed to parse locksmith strategy")
}
}
@@ -304,26 +359,6 @@ func TestCloudConfigSerializationHeader(t *testing.T) {
}
}
// TestDropInIgnored asserts that users are unable to set DropIn=True on units
func TestDropInIgnored(t *testing.T) {
contents := `
coreos:
units:
- name: test
dropin: true
`
cfg, err := NewCloudConfig(contents)
if err != nil || len(cfg.Coreos.Units) != 1 {
t.Fatalf("Encountered unexpected error: %v", err)
}
if len(cfg.Coreos.Units) != 1 || cfg.Coreos.Units[0].Name != "test" {
t.Fatalf("Expected 1 unit, but got %d: %v", len(cfg.Coreos.Units), cfg.Coreos.Units)
}
if cfg.Coreos.Units[0].DropIn {
t.Errorf("dropin option on unit in cloud-config was not ignored!")
}
}
func TestCloudConfigUsers(t *testing.T) {
contents := `
users:
@@ -462,30 +497,3 @@ users:
t.Errorf("ssh import url is %q, expected 'https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys'", user.SSHImportURL)
}
}
func TestNormalizeKeys(t *testing.T) {
for _, tt := range []struct {
in string
out string
}{
{"my_key_name: the-value\n", "my_key_name: the-value\n"},
{"my-key_name: the-value\n", "my_key_name: the-value\n"},
{"my-key-name: the-value\n", "my_key_name: the-value\n"},
{"a:\n- key_name: the-value\n", "a:\n- key_name: the-value\n"},
{"a:\n- key-name: the-value\n", "a:\n- key_name: the-value\n"},
{"a:\n b:\n - key_name: the-value\n", "a:\n b:\n - key_name: the-value\n"},
{"a:\n b:\n - key-name: the-value\n", "a:\n b:\n - key_name: the-value\n"},
{"coreos:\n update:\n reboot-strategy: off\n", "coreos:\n update:\n reboot_strategy: false\n"},
} {
out, err := normalizeConfig(tt.in)
if err != nil {
t.Fatalf("bad error (%q): want nil, got %s", tt.in, err)
}
if string(out) != tt.out {
t.Fatalf("bad normalization (%q): want %q, got %q", tt.in, tt.out, out)
}
}
}

56
config/decode.go Normal file
View File

@@ -0,0 +1,56 @@
package config
import (
"bytes"
"compress/gzip"
"encoding/base64"
"fmt"
)
func DecodeBase64Content(content string) ([]byte, error) {
output, err := base64.StdEncoding.DecodeString(content)
if err != nil {
return nil, fmt.Errorf("Unable to decode base64: %q", err)
}
return output, nil
}
func DecodeGzipContent(content string) ([]byte, error) {
gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
if err != nil {
return nil, fmt.Errorf("Unable to decode gzip: %q", err)
}
defer gzr.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(gzr)
return buf.Bytes(), nil
}
func DecodeContent(content string, encoding string) ([]byte, error) {
switch encoding {
case "":
return []byte(content), nil
case "b64", "base64":
return DecodeBase64Content(content)
case "gz", "gzip":
return DecodeGzipContent(content)
case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
gz, err := DecodeBase64Content(content)
if err != nil {
return nil, err
}
return DecodeGzipContent(string(gz))
}
return nil, fmt.Errorf("Unsupported encoding %q", encoding)
}
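A hedged sketch of the chained decoding path, reusing the gzip+base64 sample string that appears in the encoding tests further down; the surrounding main function is illustrative only.

package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/config"
)

func main() {
    // "gzip+base64" first base64-decodes the string, then gunzips the result.
    out, err := config.DecodeContent("H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=", "gzip+base64")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s", out)
}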

View File

@@ -17,32 +17,37 @@
package config
type Etcd struct {
Addr string `yaml:"addr" env:"ETCD_ADDR"`
BindAddr string `yaml:"bind_addr" env:"ETCD_BIND_ADDR"`
CAFile string `yaml:"ca_file" env:"ETCD_CA_FILE"`
CertFile string `yaml:"cert_file" env:"ETCD_CERT_FILE"`
ClusterActiveSize string `yaml:"cluster_active_size" env:"ETCD_CLUSTER_ACTIVE_SIZE"`
ClusterRemoveDelay string `yaml:"cluster_remove_delay" env:"ETCD_CLUSTER_REMOVE_DELAY"`
ClusterSyncInterval string `yaml:"cluster_sync_interval" env:"ETCD_CLUSTER_SYNC_INTERVAL"`
Cors string `yaml:"cors" env:"ETCD_CORS"`
CPUProfileFile string `yaml:"cpu_profile_file" env:"ETCD_CPU_PROFILE_FILE"`
DataDir string `yaml:"data_dir" env:"ETCD_DATA_DIR"`
Discovery string `yaml:"discovery" env:"ETCD_DISCOVERY"`
HTTPReadTimeout string `yaml:"http_read_timeout" env:"ETCD_HTTP_READ_TIMEOUT"`
HTTPWriteTimeout string `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
KeyFile string `yaml:"key_file" env:"ETCD_KEY_FILE"`
MaxClusterSize string `yaml:"max_cluster_size" env:"ETCD_MAX_CLUSTER_SIZE"`
MaxResultBuffer string `yaml:"max_result_buffer" env:"ETCD_MAX_RESULT_BUFFER"`
MaxRetryAttempts string `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
Name string `yaml:"name" env:"ETCD_NAME"`
PeerAddr string `yaml:"peer_addr" env:"ETCD_PEER_ADDR"`
PeerBindAddr string `yaml:"peer_bind_addr" env:"ETCD_PEER_BIND_ADDR"`
PeerCAFile string `yaml:"peer_ca_file" env:"ETCD_PEER_CA_FILE"`
PeerCertFile string `yaml:"peer_cert_file" env:"ETCD_PEER_CERT_FILE"`
PeerKeyFile string `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
Peers string `yaml:"peers" env:"ETCD_PEERS"`
PeersFile string `yaml:"peers_file" env:"ETCD_PEERS_FILE"`
Snapshot string `yaml:"snapshot" env:"ETCD_SNAPSHOT"`
Verbose string `yaml:"verbose" env:"ETCD_VERBOSE"`
VeryVerbose string `yaml:"very_verbose" env:"ETCD_VERY_VERBOSE"`
Addr string `yaml:"addr" env:"ETCD_ADDR"`
BindAddr string `yaml:"bind_addr" env:"ETCD_BIND_ADDR"`
CAFile string `yaml:"ca_file" env:"ETCD_CA_FILE"`
CertFile string `yaml:"cert_file" env:"ETCD_CERT_FILE"`
ClusterActiveSize int `yaml:"cluster_active_size" env:"ETCD_CLUSTER_ACTIVE_SIZE"`
ClusterRemoveDelay float64 `yaml:"cluster_remove_delay" env:"ETCD_CLUSTER_REMOVE_DELAY"`
ClusterSyncInterval float64 `yaml:"cluster_sync_interval" env:"ETCD_CLUSTER_SYNC_INTERVAL"`
CorsOrigins string `yaml:"cors" env:"ETCD_CORS"`
DataDir string `yaml:"data_dir" env:"ETCD_DATA_DIR"`
Discovery string `yaml:"discovery" env:"ETCD_DISCOVERY"`
GraphiteHost string `yaml:"graphite_host" env:"ETCD_GRAPHITE_HOST"`
HTTPReadTimeout float64 `yaml:"http_read_timeout" env:"ETCD_HTTP_READ_TIMEOUT"`
HTTPWriteTimeout float64 `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
KeyFile string `yaml:"key_file" env:"ETCD_KEY_FILE"`
MaxResultBuffer int `yaml:"max_result_buffer" env:"ETCD_MAX_RESULT_BUFFER"`
MaxRetryAttempts int `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
Name string `yaml:"name" env:"ETCD_NAME"`
PeerAddr string `yaml:"peer_addr" env:"ETCD_PEER_ADDR"`
PeerBindAddr string `yaml:"peer_bind_addr" env:"ETCD_PEER_BIND_ADDR"`
PeerCAFile string `yaml:"peer_ca_file" env:"ETCD_PEER_CA_FILE"`
PeerCertFile string `yaml:"peer_cert_file" env:"ETCD_PEER_CERT_FILE"`
PeerElectionTimeout int `yaml:"peer_election_timeout" env:"ETCD_PEER_ELECTION_TIMEOUT"`
PeerHeartbeatInterval int `yaml:"peer_heartbeat_interval" env:"ETCD_PEER_HEARTBEAT_INTERVAL"`
PeerKeyFile string `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
Peers string `yaml:"peers" env:"ETCD_PEERS"`
PeersFile string `yaml:"peers_file" env:"ETCD_PEERS_FILE"`
RetryInterval float64 `yaml:"retry_interval" env:"ETCD_RETRY_INTERVAL"`
Snapshot bool `yaml:"snapshot" env:"ETCD_SNAPSHOT"`
SnapshotCount int `yaml:"snapshot_count" env:"ETCD_SNAPSHOTCOUNT"`
StrTrace string `yaml:"trace" env:"ETCD_TRACE"`
Verbose bool `yaml:"verbose" env:"ETCD_VERBOSE"`
VeryVerbose bool `yaml:"very_verbose" env:"ETCD_VERY_VERBOSE"`
VeryVeryVerbose bool `yaml:"very_very_verbose" env:"ETCD_VERY_VERY_VERBOSE"`
}
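A hedged sketch of how the newly typed fields come out of NewCloudConfig; the discovery token and timeout value below are made up for illustration.

package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/config"
)

func main() {
    cc, err := config.NewCloudConfig("#cloud-config\ncoreos:\n  etcd:\n    discovery: https://discovery.etcd.io/mytoken\n    peer_election_timeout: 200")
    if err != nil {
        panic(err)
    }
    // discovery remains a string while peer_election_timeout now parses as an int.
    fmt.Println(cc.CoreOS.Etcd.Discovery, cc.CoreOS.Etcd.PeerElectionTimeout)
}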

View File

@@ -17,9 +17,9 @@
package config
type File struct {
Encoding string `yaml:"-"`
Encoding string `yaml:"encoding" valid:"^(base64|b64|gz|gzip|gz\\+base64|gzip\\+base64|gz\\+b64|gzip\\+b64)$"`
Content string `yaml:"content"`
Owner string `yaml:"owner"`
Path string `yaml:"path"`
RawFilePermissions string `yaml:"permissions"`
RawFilePermissions string `yaml:"permissions" valid:"^0?[0-7]{3,4}$"`
}

71
config/file_test.go Normal file
View File

@@ -0,0 +1,71 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"testing"
)
func TestEncodingValid(t *testing.T) {
tests := []struct {
value string
isValid bool
}{
{value: "base64", isValid: true},
{value: "b64", isValid: true},
{value: "gz", isValid: true},
{value: "gzip", isValid: true},
{value: "gz+base64", isValid: true},
{value: "gzip+base64", isValid: true},
{value: "gz+b64", isValid: true},
{value: "gzip+b64", isValid: true},
{value: "gzzzzbase64", isValid: false},
{value: "gzipppbase64", isValid: false},
{value: "unknown", isValid: false},
}
for _, tt := range tests {
isValid := (nil == AssertStructValid(File{Encoding: tt.value}))
if tt.isValid != isValid {
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
}
}
}
func TestRawFilePermissionsValid(t *testing.T) {
tests := []struct {
value string
isValid bool
}{
{value: "744", isValid: true},
{value: "0744", isValid: true},
{value: "1744", isValid: true},
{value: "01744", isValid: true},
{value: "11744", isValid: false},
{value: "rwxr--r--", isValid: false},
{value: "800", isValid: false},
}
for _, tt := range tests {
isValid := (nil == AssertStructValid(File{RawFilePermissions: tt.value}))
if tt.isValid != isValid {
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
}
}
}

12
config/flannel.go Normal file
View File

@@ -0,0 +1,12 @@
package config
type Flannel struct {
EtcdEndpoints string `yaml:"etcd_endpoints" env:"FLANNELD_ETCD_ENDPOINTS"`
EtcdCAFile string `yaml:"etcd_cafile" env:"FLANNELD_ETCD_CAFILE"`
EtcdCertFile string `yaml:"etcd_certfile" env:"FLANNELD_ETCD_CERTFILE"`
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLANNELD_ETCD_KEYFILE"`
EtcdPrefix string `yaml:"etcd_prefix" env:"FLANNELD_ETCD_PREFIX"`
IPMasq string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
Iface string `yaml:"interface" env:"FLANNELD_IFACE"`
}

View File

@@ -17,14 +17,15 @@
package config
type Fleet struct {
AgentTTL string `yaml:"agent_ttl" env:"FLEET_AGENT_TTL"`
EngineReconcileInterval string `yaml:"engine_reconcile_interval" env:"FLEET_ENGINE_RECONCILE_INTERVAL"`
EtcdCAFile string `yaml:"etcd_cafile" env:"FLEET_ETCD_CAFILE"`
EtcdCertFile string `yaml:"etcd_certfile" env:"FLEET_ETCD_CERTFILE"`
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLEET_ETCD_KEYFILE"`
EtcdRequestTimeout string `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
EtcdServers string `yaml:"etcd_servers" env:"FLEET_ETCD_SERVERS"`
Metadata string `yaml:"metadata" env:"FLEET_METADATA"`
PublicIP string `yaml:"public_ip" env:"FLEET_PUBLIC_IP"`
Verbosity string `yaml:"verbosity" env:"FLEET_VERBOSITY"`
AgentTTL string `yaml:"agent_ttl" env:"FLEET_AGENT_TTL"`
EngineReconcileInterval float64 `yaml:"engine_reconcile_interval" env:"FLEET_ENGINE_RECONCILE_INTERVAL"`
EtcdCAFile string `yaml:"etcd_cafile" env:"FLEET_ETCD_CAFILE"`
EtcdCertFile string `yaml:"etcd_certfile" env:"FLEET_ETCD_CERTFILE"`
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLEET_ETCD_KEYFILE"`
EtcdKeyPrefix string `yaml:"etcd_key_prefix" env:"FLEET_ETCD_KEY_PREFIX"`
EtcdRequestTimeout float64 `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
EtcdServers string `yaml:"etcd_servers" env:"FLEET_ETCD_SERVERS"`
Metadata string `yaml:"metadata" env:"FLEET_METADATA"`
PublicIP string `yaml:"public_ip" env:"FLEET_PUBLIC_IP"`
Verbosity int `yaml:"verbosity" env:"FLEET_VERBOSITY"`
}

8
config/locksmith.go Normal file
View File

@@ -0,0 +1,8 @@
package config
type Locksmith struct {
Endpoint string `yaml:"endpoint" env:"LOCKSMITHD_ENDPOINT"`
EtcdCAFile string `yaml:"etcd_cafile" env:"LOCKSMITHD_ETCD_CAFILE"`
EtcdCertFile string `yaml:"etcd_certfile" env:"LOCKSMITHD_ETCD_CERTFILE"`
EtcdKeyFile string `yaml:"etcd_keyfile" env:"LOCKSMITHD_ETCD_KEYFILE"`
}

View File

@@ -16,35 +16,17 @@
package config
import (
"path/filepath"
"strings"
)
type Unit struct {
Name string `yaml:"name"`
Mask bool `yaml:"mask"`
Enable bool `yaml:"enable"`
Runtime bool `yaml:"runtime"`
Content string `yaml:"content"`
Command string `yaml:"command" valid:"^(start|stop|restart|reload|try-restart|reload-or-restart|reload-or-try-restart)$"`
DropIns []UnitDropIn `yaml:"drop_ins"`
}
type UnitDropIn struct {
Name string `yaml:"name"`
Mask bool `yaml:"mask"`
Enable bool `yaml:"enable"`
Runtime bool `yaml:"runtime"`
Content string `yaml:"content"`
Command string `yaml:"command" valid:"start,stop,restart,reload,try-restart,reload-or-restart,reload-or-try-restart"`
// For drop-in units, a cloudinit.conf is generated.
// This is currently unbound in YAML (and hence unsettable in cloud-config files)
// until the correct behaviour for multiple drop-in units is determined.
DropIn bool `yaml:"-"`
}
func (u *Unit) Type() string {
ext := filepath.Ext(u.Name)
return strings.TrimLeft(ext, ".")
}
func (u *Unit) Group() string {
switch u.Type() {
case "network", "netdev", "link":
return "network"
default:
return "system"
}
}

46
config/unit_test.go Normal file
View File

@@ -0,0 +1,46 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"testing"
)
func TestCommandValid(t *testing.T) {
tests := []struct {
value string
isValid bool
}{
{value: "start", isValid: true},
{value: "stop", isValid: true},
{value: "restart", isValid: true},
{value: "reload", isValid: true},
{value: "try-restart", isValid: true},
{value: "reload-or-restart", isValid: true},
{value: "reload-or-try-restart", isValid: true},
{value: "tryrestart", isValid: false},
{value: "unknown", isValid: false},
}
for _, tt := range tests {
isValid := (nil == AssertStructValid(Unit{Command: tt.value}))
if tt.isValid != isValid {
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
}
}
}

View File

@@ -17,7 +17,7 @@
package config
type Update struct {
RebootStrategy string `yaml:"reboot_strategy" env:"REBOOT_STRATEGY" valid:"best-effort,etcd-lock,reboot,false"`
RebootStrategy string `yaml:"reboot_strategy" env:"REBOOT_STRATEGY" valid:"^(best-effort|etcd-lock|reboot|off)$"`
Group string `yaml:"group" env:"GROUP"`
Server string `yaml:"server" env:"SERVER"`
}

43
config/update_test.go Normal file
View File

@@ -0,0 +1,43 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"testing"
)
func TestRebootStrategyValid(t *testing.T) {
tests := []struct {
value string
isValid bool
}{
{value: "best-effort", isValid: true},
{value: "etcd-lock", isValid: true},
{value: "reboot", isValid: true},
{value: "off", isValid: true},
{value: "besteffort", isValid: false},
{value: "unknown", isValid: false},
}
for _, tt := range tests {
isValid := (nil == AssertStructValid(Update{RebootStrategy: tt.value}))
if tt.isValid != isValid {
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
}
}
}

View File

@@ -17,17 +17,18 @@
package config
type User struct {
Name string `yaml:"name"`
PasswordHash string `yaml:"passwd"`
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
SSHImportGithubUser string `yaml:"coreos_ssh_import_github"`
SSHImportURL string `yaml:"coreos_ssh_import_url"`
GECOS string `yaml:"gecos"`
Homedir string `yaml:"homedir"`
NoCreateHome bool `yaml:"no_create_home"`
PrimaryGroup string `yaml:"primary_group"`
Groups []string `yaml:"groups"`
NoUserGroup bool `yaml:"no_user_group"`
System bool `yaml:"system"`
NoLogInit bool `yaml:"no_log_init"`
Name string `yaml:"name"`
PasswordHash string `yaml:"passwd"`
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
SSHImportGithubUser string `yaml:"coreos_ssh_import_github"`
SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users"`
SSHImportURL string `yaml:"coreos_ssh_import_url"`
GECOS string `yaml:"gecos"`
Homedir string `yaml:"homedir"`
NoCreateHome bool `yaml:"no_create_home"`
PrimaryGroup string `yaml:"primary_group"`
Groups []string `yaml:"groups"`
NoUserGroup bool `yaml:"no_user_group"`
System bool `yaml:"system"`
NoLogInit bool `yaml:"no_log_init"`
}

View File

@@ -125,7 +125,7 @@ func toNode(v interface{}, c context, n *node) {
n.children = append(n.children, cn)
c.Increment()
}
case reflect.String, reflect.Int, reflect.Bool:
case reflect.String, reflect.Int, reflect.Bool, reflect.Float64:
default:
panic(fmt.Sprintf("toNode(): unhandled kind %s", vv.Kind()))
}

View File

@@ -18,7 +18,10 @@ package validate
import (
"fmt"
"net/url"
"path"
"reflect"
"strings"
"github.com/coreos/coreos-cloudinit/config"
)
@@ -27,8 +30,40 @@ type rule func(config node, report *Report)
// Rules contains all of the validation rules.
var Rules []rule = []rule{
checkDiscoveryUrl,
checkEncoding,
checkStructure,
checkValidity,
checkWriteFiles,
checkWriteFilesUnderCoreos,
}
// checkDiscoveryUrl verifies that the string is a valid url.
func checkDiscoveryUrl(cfg node, report *Report) {
c := cfg.Child("coreos").Child("etcd").Child("discovery")
if !c.IsValid() {
return
}
if _, err := url.ParseRequestURI(c.String()); err != nil {
report.Warning(c.line, "discovery URL is not valid")
}
}
// checkEncoding validates that, for each file under 'write_files', the
// content can be decoded given the specified encoding.
func checkEncoding(cfg node, report *Report) {
for _, f := range cfg.Child("write_files").children {
e := f.Child("encoding")
if !e.IsValid() {
continue
}
c := f.Child("contents")
if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
report.Error(c.line, fmt.Sprintf("contents cannot be decoded as %q", e.String()))
}
}
}
// checkStructure compares the provided config to the empty config.CloudConfig
@@ -61,12 +96,30 @@ func checkNodeStructure(n, g node, r *Report) {
toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
checkNodeStructure(cn, cg, r)
}
case reflect.String, reflect.Int, reflect.Bool:
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
default:
panic(fmt.Sprintf("checkNodeStructure(): unhandled kind %s", g.Kind()))
}
}
// isCompatible determines if the type of kind n can be converted to the type
// of kind g in the context of YAML. This is not an exhaustive list, but it's
// enough for the purposes of cloud-config validation.
func isCompatible(n, g reflect.Kind) bool {
switch g {
case reflect.String:
return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
case reflect.Struct:
return n == reflect.Struct || n == reflect.Map
case reflect.Float64:
return n == reflect.Float64 || n == reflect.Int
case reflect.Bool, reflect.Slice, reflect.Int:
return n == g
default:
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
}
}
// checkValidity checks the value of every node in the provided config by
// running config.AssertValid() on it.
func checkValidity(cfg node, report *Report) {
@@ -76,7 +129,7 @@ func checkValidity(cfg node, report *Report) {
func checkNodeValidity(n, g node, r *Report) {
if err := config.AssertValid(n.Value, g.field.Tag.Get("valid")); err != nil {
r.Warning(n.line, fmt.Sprintf("invalid value %v", n.Value))
r.Error(n.line, fmt.Sprintf("invalid value %v", n.Value.Interface()))
}
switch g.Kind() {
case reflect.Struct:
@@ -92,24 +145,35 @@ func checkNodeValidity(n, g node, r *Report) {
toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
checkNodeValidity(cn, cg, r)
}
case reflect.String, reflect.Int, reflect.Bool:
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
default:
panic(fmt.Sprintf("checkNodeValidity(): unhandled kind %s", g.Kind()))
}
}
// isCompatible determines if the type of kind n can be converted to the type
// of kind g in the context of YAML. This is not an exhaustive list, but it's
// enough for the purposes of cloud-config validation.
func isCompatible(n, g reflect.Kind) bool {
switch g {
case reflect.String:
return n == reflect.String || n == reflect.Int || n == reflect.Bool
case reflect.Struct:
return n == reflect.Struct || n == reflect.Map
case reflect.Bool, reflect.Slice:
return n == g
default:
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
// checkWriteFiles checks to make sure that the target file can actually be
// written. Note that this check is approximate (it only checks to see if the file
// is under /usr).
func checkWriteFiles(cfg node, report *Report) {
for _, f := range cfg.Child("write_files").children {
c := f.Child("path")
if !c.IsValid() {
continue
}
d := path.Dir(c.String())
switch {
case strings.HasPrefix(d, "/usr"):
report.Error(c.line, "file cannot be written to a read-only filesystem")
}
}
}
// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
// child of 'coreos' (it shouldn't be).
func checkWriteFilesUnderCoreos(cfg node, report *Report) {
c := cfg.Child("coreos").Child("write_files")
if c.IsValid() {
report.Info(c.line, "write_files doesn't belong under coreos")
}
}

View File

@@ -21,6 +21,85 @@ import (
"testing"
)
func TestCheckDiscoveryUrl(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "coreos:\n etcd:\n discovery: https://discovery.etcd.io/00000000000000000000000000000000",
},
{
config: "coreos:\n etcd:\n discovery: http://custom.domain/mytoken",
},
{
config: "coreos:\n etcd:\n discovery: disco",
entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkDiscoveryUrl(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckEncoding(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - encoding: base64\n contents: aGVsbG8K",
},
{
config: "write_files:\n - contents: !!binary aGVsbG8K",
},
{
config: "write_files:\n - encoding: base64\n contents: !!binary aGVsbG8K",
entries: []Entry{{entryError, `contents cannot be decoded as "base64"`, 3}},
},
{
config: "write_files:\n - encoding: base64\n contents: !!binary YUdWc2JHOEsK",
},
{
config: "write_files:\n - encoding: gzip\n contents: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
},
{
config: "write_files:\n - encoding: gzip+base64\n contents: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
},
{
config: "write_files:\n - encoding: custom\n contents: hello",
entries: []Entry{{entryError, `contents cannot be decoded as "custom"`, 3}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkEncoding(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckStructure(t *testing.T) {
tests := []struct {
config string
@@ -218,7 +297,7 @@ func TestCheckValidity(t *testing.T) {
},
{
config: "coreos:\n units:\n - command: lol",
entries: []Entry{{entryWarning, "invalid value lol", 3}},
entries: []Entry{{entryError, "invalid value lol", 3}},
},
// struct
@@ -227,7 +306,7 @@ func TestCheckValidity(t *testing.T) {
},
{
config: "coreos:\n update:\n reboot_strategy: always",
entries: []Entry{{entryWarning, "invalid value always", 3}},
entries: []Entry{{entryError, "invalid value always", 3}},
},
// unknown
@@ -249,3 +328,74 @@ func TestCheckValidity(t *testing.T) {
}
}
}
func TestCheckWriteFiles(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - path: /valid",
},
{
config: "write_files:\n - path: /tmp/usr/valid",
},
{
config: "write_files:\n - path: /usr/invalid",
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
},
{
config: "write-files:\n - path: /tmp/../usr/invalid",
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkWriteFiles(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckWriteFilesUnderCoreos(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - path: /hi",
},
{
config: "coreos:\n write_files:\n - path: /hi",
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
},
{
config: "coreos:\n write-files:\n - path: /hyphen",
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkWriteFilesUnderCoreos(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}

View File

@@ -25,7 +25,7 @@ import (
"github.com/coreos/coreos-cloudinit/config"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
)
var (
@@ -38,13 +38,15 @@ var (
// can be validated.
func Validate(userdataBytes []byte) (Report, error) {
switch {
case len(userdataBytes) == 0:
return Report{}, nil
case config.IsScript(string(userdataBytes)):
return Report{}, nil
case config.IsCloudConfig(string(userdataBytes)):
return validateCloudConfig(userdataBytes, Rules)
default:
return Report{entries: []Entry{
Entry{kind: entryError, message: `must be "#cloud-config" or begin with "#!"`},
Entry{kind: entryError, message: `must be "#cloud-config" or begin with "#!"`, line: 1},
}}, nil
}
}
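A hedged end-to-end sketch of the exported entry point; the import path is assumed from the repository layout, and the invalid discovery value is borrowed from the rule tests above.

package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/config/validate"
)

func main() {
    // "disco" is not a URL, so checkDiscoveryUrl should contribute a warning entry.
    report, err := validate.Validate([]byte("#cloud-config\ncoreos:\n  etcd:\n    discovery: disco"))
    if err != nil {
        panic(err)
    }
    for _, e := range report.Entries() {
        fmt.Printf("%+v\n", e)
    }
}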
@@ -63,7 +65,6 @@ func validateCloudConfig(config []byte, rules []rule) (report Report, err error)
return report, err
}
c = normalizeNodeNames(c, &report)
for _, r := range rules {
r(c, &report)
}
@@ -73,37 +74,87 @@ func validateCloudConfig(config []byte, rules []rule) (report Report, err error)
// parseCloudConfig parses the provided config into a node structure and logs
// any parsing issues into the provided report. Unrecoverable errors are
// returned as an error.
func parseCloudConfig(config []byte, report *Report) (n node, err error) {
var raw map[interface{}]interface{}
if err := yaml.Unmarshal(config, &raw); err != nil {
func parseCloudConfig(cfg []byte, report *Report) (node, error) {
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
return nameIn
}
// unmarshal the config into an implicitly-typed form. The yaml library
// will implicitly convert types into their normalized form
// (e.g. 0744 -> 484, off -> false).
var weak map[interface{}]interface{}
if err := yaml.Unmarshal(cfg, &weak); err != nil {
matches := yamlLineError.FindStringSubmatch(err.Error())
if len(matches) == 3 {
line, err := strconv.Atoi(matches[1])
if err != nil {
return n, err
return node{}, err
}
msg := matches[2]
report.Error(line, msg)
return n, nil
return node{}, nil
}
matches = yamlError.FindStringSubmatch(err.Error())
if len(matches) == 2 {
report.Error(1, matches[1])
return n, nil
return node{}, nil
}
return n, errors.New("couldn't parse yaml error")
return node{}, errors.New("couldn't parse yaml error")
}
w := NewNode(weak, NewContext(cfg))
w = normalizeNodeNames(w, report)
// unmarshal the config into the explicitly-typed form.
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
return strings.Replace(nameIn, "-", "_", -1)
}
var strong config.CloudConfig
if err := yaml.Unmarshal([]byte(cfg), &strong); err != nil {
return node{}, err
}
s := NewNode(strong, NewContext(cfg))
// coerceNodes combines the weak nodes and strong nodes. Strong nodes replace weak nodes
// if they are compatible types (this happens when the yaml library
// converts the input).
// (e.g. weak 484 is replaced by strong 0744, weak 4 is not replaced by
// strong false)
return coerceNodes(w, s), nil
}
// coerceNodes recursively evaluates two nodes, returning a new node containing
// either the weak or strong node's value and its recursively processed
// children. The strong node's value is used if the two nodes are leafs, are
// both valid, and are compatible types (defined by isCompatible()). The weak
// node is returned in all other cases. coerceNodes is used to counteract the
// effects of yaml's automatic type conversion. The weak node is the one
// resulting from unmarshalling into an empty interface{} (the type is
// inferred). The strong node is the one resulting from unmarshalling into a
// struct. If the two nodes are of compatible types, the yaml library correctly
// parsed the value into the strongly typed unmarshalling. In this case, we
// prefer the strong node because it's actually the type we are expecting.
func coerceNodes(w, s node) node {
n := w
n.children = nil
if len(w.children) == 0 && len(s.children) == 0 &&
w.IsValid() && s.IsValid() &&
isCompatible(w.Kind(), s.Kind()) {
n.Value = s.Value
}
return NewNode(raw, NewContext(config)), nil
for _, cw := range w.children {
n.children = append(n.children, coerceNodes(cw, s.Child(cw.name)))
}
return n
}
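A hedged, self-contained sketch of the implicit typing that coerceNodes counteracts, using the same 0744 and off cases noted in the comments above; the keys are illustrative.

package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
)

func main() {
    // Identity transform, as the weak pass in parseCloudConfig uses.
    yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
        return nameIn
    }
    // Unmarshalling into interface{} lets yaml infer types: 0744 becomes the
    // int 484 and off becomes the bool false. The strongly typed pass keeps
    // them as strings when the destination field is a string, and coerceNodes
    // then prefers those string values.
    var weak map[interface{}]interface{}
    if err := yaml.Unmarshal([]byte("permissions: 0744\nreboot_strategy: off"), &weak); err != nil {
        panic(err)
    }
    fmt.Println(weak["permissions"], weak["reboot_strategy"]) // 484 false
}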
// normalizeNodeNames replaces all occurrences of '-' with '_' within key names
// and makes a note of each replacement in the report.
func normalizeNodeNames(node node, report *Report) node {
if strings.Contains(node.name, "-") {
report.Info(node.line, fmt.Sprintf("%q uses '-' instead of '_'", node.name))
// TODO(crawford): Enable this message once the new validator hits stable.
//report.Info(node.line, fmt.Sprintf("%q uses '-' instead of '_'", node.name))
node.name = strings.Replace(node.name, "-", "_", -1)
}
for i := range node.children {

View File

@@ -65,6 +65,31 @@ func TestValidateCloudConfig(t *testing.T) {
rules: []rule{func(_ node, _ *Report) { panic("something happened") }},
err: errors.New("something happened"),
},
{
config: "write_files:\n - permissions: 0744",
rules: Rules,
},
{
config: "write_files:\n - permissions: '0744'",
rules: Rules,
},
{
config: "write_files:\n - permissions: 744",
rules: Rules,
},
{
config: "write_files:\n - permissions: '744'",
rules: Rules,
},
{
config: "coreos:\n update:\n reboot-strategy: off",
rules: Rules,
},
{
config: "coreos:\n update:\n reboot-strategy: false",
rules: Rules,
report: Report{entries: []Entry{{entryError, "invalid value false", 3}}},
},
}
for _, tt := range tests {
@@ -78,6 +103,29 @@ func TestValidateCloudConfig(t *testing.T) {
}
}
func TestValidate(t *testing.T) {
tests := []struct {
config string
report Report
}{
{},
{
config: "#!/bin/bash\necho hey",
},
}
for i, tt := range tests {
r, err := Validate([]byte(tt.config))
if err != nil {
t.Errorf("bad error (case #%d): want %v, got %v", i, nil, err)
}
if !reflect.DeepEqual(tt.report, r) {
t.Errorf("bad report (case #%d): want %+v, got %+v", i, tt.report, r)
}
}
}
func BenchmarkValidate(b *testing.B) {
config := `#cloud-config
hostname: test

View File

@@ -40,7 +40,7 @@ import (
)
const (
version = "0.11.0"
version = "1.2.1"
datasourceInterval = 100 * time.Millisecond
datasourceMaxInterval = 30 * time.Second
datasourceTimeout = 5 * time.Minute

View File

@@ -17,8 +17,12 @@
package cloudsigma
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"net"
"os"
"strings"
@@ -51,7 +55,8 @@ func (_ *serverContextService) IsAvailable() bool {
}
productName := make([]byte, 10)
_, err = productNameFile.Read(productName)
return err == nil && string(productName) == "CloudSigma"
return err == nil && string(productName) == "CloudSigma" && hasDHCPLeases()
}
func (_ *serverContextService) AvailabilityChanges() bool {
@@ -73,12 +78,16 @@ func (scs *serverContextService) FetchMetadata() ([]byte, error) {
UUID string `json:"uuid"`
Meta map[string]string `json:"meta"`
Nics []struct {
Runtime struct {
Mac string `json:"mac"`
IPv4Conf struct {
InterfaceType string `json:"interface_type"`
IPv4 struct {
IP string `json:"uuid"`
} `json:"ip_v4"`
} `json:"runtime"`
IP struct {
UUID string `json:"uuid"`
} `json:"ip"`
} `json:"ip_v4_conf"`
VLAN struct {
UUID string `json:"uuid"`
} `json:"vlan"`
} `json:"nics"`
}
outputMetadata struct {
@@ -112,11 +121,12 @@ func (scs *serverContextService) FetchMetadata() ([]byte, error) {
}
for _, nic := range inputMetadata.Nics {
if nic.Runtime.IPv4.IP != "" {
if nic.Runtime.InterfaceType == "public" {
outputMetadata.PublicIPv4 = nic.Runtime.IPv4.IP
} else {
outputMetadata.LocalIPv4 = nic.Runtime.IPv4.IP
if nic.IPv4Conf.IP.UUID != "" {
outputMetadata.PublicIPv4 = nic.IPv4Conf.IP.UUID
}
if nic.VLAN.UUID != "" {
if localIP, err := scs.findLocalIP(nic.Mac); err == nil {
outputMetadata.LocalIPv4 = localIP
}
}
}
@@ -146,6 +156,36 @@ func (scs *serverContextService) FetchNetworkConfig(a string) ([]byte, error) {
return nil, nil
}
func (scs *serverContextService) findLocalIP(mac string) (string, error) {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
ifaceMac, err := net.ParseMAC(mac)
if err != nil {
return "", err
}
for _, iface := range ifaces {
if !bytes.Equal(iface.HardwareAddr, ifaceMac) {
continue
}
addrs, err := iface.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
switch ip := addr.(type) {
case *net.IPNet:
if ip.IP.To4() != nil {
return ip.IP.To4().String(), nil
}
}
}
}
return "", errors.New("Local IP not found")
}
func isBase64Encoded(field string, userdata map[string]string) bool {
base64Fields, ok := userdata["base64_fields"]
if !ok {
@@ -159,3 +199,8 @@ func isBase64Encoded(field string, userdata map[string]string) bool {
}
return false
}
func hasDHCPLeases() bool {
files, err := ioutil.ReadDir("/run/systemd/netif/leases/")
return err == nil && len(files) > 0
}

View File

@@ -74,14 +74,41 @@ func TestServerContextFetchMetadata(t *testing.T) {
"name": "coreos",
"nics": [
{
"runtime": {
"interface_type": "public",
"ip_v4": {
"boot_order": null,
"ip_v4_conf": {
"conf": "dhcp",
"ip": {
"gateway": "31.171.244.1",
"meta": {},
"nameservers": [
"178.22.66.167",
"178.22.71.56",
"8.8.8.8"
],
"netmask": 22,
"tags": [],
"uuid": "31.171.251.74"
},
"ip_v6": null
}
},
"ip_v6_conf": null,
"mac": "22:3d:09:6b:90:f3",
"model": "virtio",
"vlan": null
},
{
"boot_order": null,
"ip_v4_conf": null,
"ip_v6_conf": null,
"mac": "22:ae:4a:fb:8f:31",
"model": "virtio",
"vlan": {
"meta": {
"description": "",
"name": "CoreOS"
},
"tags": [],
"uuid": "5dec030e-25b8-4621-a5a4-a3302c9d9619"
}
}
],
"smp": 2,
@@ -106,12 +133,8 @@ func TestServerContextFetchMetadata(t *testing.T) {
t.Error("Public SSH Keys are not being read properly")
}
if metadata.LocalIPv4 != "" {
t.Errorf("Local IP is not empty but %s instead", metadata.LocalIPv4)
}
if metadata.PublicIPv4 != "31.171.251.74" {
t.Errorf("Local IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
t.Errorf("Public IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
}
}

View File

@@ -69,7 +69,7 @@ func (ms metadataService) FetchMetadata() ([]byte, error) {
}
if hostname, err := ms.fetchAttribute(fmt.Sprintf("%s/hostname", ms.MetadataUrl())); err == nil {
attrs["hostname"] = hostname
attrs["hostname"] = strings.Split(hostname, " ")[0]
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
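A hedged illustration of the new hostname handling, using the space-separated sample from the test below; only the first token is kept.

package main

import (
    "fmt"
    "strings"
)

func main() {
    // With multiple domain names in the DHCPOptionSet, EC2 can return
    // "host domain another_domain"; splitting on spaces keeps just "host".
    hostname := "host domain another_domain"
    fmt.Println(strings.Split(hostname, " ")[0]) // host
}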

View File

@@ -173,6 +173,20 @@ func TestFetchMetadata(t *testing.T) {
},
expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
},
{
root: "/",
metadataPath: "2009-04-04/meta-data",
resources: map[string]string{
"/2009-04-04/meta-data/hostname": "host domain another_domain",
"/2009-04-04/meta-data/local-ipv4": "1.2.3.4",
"/2009-04-04/meta-data/public-ipv4": "5.6.7.8",
"/2009-04-04/meta-data/public-keys": "0=test1\n",
"/2009-04-04/meta-data/public-keys/0": "openssh-key",
"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
"/2009-04-04/meta-data/network_config/content_path": "path",
},
expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
},
{
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},

View File

@@ -87,6 +87,12 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
return err
}
}
for _, u := range user.SSHImportGithubUsers {
log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name)
if err := SSHImportGithubUser(user.Name, u); err != nil {
return err
}
}
if user.SSHImportURL != "" {
log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL)
if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil {
@@ -110,9 +116,10 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
}
for _, ccf := range []CloudConfigFile{
system.OEM{OEM: cfg.Coreos.OEM},
system.Update{Update: cfg.Coreos.Update, ReadConfig: system.DefaultReadConfig},
system.OEM{OEM: cfg.CoreOS.OEM},
system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
system.EtcHosts{EtcHosts: cfg.ManageEtcHosts},
system.Flannel{Flannel: cfg.CoreOS.Flannel},
} {
f, err := ccf.File()
if err != nil {
@@ -124,14 +131,15 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
}
var units []system.Unit
for _, u := range cfg.Coreos.Units {
for _, u := range cfg.CoreOS.Units {
units = append(units, system.Unit{Unit: u})
}
for _, ccu := range []CloudConfigUnit{
system.Etcd{Etcd: cfg.Coreos.Etcd},
system.Fleet{Fleet: cfg.Coreos.Fleet},
system.Update{Update: cfg.Coreos.Update, ReadConfig: system.DefaultReadConfig},
system.Etcd{Etcd: cfg.CoreOS.Etcd},
system.Fleet{Fleet: cfg.CoreOS.Fleet},
system.Locksmith{Locksmith: cfg.CoreOS.Locksmith},
system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
} {
units = append(units, ccu.Units()...)
}
@@ -168,16 +176,13 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
case "digitalocean":
interfaces, err = network.ProcessDigitalOceanNetconf(cfg.NetworkConfig)
default:
return fmt.Errorf("Unsupported network config format %q", env.NetconfType())
err = fmt.Errorf("Unsupported network config format %q", env.NetconfType())
}
if err != nil {
return err
}
if err := system.WriteNetworkdConfigs(interfaces); err != nil {
return err
}
units = append(units, createNetworkingUnits(interfaces)...)
if err := system.RestartNetwork(interfaces); err != nil {
return err
}
@@ -185,7 +190,25 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
um := system.NewUnitManager(env.Root())
return processUnits(units, env.Root(), um)
}
func createNetworkingUnits(interfaces []network.InterfaceGenerator) (units []system.Unit) {
appendNewUnit := func(units []system.Unit, name, content string) []system.Unit {
if content == "" {
return units
}
return append(units, system.Unit{Unit: config.Unit{
Name: name,
Runtime: true,
Content: content,
}})
}
for _, i := range interfaces {
units = appendNewUnit(units, fmt.Sprintf("%s.netdev", i.Filename()), i.Netdev())
units = appendNewUnit(units, fmt.Sprintf("%s.link", i.Filename()), i.Link())
units = appendNewUnit(units, fmt.Sprintf("%s.network", i.Filename()), i.Network())
}
return units
}
// processUnits takes a set of Units and applies them to the given root using
@@ -194,66 +217,92 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
// commands against units. It returns any error encountered.
func processUnits(units []system.Unit, root string, um system.UnitManager) error {
type action struct {
unit string
unit system.Unit
command string
}
actions := make([]action, 0, len(units))
reload := false
restartNetworkd := false
for _, unit := range units {
dst := unit.Destination(root)
if unit.Name == "" {
log.Printf("Skipping unit without name")
continue
}
if unit.Content != "" {
log.Printf("Writing unit %s to filesystem at path %s", unit.Name, dst)
if err := um.PlaceUnit(&unit, dst); err != nil {
log.Printf("Writing unit %q to filesystem", unit.Name)
if err := um.PlaceUnit(unit); err != nil {
return err
}
log.Printf("Placed unit %s at %s", unit.Name, dst)
log.Printf("Wrote unit %q", unit.Name)
reload = true
}
for _, dropin := range unit.DropIns {
if dropin.Name != "" && dropin.Content != "" {
log.Printf("Writing drop-in unit %q to filesystem", dropin.Name)
if err := um.PlaceUnitDropIn(unit, dropin); err != nil {
return err
}
log.Printf("Wrote drop-in unit %q", dropin.Name)
reload = true
}
}
if unit.Mask {
log.Printf("Masking unit file %s", unit.Name)
if err := um.MaskUnit(&unit); err != nil {
log.Printf("Masking unit file %q", unit.Name)
if err := um.MaskUnit(unit); err != nil {
return err
}
} else if unit.Runtime {
log.Printf("Ensuring runtime unit file %s is unmasked", unit.Name)
if err := um.UnmaskUnit(&unit); err != nil {
log.Printf("Ensuring runtime unit file %q is unmasked", unit.Name)
if err := um.UnmaskUnit(unit); err != nil {
return err
}
}
if unit.Enable {
if unit.Group() != "network" {
log.Printf("Enabling unit file %s", unit.Name)
if err := um.EnableUnitFile(unit.Name, unit.Runtime); err != nil {
log.Printf("Enabling unit file %q", unit.Name)
if err := um.EnableUnitFile(unit); err != nil {
return err
}
log.Printf("Enabled unit %s", unit.Name)
log.Printf("Enabled unit %q", unit.Name)
} else {
log.Printf("Skipping enable for network-like unit %s", unit.Name)
log.Printf("Skipping enable for network-like unit %q", unit.Name)
}
}
if unit.Group() == "network" {
actions = append(actions, action{"systemd-networkd.service", "restart"})
restartNetworkd = true
} else if unit.Command != "" {
actions = append(actions, action{unit.Name, unit.Command})
actions = append(actions, action{unit, unit.Command})
}
}
if reload {
if err := um.DaemonReload(); err != nil {
return errors.New(fmt.Sprintf("failed systemd daemon-reload: %v", err))
return errors.New(fmt.Sprintf("failed systemd daemon-reload: %s", err))
}
}
for _, action := range actions {
log.Printf("Calling unit command '%s %s'", action.command, action.unit)
res, err := um.RunUnitCommand(action.command, action.unit)
if restartNetworkd {
log.Printf("Restarting systemd-networkd")
networkd := system.Unit{Unit: config.Unit{Name: "systemd-networkd.service"}}
res, err := um.RunUnitCommand(networkd, "restart")
if err != nil {
return err
}
log.Printf("Result of '%s %s': %s", action.command, action.unit, res)
log.Printf("Restarted systemd-networkd (%s)", res)
}
for _, action := range actions {
log.Printf("Calling unit command %q on %q'", action.command, action.unit.Name)
res, err := um.RunUnitCommand(action.unit, action.command)
if err != nil {
return err
}
log.Printf("Result of %q on %q: %s", action.command, action.unit.Name, res)
}
return nil

View File

@@ -17,9 +17,11 @@
package initialize
import (
"reflect"
"testing"
"github.com/coreos/coreos-cloudinit/config"
"github.com/coreos/coreos-cloudinit/network"
"github.com/coreos/coreos-cloudinit/system"
)
@@ -28,103 +30,272 @@ type TestUnitManager struct {
enabled []string
masked []string
unmasked []string
commands map[string]string
commands []UnitAction
reload bool
}
func (tum *TestUnitManager) PlaceUnit(unit *system.Unit, dst string) error {
tum.placed = append(tum.placed, unit.Name)
type UnitAction struct {
unit string
command string
}
func (tum *TestUnitManager) PlaceUnit(u system.Unit) error {
tum.placed = append(tum.placed, u.Name)
return nil
}
func (tum *TestUnitManager) EnableUnitFile(unit string, runtime bool) error {
tum.enabled = append(tum.enabled, unit)
func (tum *TestUnitManager) PlaceUnitDropIn(u system.Unit, d config.UnitDropIn) error {
tum.placed = append(tum.placed, u.Name+".d/"+d.Name)
return nil
}
func (tum *TestUnitManager) RunUnitCommand(command, unit string) (string, error) {
tum.commands = make(map[string]string)
tum.commands[unit] = command
func (tum *TestUnitManager) EnableUnitFile(u system.Unit) error {
tum.enabled = append(tum.enabled, u.Name)
return nil
}
func (tum *TestUnitManager) RunUnitCommand(u system.Unit, c string) (string, error) {
tum.commands = append(tum.commands, UnitAction{u.Name, c})
return "", nil
}
func (tum *TestUnitManager) DaemonReload() error {
tum.reload = true
return nil
}
func (tum *TestUnitManager) MaskUnit(unit *system.Unit) error {
tum.masked = append(tum.masked, unit.Name)
func (tum *TestUnitManager) MaskUnit(u system.Unit) error {
tum.masked = append(tum.masked, u.Name)
return nil
}
func (tum *TestUnitManager) UnmaskUnit(unit *system.Unit) error {
tum.unmasked = append(tum.unmasked, unit.Name)
func (tum *TestUnitManager) UnmaskUnit(u system.Unit) error {
tum.unmasked = append(tum.unmasked, u.Name)
return nil
}
type mockInterface struct {
name string
filename string
netdev string
link string
network string
kind string
modprobeParams string
}
func (i mockInterface) Name() string {
return i.name
}
func (i mockInterface) Filename() string {
return i.filename
}
func (i mockInterface) Netdev() string {
return i.netdev
}
func (i mockInterface) Link() string {
return i.link
}
func (i mockInterface) Network() string {
return i.network
}
func (i mockInterface) Type() string {
return i.kind
}
func (i mockInterface) ModprobeParams() string {
return i.modprobeParams
}
func TestCreateNetworkingUnits(t *testing.T) {
for _, tt := range []struct {
interfaces []network.InterfaceGenerator
expect []system.Unit
}{
{nil, nil},
{
[]network.InterfaceGenerator{
network.InterfaceGenerator(mockInterface{filename: "test"}),
},
nil,
},
{
[]network.InterfaceGenerator{
network.InterfaceGenerator(mockInterface{filename: "test1", netdev: "test netdev"}),
network.InterfaceGenerator(mockInterface{filename: "test2", link: "test link"}),
network.InterfaceGenerator(mockInterface{filename: "test3", network: "test network"}),
},
[]system.Unit{
system.Unit{Unit: config.Unit{Name: "test1.netdev", Runtime: true, Content: "test netdev"}},
system.Unit{Unit: config.Unit{Name: "test2.link", Runtime: true, Content: "test link"}},
system.Unit{Unit: config.Unit{Name: "test3.network", Runtime: true, Content: "test network"}},
},
},
{
[]network.InterfaceGenerator{
network.InterfaceGenerator(mockInterface{filename: "test", netdev: "test netdev", link: "test link", network: "test network"}),
},
[]system.Unit{
system.Unit{Unit: config.Unit{Name: "test.netdev", Runtime: true, Content: "test netdev"}},
system.Unit{Unit: config.Unit{Name: "test.link", Runtime: true, Content: "test link"}},
system.Unit{Unit: config.Unit{Name: "test.network", Runtime: true, Content: "test network"}},
},
},
} {
units := createNetworkingUnits(tt.interfaces)
if !reflect.DeepEqual(tt.expect, units) {
t.Errorf("bad units (%+v): want %#v, got %#v", tt.interfaces, tt.expect, units)
}
}
}
func TestProcessUnits(t *testing.T) {
tum := &TestUnitManager{}
units := []system.Unit{
system.Unit{Unit: config.Unit{
Name: "foo",
Mask: true,
}},
}
if err := processUnits(units, "", tum); err != nil {
t.Fatalf("unexpected error calling processUnits: %v", err)
}
if len(tum.masked) != 1 || tum.masked[0] != "foo" {
t.Errorf("expected foo to be masked, but found %v", tum.masked)
tests := []struct {
units []system.Unit
result TestUnitManager
}{
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "foo",
Mask: true,
}},
},
result: TestUnitManager{
masked: []string{"foo"},
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "baz.service",
Content: "[Service]\nExecStart=/bin/baz",
Command: "start",
}},
system.Unit{Unit: config.Unit{
Name: "foo.network",
Content: "[Network]\nFoo=true",
}},
system.Unit{Unit: config.Unit{
Name: "bar.network",
Content: "[Network]\nBar=true",
}},
},
result: TestUnitManager{
placed: []string{"baz.service", "foo.network", "bar.network"},
commands: []UnitAction{
UnitAction{"systemd-networkd.service", "restart"},
UnitAction{"baz.service", "start"},
},
reload: true,
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "baz.service",
Content: "[Service]\nExecStart=/bin/true",
}},
},
result: TestUnitManager{
placed: []string{"baz.service"},
reload: true,
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "locksmithd.service",
Runtime: true,
}},
},
result: TestUnitManager{
unmasked: []string{"locksmithd.service"},
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "woof",
Enable: true,
}},
},
result: TestUnitManager{
enabled: []string{"woof"},
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "hi.service",
Runtime: true,
Content: "[Service]\nExecStart=/bin/echo hi",
DropIns: []config.UnitDropIn{
{
Name: "lo.conf",
Content: "[Service]\nExecStart=/bin/echo lo",
},
{
Name: "bye.conf",
Content: "[Service]\nExecStart=/bin/echo bye",
},
},
}},
},
result: TestUnitManager{
placed: []string{"hi.service", "hi.service.d/lo.conf", "hi.service.d/bye.conf"},
unmasked: []string{"hi.service"},
reload: true,
},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
DropIns: []config.UnitDropIn{
{
Name: "lo.conf",
Content: "[Service]\nExecStart=/bin/echo lo",
},
},
}},
},
result: TestUnitManager{},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "hi.service",
DropIns: []config.UnitDropIn{
{
Content: "[Service]\nExecStart=/bin/echo lo",
},
},
}},
},
result: TestUnitManager{},
},
{
units: []system.Unit{
system.Unit{Unit: config.Unit{
Name: "hi.service",
DropIns: []config.UnitDropIn{
{
Name: "lo.conf",
},
},
}},
},
result: TestUnitManager{},
},
}
tum = &TestUnitManager{}
units = []system.Unit{
system.Unit{Unit: config.Unit{
Name: "bar.network",
}},
}
if err := processUnits(units, "", tum); err != nil {
t.Fatalf("unexpected error calling processUnits: %v", err)
}
if _, ok := tum.commands["systemd-networkd.service"]; !ok {
t.Errorf("expected systemd-networkd.service to be reloaded!")
}
tum = &TestUnitManager{}
units = []system.Unit{
system.Unit{Unit: config.Unit{
Name: "baz.service",
Content: "[Service]\nExecStart=/bin/true",
}},
}
if err := processUnits(units, "", tum); err != nil {
t.Fatalf("unexpected error calling processUnits: %v", err)
}
if len(tum.placed) != 1 || tum.placed[0] != "baz.service" {
t.Fatalf("expected baz.service to be written, but got %v", tum.placed)
}
tum = &TestUnitManager{}
units = []system.Unit{
system.Unit{Unit: config.Unit{
Name: "locksmithd.service",
Runtime: true,
}},
}
if err := processUnits(units, "", tum); err != nil {
t.Fatalf("unexpected error calling processUnits: %v", err)
}
if len(tum.unmasked) != 1 || tum.unmasked[0] != "locksmithd.service" {
t.Fatalf("expected locksmithd.service to be unmasked, but got %v", tum.unmasked)
}
tum = &TestUnitManager{}
units = []system.Unit{
system.Unit{Unit: config.Unit{
Name: "woof",
Enable: true,
}},
}
if err := processUnits(units, "", tum); err != nil {
t.Fatalf("unexpected error calling processUnits: %v", err)
}
if len(tum.enabled) != 1 || tum.enabled[0] != "woof" {
t.Fatalf("expected woof to be enabled, but got %v", tum.enabled)
for _, tt := range tests {
tum := &TestUnitManager{}
if err := processUnits(tt.units, "", tum); err != nil {
t.Errorf("bad error (%+v): want nil, got %s", tt.units, err)
}
if !reflect.DeepEqual(tt.result, *tum) {
t.Errorf("bad result (%+v): want %+v, got %+v", tt.units, tt.result, tum)
}
}
}
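The table above drives processUnits against a fake unit manager whose definition sits outside this hunk. As a rough, self-contained sketch, such a test double could look like the following, with field and method names inferred from the assertions above and from the UnitManager interface further down in this diff (it is not the repository's actual file):

package system

import "github.com/coreos/coreos-cloudinit/config"

// UnitAction pairs a unit name with the systemctl-style command issued for it.
type UnitAction struct {
    unit    string
    command string
}

// TestUnitManager records everything processUnits asks a UnitManager to do,
// so the table-driven test can assert on the recorded state afterwards.
type TestUnitManager struct {
    placed   []string
    enabled  []string
    masked   []string
    unmasked []string
    commands []UnitAction
    reload   bool
}

func (tum *TestUnitManager) PlaceUnit(u Unit) error {
    tum.placed = append(tum.placed, u.Name)
    return nil
}

func (tum *TestUnitManager) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error {
    tum.placed = append(tum.placed, u.Name+".d/"+d.Name)
    return nil
}

func (tum *TestUnitManager) EnableUnitFile(u Unit) error {
    tum.enabled = append(tum.enabled, u.Name)
    return nil
}

func (tum *TestUnitManager) RunUnitCommand(u Unit, c string) (string, error) {
    tum.commands = append(tum.commands, UnitAction{u.Name, c})
    return "", nil
}

func (tum *TestUnitManager) MaskUnit(u Unit) error {
    tum.masked = append(tum.masked, u.Name)
    return nil
}

func (tum *TestUnitManager) UnmaskUnit(u Unit) error {
    tum.unmasked = append(tum.unmasked, u.Name)
    return nil
}

func (tum *TestUnitManager) DaemonReload() error {
    tum.reload = true
    return nil
}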

View File

@@ -19,24 +19,36 @@ package system
import (
"fmt"
"reflect"
"github.com/coreos/coreos-cloudinit/config"
)
// dropinContents generates the contents for a drop-in unit given the config.
// serviceContents generates the contents for a drop-in unit given the config.
// The argument must be a struct from the 'config' package.
func dropinContents(e interface{}) string {
func serviceContents(e interface{}) string {
vars := getEnvVars(e)
if len(vars) == 0 {
return ""
}
out := "[Service]\n"
for _, v := range vars {
out += fmt.Sprintf("Environment=\"%s\"\n", v)
}
return out
}
func getEnvVars(e interface{}) []string {
et := reflect.TypeOf(e)
ev := reflect.ValueOf(e)
var out string
vars := []string{}
for i := 0; i < et.NumField(); i++ {
if val := ev.Field(i).String(); val != "" {
if val := ev.Field(i).Interface(); !config.IsZero(val) {
key := et.Field(i).Tag.Get("env")
out += fmt.Sprintf("Environment=\"%s=%s\"\n", key, val)
vars = append(vars, fmt.Sprintf("%s=%v", key, val))
}
}
if out == "" {
return ""
}
return "[Service]\n" + out
return vars
}
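The refactor above splits the old dropinContents into two pieces: getEnvVars turns any env-tagged struct from the config package into KEY=value strings, and serviceContents wraps those strings in a [Service] section. A minimal standalone sketch of the same idea, using a hypothetical struct and a simplified zero-value check in place of config.IsZero:

package main

import (
    "fmt"
    "reflect"
)

// exampleConfig stands in for a struct from the config package; the real
// structs use the same `env` struct tags.
type exampleConfig struct {
    Discovery string  `env:"ETCD_DISCOVERY"`
    Name      string  `env:"ETCD_NAME"`
    Timeout   float64 `env:"ETCD_TIMEOUT"`
}

// envVars mirrors getEnvVars: every non-zero field becomes "KEY=value".
func envVars(e interface{}) []string {
    et, ev := reflect.TypeOf(e), reflect.ValueOf(e)
    vars := []string{}
    for i := 0; i < et.NumField(); i++ {
        if v := ev.Field(i).Interface(); v != reflect.Zero(ev.Field(i).Type()).Interface() {
            vars = append(vars, fmt.Sprintf("%s=%v", et.Field(i).Tag.Get("env"), v))
        }
    }
    return vars
}

func main() {
    cfg := exampleConfig{Discovery: "http://disco.example.com/foobar", Timeout: 0.5}

    // serviceContents-style output: wrap each pair in an Environment= line.
    out := "[Service]\n"
    for _, v := range envVars(cfg) {
        out += fmt.Sprintf("Environment=%q\n", v)
    }
    fmt.Print(out)
    // [Service]
    // Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
    // Environment="ETCD_TIMEOUT=0.5"
}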

55
system/env_test.go Normal file
View File

@@ -0,0 +1,55 @@
package system
import (
"testing"
)
func TestServiceContents(t *testing.T) {
tests := []struct {
Config interface{}
Contents string
}{
{
struct{}{},
"",
},
{
struct {
A string `env:"A"`
B int `env:"B"`
C bool `env:"C"`
D float64 `env:"D"`
}{
"hi", 1, true, 0.12345,
},
`[Service]
Environment="A=hi"
Environment="B=1"
Environment="C=true"
Environment="D=0.12345"
`,
},
{
struct {
A float64 `env:"A"`
B float64 `env:"B"`
C float64 `env:"C"`
D float64 `env:"D"`
}{
0.000001, 1, 0.9999999, 0.1,
},
`[Service]
Environment="A=1e-06"
Environment="B=1"
Environment="C=0.9999999"
Environment="D=0.1"
`,
},
}
for _, tt := range tests {
if c := serviceContents(tt.Config); c != tt.Contents {
t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Contents, c)
}
}
}

View File

@@ -28,14 +28,12 @@ type Etcd struct {
// Units creates a Unit file drop-in for etcd, using any configured options.
func (ee Etcd) Units() []Unit {
content := dropinContents(ee.Etcd)
if content == "" {
return nil
}
return []Unit{{config.Unit{
Name: "etcd.service",
Runtime: true,
DropIn: true,
Content: content,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: serviceContents(ee.Etcd),
}},
}}}
}

View File

@@ -30,7 +30,11 @@ func TestEtcdUnits(t *testing.T) {
}{
{
config.Etcd{},
nil,
[]Unit{{config.Unit{
Name: "etcd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
}}},
},
{
config.Etcd{
@@ -40,11 +44,13 @@ func TestEtcdUnits(t *testing.T) {
[]Unit{{config.Unit{
Name: "etcd.service",
Runtime: true,
DropIn: true,
Content: `[Service]
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: `[Service]
Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
`,
}},
}}},
},
{
@@ -56,18 +62,20 @@ Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
[]Unit{{config.Unit{
Name: "etcd.service",
Runtime: true,
DropIn: true,
Content: `[Service]
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: `[Service]
Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
Environment="ETCD_NAME=node001"
Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
`,
}},
}}},
},
} {
units := Etcd{tt.config}.Units()
if !reflect.DeepEqual(tt.units, units) {
t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units)
t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
}
}
}

View File

@@ -17,9 +17,9 @@
package system
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
@@ -39,21 +39,24 @@ func (f *File) Permissions() (os.FileMode, error) {
return os.FileMode(0644), nil
}
// Parse string representation of file mode as octal
// Parse string representation of file mode as integer
perm, err := strconv.ParseInt(f.RawFilePermissions, 8, 32)
if err != nil {
return 0, errors.New("Unable to parse file permissions as octal integer")
return 0, fmt.Errorf("Unable to parse file permissions %q as integer", f.RawFilePermissions)
}
return os.FileMode(perm), nil
}
func WriteFile(f *File, root string) (string, error) {
if f.Encoding != "" {
return "", fmt.Errorf("Unable to write file with encoding %s", f.Encoding)
}
fullpath := path.Join(root, f.Path)
dir := path.Dir(fullpath)
log.Printf("Writing file to %q", fullpath)
content, err := config.DecodeContent(f.Content, f.Encoding)
if err != nil {
return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err)
}
if err := EnsureDirectoryExists(dir); err != nil {
return "", err
@@ -70,7 +73,7 @@ func WriteFile(f *File, root string) (string, error) {
return "", err
}
if err := ioutil.WriteFile(tmp.Name(), []byte(f.Content), perm); err != nil {
if err := ioutil.WriteFile(tmp.Name(), content, perm); err != nil {
return "", err
}
@@ -95,6 +98,7 @@ func WriteFile(f *File, root string) (string, error) {
return "", err
}
log.Printf("Wrote file to %q", fullpath)
return fullpath, nil
}
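The Permissions() hunk above keeps strconv.ParseInt(..., 8, 32), so both the conventional "0744" form and the bare "744" form parse to the same mode; only the comment and error message stop promising that the input is octal. A quick standalone illustration (not from the repository):

package main

import (
    "fmt"
    "os"
    "strconv"
)

func main() {
    for _, raw := range []string{"744", "0744"} {
        // base 8: both strings are read as octal digits.
        perm, err := strconv.ParseInt(raw, 8, 32)
        if err != nil {
            fmt.Printf("unable to parse %q: %v\n", raw, err)
            continue
        }
        fmt.Printf("%q -> %d (%s)\n", raw, perm, os.FileMode(perm))
    }
    // Output:
    // "744" -> 484 (-rwxr--r--)
    // "0744" -> 484 (-rwxr--r--)
}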

View File

@@ -85,6 +85,38 @@ func TestWriteFileInvalidPermission(t *testing.T) {
}
}
func TestDecimalFilePermissions(t *testing.T) {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
}
defer os.RemoveAll(dir)
fn := "foo"
fullPath := path.Join(dir, fn)
wf := File{config.File{
Path: fn,
RawFilePermissions: "744",
}}
path, err := WriteFile(&wf, dir)
if err != nil {
t.Fatalf("Processing of WriteFile failed: %v", err)
} else if path != fullPath {
t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
}
fi, err := os.Stat(fullPath)
if err != nil {
t.Fatalf("Unable to stat file: %v", err)
}
if fi.Mode() != os.FileMode(0744) {
t.Errorf("File has incorrect mode: %v", fi.Mode())
}
}
func TestWriteFilePermissions(t *testing.T) {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
@@ -124,10 +156,97 @@ func TestWriteFileEncodedContent(t *testing.T) {
}
defer os.RemoveAll(dir)
//all of these decode to "bar"
content_tests := map[string]string{
"base64": "YmFy",
"b64": "YmFy",
"gz": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
"gzip": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
"gz+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
"gzip+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
"gz+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
"gzip+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
}
for encoding, content := range content_tests {
fullPath := path.Join(dir, encoding)
wf := File{config.File{
Path: encoding,
Encoding: encoding,
Content: content,
RawFilePermissions: "0644",
}}
path, err := WriteFile(&wf, dir)
if err != nil {
t.Fatalf("Processing of WriteFile failed: %v", err)
} else if path != fullPath {
t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
}
fi, err := os.Stat(fullPath)
if err != nil {
t.Fatalf("Unable to stat file: %v", err)
}
if fi.Mode() != os.FileMode(0644) {
t.Errorf("File has incorrect mode: %v", fi.Mode())
}
contents, err := ioutil.ReadFile(fullPath)
if err != nil {
t.Fatalf("Unable to read expected file: %v", err)
}
if string(contents) != "bar" {
t.Fatalf("File has incorrect contents: '%s'", contents)
}
}
}
func TestWriteFileInvalidEncodedContent(t *testing.T) {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
}
defer os.RemoveAll(dir)
content_encodings := []string{
"base64",
"b64",
"gz",
"gzip",
"gz+base64",
"gzip+base64",
"gz+b64",
"gzip+b64",
}
for _, encoding := range content_encodings {
wf := File{config.File{
Path: path.Join(dir, "tmp", "foo"),
Content: "@&*#%invalid data*@&^#*&",
Encoding: encoding,
}}
if _, err := WriteFile(&wf, dir); err == nil {
t.Fatalf("Expected error to be raised when writing file with encoding")
}
}
}
func TestWriteFileUnknownEncodedContent(t *testing.T) {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
}
defer os.RemoveAll(dir)
wf := File{config.File{
Path: path.Join(dir, "tmp", "foo"),
Content: "",
Encoding: "base64",
Encoding: "no-such-encoding",
}}
if _, err := WriteFile(&wf, dir); err == nil {

30
system/flannel.go Normal file
View File

@@ -0,0 +1,30 @@
package system
import (
"path"
"strings"
"github.com/coreos/coreos-cloudinit/config"
)
// Flannel is a top-level structure which embeds its underlying configuration,
// config.Flannel, and provides the system-specific File().
type Flannel struct {
config.Flannel
}
func (fl Flannel) envVars() string {
return strings.Join(getEnvVars(fl.Flannel), "\n")
}
func (fl Flannel) File() (*File, error) {
vars := fl.envVars()
if vars == "" {
return nil, nil
}
return &File{config.File{
Path: path.Join("run", "flannel", "options.env"),
RawFilePermissions: "0644",
Content: vars,
}}, nil
}

62
system/flannel_test.go Normal file
View File

@@ -0,0 +1,62 @@
package system
import (
"reflect"
"testing"
"github.com/coreos/coreos-cloudinit/config"
)
func TestFlannelEnvVars(t *testing.T) {
for _, tt := range []struct {
config config.Flannel
contents string
}{
{
config.Flannel{},
"",
},
{
config.Flannel{
EtcdEndpoints: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
},
`FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
},
} {
out := Flannel{tt.config}.envVars()
if out != tt.contents {
t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.contents, out)
}
}
}
func TestFlannelFile(t *testing.T) {
for _, tt := range []struct {
config config.Flannel
file *File
}{
{
config.Flannel{},
nil,
},
{
config.Flannel{
EtcdEndpoints: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
},
&File{config.File{
Path: "run/flannel/options.env",
RawFilePermissions: "0644",
Content: `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
}},
},
} {
file, _ := Flannel{tt.config}.File()
if !reflect.DeepEqual(tt.file, file) {
t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file)
}
}
}

View File

@@ -29,14 +29,12 @@ type Fleet struct {
// Units generates a Unit file drop-in for fleet, using any fleet options
// configured in cloud-config.
func (fe Fleet) Units() []Unit {
content := dropinContents(fe.Fleet)
if content == "" {
return nil
}
return []Unit{{config.Unit{
Name: "fleet.service",
Runtime: true,
DropIn: true,
Content: content,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: serviceContents(fe.Fleet),
}},
}}}
}

View File

@@ -30,25 +30,31 @@ func TestFleetUnits(t *testing.T) {
}{
{
config.Fleet{},
nil,
[]Unit{{config.Unit{
Name: "fleet.service",
Runtime: true,
DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
}}},
},
{
config.Fleet{
PublicIP: "12.34.56.78",
},
[]Unit{{config.Unit{
Name: "fleet.service",
Content: `[Service]
Name: "fleet.service",
Runtime: true,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: `[Service]
Environment="FLEET_PUBLIC_IP=12.34.56.78"
`,
Runtime: true,
DropIn: true,
}},
}}},
},
} {
units := Fleet{tt.config}.Units()
if !reflect.DeepEqual(units, tt.units) {
t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units)
t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
}
}
}

39
system/locksmith.go Normal file
View File

@@ -0,0 +1,39 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"github.com/coreos/coreos-cloudinit/config"
)
// Locksmith is a top-level structure which embeds its underlying configuration,
// config.Locksmith, and provides the system-specific Units().
type Locksmith struct {
config.Locksmith
}
// Units creates a Unit file drop-in for locksmithd, using any configured options.
func (ee Locksmith) Units() []Unit {
return []Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: serviceContents(ee.Locksmith),
}},
}}}
}

60
system/locksmith_test.go Normal file
View File

@@ -0,0 +1,60 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"reflect"
"testing"
"github.com/coreos/coreos-cloudinit/config"
)
func TestLocksmithUnits(t *testing.T) {
for _, tt := range []struct {
config config.Locksmith
units []Unit
}{
{
config.Locksmith{},
[]Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
}}},
},
{
config.Locksmith{
Endpoint: "12.34.56.78:4001",
},
[]Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: `[Service]
Environment="LOCKSMITHD_ENDPOINT=12.34.56.78:4001"
`,
}},
}}},
},
} {
units := Locksmith{tt.config}.Units()
if !reflect.DeepEqual(units, tt.units) {
t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
}
}
}

View File

@@ -29,10 +29,6 @@ import (
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink"
)
const (
runtimeNetworkPath = "/run/systemd/network"
)
func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) {
defer func() {
if e := restartNetworkd(); e != nil {
@@ -96,33 +92,7 @@ func maybeProbeBonding(interfaces []network.InterfaceGenerator) error {
func restartNetworkd() error {
log.Printf("Restarting networkd.service\n")
_, err := NewUnitManager("").RunUnitCommand("restart", "systemd-networkd.service")
return err
}
func WriteNetworkdConfigs(interfaces []network.InterfaceGenerator) error {
for _, iface := range interfaces {
filename := fmt.Sprintf("%s.netdev", iface.Filename())
if err := writeConfig(filename, iface.Netdev()); err != nil {
return err
}
filename = fmt.Sprintf("%s.link", iface.Filename())
if err := writeConfig(filename, iface.Link()); err != nil {
return err
}
filename = fmt.Sprintf("%s.network", iface.Filename())
if err := writeConfig(filename, iface.Network()); err != nil {
return err
}
}
return nil
}
func writeConfig(filename string, content string) error {
if content == "" {
return nil
}
log.Printf("Writing networkd unit %q\n", filename)
_, err := WriteFile(&File{config.File{Content: content, Path: filename}}, runtimeNetworkPath)
networkd := Unit{config.Unit{Name: "systemd-networkd.service"}}
_, err := NewUnitManager("").RunUnitCommand(networkd, "restart")
return err
}

View File

@@ -23,7 +23,6 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus"
@@ -42,49 +41,51 @@ type systemd struct {
// never be used as a true MachineID
const fakeMachineID = "42000000000000000000000000000042"
// PlaceUnit writes a unit file at the provided destination, creating
// parent directories as necessary.
func (s *systemd) PlaceUnit(u *Unit, dst string) error {
dir := filepath.Dir(dst)
if _, err := os.Stat(dir); os.IsNotExist(err) {
if err := os.MkdirAll(dir, os.FileMode(0755)); err != nil {
return err
}
}
// PlaceUnit writes a unit file at its desired destination, creating parent
// directories as necessary.
func (s *systemd) PlaceUnit(u Unit) error {
file := File{config.File{
Path: filepath.Base(dst),
Path: u.Destination(s.root),
Content: u.Content,
RawFilePermissions: "0644",
}}
_, err := WriteFile(&file, dir)
if err != nil {
return err
}
return nil
_, err := WriteFile(&file, "/")
return err
}
func (s *systemd) EnableUnitFile(unit string, runtime bool) error {
// PlaceUnitDropIn writes a unit drop-in file at its desired destination,
// creating parent directories as necessary.
func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error {
file := File{config.File{
Path: u.DropInDestination(s.root, d),
Content: d.Content,
RawFilePermissions: "0644",
}}
_, err := WriteFile(&file, "/")
return err
}
func (s *systemd) EnableUnitFile(u Unit) error {
conn, err := dbus.New()
if err != nil {
return err
}
units := []string{unit}
_, _, err = conn.EnableUnitFiles(units, runtime, true)
units := []string{u.Name}
_, _, err = conn.EnableUnitFiles(units, u.Runtime, true)
return err
}
func (s *systemd) RunUnitCommand(command, unit string) (string, error) {
func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) {
conn, err := dbus.New()
if err != nil {
return "", err
}
var fn func(string, string) (string, error)
switch command {
switch c {
case "start":
fn = conn.StartUnit
case "stop":
@@ -100,10 +101,10 @@ func (s *systemd) RunUnitCommand(command, unit string) (string, error) {
case "reload-or-try-restart":
fn = conn.ReloadOrTryRestartUnit
default:
return "", fmt.Errorf("Unsupported systemd command %q", command)
return "", fmt.Errorf("Unsupported systemd command %q", c)
}
return fn(unit, "replace")
return fn(u.Name, "replace")
}
func (s *systemd) DaemonReload() error {
@@ -119,8 +120,8 @@ func (s *systemd) DaemonReload() error {
// /dev/null, analogous to `systemctl mask`.
// N.B.: Unlike `systemctl mask`, this function will *remove any existing unit
// file at the location*, to ensure that the mask will succeed.
func (s *systemd) MaskUnit(unit *Unit) error {
masked := unit.Destination(s.root)
func (s *systemd) MaskUnit(u Unit) error {
masked := u.Destination(s.root)
if _, err := os.Stat(masked); os.IsNotExist(err) {
if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil {
return err
@@ -134,8 +135,8 @@ func (s *systemd) MaskUnit(unit *Unit) error {
// UnmaskUnit is analogous to systemd's unit_file_unmask. If the file
// associated with the given Unit is empty or appears to be a symlink to
// /dev/null, it is removed.
func (s *systemd) UnmaskUnit(unit *Unit) error {
masked := unit.Destination(s.root)
func (s *systemd) UnmaskUnit(u Unit) error {
masked := u.Destination(s.root)
ne, err := nullOrEmpty(masked)
if os.IsNotExist(err) {
return nil
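With the new signatures, callers no longer compute unit destinations themselves; the systemd manager derives them from the Unit via the Destination and DropInDestination helpers shown later in this diff. A rough usage sketch, assuming it lives alongside the code above and using illustrative unit content:

package system

import "github.com/coreos/coreos-cloudinit/config"

// placeEtcdExample is a sketch (not part of the repository) showing how the
// new PlaceUnit/PlaceUnitDropIn signatures are driven; root is a scratch
// directory, e.g. one created with ioutil.TempDir.
func placeEtcdExample(root string) error {
    u := Unit{config.Unit{
        Name:    "etcd.service",
        Runtime: true,
        Content: "[Service]\nExecStart=/usr/bin/etcd\n", // illustrative content
        DropIns: []config.UnitDropIn{{
            Name:    "20-cloudinit.conf",
            Content: "[Service]\nEnvironment=\"ETCD_NAME=node001\"\n",
        }},
    }}

    sd := &systemd{root}

    // Ends up at <root>/run/systemd/system/etcd.service ("run" rather than
    // "etc" because Runtime is true).
    if err := sd.PlaceUnit(u); err != nil {
        return err
    }

    // Ends up at <root>/run/systemd/system/etcd.service.d/20-cloudinit.conf.
    return sd.PlaceUnitDropIn(u, u.DropIns[0])
}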

View File

@@ -17,6 +17,7 @@
package system
import (
"fmt"
"io/ioutil"
"os"
"path"
@@ -25,133 +26,109 @@ import (
"github.com/coreos/coreos-cloudinit/config"
)
func TestPlaceNetworkUnit(t *testing.T) {
u := Unit{config.Unit{
Name: "50-eth0.network",
Runtime: true,
Content: `[Match]
Name=eth47
[Network]
Address=10.209.171.177/19
`,
}}
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
}
defer os.RemoveAll(dir)
sd := &systemd{dir}
dst := u.Destination(dir)
expectDst := path.Join(dir, "run", "systemd", "network", "50-eth0.network")
if dst != expectDst {
t.Fatalf("unit.Destination returned %s, expected %s", dst, expectDst)
func TestPlaceUnit(t *testing.T) {
tests := []config.Unit{
{
Name: "50-eth0.network",
Runtime: true,
Content: "[Match]\nName=eth47\n\n[Network]\nAddress=10.209.171.177/19\n",
},
{
Name: "media-state.mount",
Content: "[Mount]\nWhat=/dev/sdb1\nWhere=/media/state\n",
},
}
if err := sd.PlaceUnit(&u, dst); err != nil {
t.Fatalf("PlaceUnit failed: %v", err)
}
for _, tt := range tests {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
panic(fmt.Sprintf("Unable to create tempdir: %v", err))
}
fi, err := os.Stat(dst)
if err != nil {
t.Fatalf("Unable to stat file: %v", err)
}
u := Unit{tt}
sd := &systemd{dir}
if fi.Mode() != os.FileMode(0644) {
t.Errorf("File has incorrect mode: %v", fi.Mode())
}
if err := sd.PlaceUnit(u); err != nil {
t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
}
contents, err := ioutil.ReadFile(dst)
if err != nil {
t.Fatalf("Unable to read expected file: %v", err)
}
fi, err := os.Stat(u.Destination(dir))
if err != nil {
t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
}
expectContents := `[Match]
Name=eth47
if mode := fi.Mode(); mode != os.FileMode(0644) {
t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
}
[Network]
Address=10.209.171.177/19
`
if string(contents) != expectContents {
t.Fatalf("File has incorrect contents '%s'.\nExpected '%s'", string(contents), expectContents)
c, err := ioutil.ReadFile(u.Destination(dir))
if err != nil {
t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
}
if string(c) != tt.Content {
t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Content, string(c))
}
os.RemoveAll(dir)
}
}
func TestUnitDestination(t *testing.T) {
dir := "/some/dir"
name := "foobar.service"
u := Unit{config.Unit{
Name: name,
DropIn: false,
}}
dst := u.Destination(dir)
expectDst := path.Join(dir, "etc", "systemd", "system", "foobar.service")
if dst != expectDst {
t.Errorf("unit.Destination returned %s, expected %s", dst, expectDst)
func TestPlaceUnitDropIn(t *testing.T) {
tests := []config.Unit{
{
Name: "false.service",
Runtime: true,
DropIns: []config.UnitDropIn{
{
Name: "00-true.conf",
Content: "[Service]\nExecStart=\nExecStart=/usr/bin/true\n",
},
},
},
{
Name: "true.service",
DropIns: []config.UnitDropIn{
{
Name: "00-false.conf",
Content: "[Service]\nExecStart=\nExecStart=/usr/bin/false\n",
},
},
},
}
u.DropIn = true
for _, tt := range tests {
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
panic(fmt.Sprintf("Unable to create tempdir: %v", err))
}
dst = u.Destination(dir)
expectDst = path.Join(dir, "etc", "systemd", "system", "foobar.service.d", cloudConfigDropIn)
if dst != expectDst {
t.Errorf("unit.Destination returned %s, expected %s", dst, expectDst)
}
}
u := Unit{tt}
sd := &systemd{dir}
func TestPlaceMountUnit(t *testing.T) {
u := Unit{config.Unit{
Name: "media-state.mount",
Runtime: false,
Content: `[Mount]
What=/dev/sdb1
Where=/media/state
`,
}}
if err := sd.PlaceUnitDropIn(u, u.DropIns[0]); err != nil {
t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
}
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
}
defer os.RemoveAll(dir)
fi, err := os.Stat(u.DropInDestination(dir, u.DropIns[0]))
if err != nil {
t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
}
sd := &systemd{dir}
if mode := fi.Mode(); mode != os.FileMode(0644) {
t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
}
dst := u.Destination(dir)
expectDst := path.Join(dir, "etc", "systemd", "system", "media-state.mount")
if dst != expectDst {
t.Fatalf("unit.Destination returned %s, expected %s", dst, expectDst)
}
c, err := ioutil.ReadFile(u.DropInDestination(dir, u.DropIns[0]))
if err != nil {
t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
}
if err := sd.PlaceUnit(&u, dst); err != nil {
t.Fatalf("PlaceUnit failed: %v", err)
}
if string(c) != u.DropIns[0].Content {
t.Errorf("bad contents (%+v): want %q, got %q", tt, u.DropIns[0].Content, string(c))
}
fi, err := os.Stat(dst)
if err != nil {
t.Fatalf("Unable to stat file: %v", err)
}
if fi.Mode() != os.FileMode(0644) {
t.Errorf("File has incorrect mode: %v", fi.Mode())
}
contents, err := ioutil.ReadFile(dst)
if err != nil {
t.Fatalf("Unable to read expected file: %v", err)
}
expectContents := `[Mount]
What=/dev/sdb1
Where=/media/state
`
if string(contents) != expectContents {
t.Fatalf("File has incorrect contents '%s'.\nExpected '%s'", string(contents), expectContents)
os.RemoveAll(dir)
}
}
@@ -180,7 +157,7 @@ func TestMaskUnit(t *testing.T) {
sd := &systemd{dir}
// Ensure mask works with units that do not currently exist
uf := &Unit{config.Unit{Name: "foo.service"}}
uf := Unit{config.Unit{Name: "foo.service"}}
if err := sd.MaskUnit(uf); err != nil {
t.Fatalf("Unable to mask new unit: %v", err)
}
@@ -194,7 +171,7 @@ func TestMaskUnit(t *testing.T) {
}
// Ensure mask works with unit files that already exist
ub := &Unit{config.Unit{Name: "bar.service"}}
ub := Unit{config.Unit{Name: "bar.service"}}
barPath := path.Join(dir, "etc", "systemd", "system", "bar.service")
if _, err := os.Create(barPath); err != nil {
t.Fatalf("Error creating new unit file: %v", err)
@@ -220,12 +197,12 @@ func TestUnmaskUnit(t *testing.T) {
sd := &systemd{dir}
nilUnit := &Unit{config.Unit{Name: "null.service"}}
nilUnit := Unit{config.Unit{Name: "null.service"}}
if err := sd.UnmaskUnit(nilUnit); err != nil {
t.Errorf("unexpected error from unmasking nonexistent unit: %v", err)
}
uf := &Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}}
uf := Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}}
dst := uf.Destination(dir)
if err := os.MkdirAll(path.Dir(dst), os.FileMode(0755)); err != nil {
t.Fatalf("Unable to create unit directory: %v", err)
@@ -245,7 +222,7 @@ func TestUnmaskUnit(t *testing.T) {
t.Errorf("unmask of non-empty unit mutated unit contents unexpectedly")
}
ub := &Unit{config.Unit{Name: "bar.service"}}
ub := Unit{config.Unit{Name: "bar.service"}}
dst = ub.Destination(dir)
if err := os.Symlink("/dev/null", dst); err != nil {
t.Fatalf("Unable to create masked unit: %v", err)

View File

@@ -19,40 +19,66 @@ package system
import (
"fmt"
"path"
"path/filepath"
"strings"
"github.com/coreos/coreos-cloudinit/config"
)
// Name for drop-in service configuration files created by cloudconfig
const cloudConfigDropIn = "20-cloudinit.conf"
type UnitManager interface {
PlaceUnit(unit *Unit, dst string) error
EnableUnitFile(unit string, runtime bool) error
RunUnitCommand(command, unit string) (string, error)
PlaceUnit(unit Unit) error
PlaceUnitDropIn(unit Unit, dropIn config.UnitDropIn) error
EnableUnitFile(unit Unit) error
RunUnitCommand(unit Unit, command string) (string, error)
MaskUnit(unit Unit) error
UnmaskUnit(unit Unit) error
DaemonReload() error
MaskUnit(unit *Unit) error
UnmaskUnit(unit *Unit) error
}
// Unit is a top-level structure which embeds its underlying configuration,
// config.Unit, and provides the system-specific Destination().
// config.Unit, and provides the system-specific Destination(), Type(), and
// Group().
type Unit struct {
config.Unit
}
// Destination builds the appropriate absolute file path for
// the Unit. The root argument indicates the effective base
// directory of the system (similar to a chroot).
func (u *Unit) Destination(root string) string {
// Type returns the extension of the unit (everything that follows the final
// period).
func (u Unit) Type() string {
ext := filepath.Ext(u.Name)
return strings.TrimLeft(ext, ".")
}
// Group returns "network" if the unit is a network-related unit (network,
// netdev, or link) and "system" otherwise.
func (u Unit) Group() string {
switch u.Type() {
case "network", "netdev", "link":
return "network"
default:
return "system"
}
}
// Destination builds the appropriate absolute file path for the Unit. The root
// argument indicates the effective base directory of the system (similar to a
// chroot).
func (u Unit) Destination(root string) string {
return path.Join(u.prefix(root), u.Name)
}
// DropInDestination builds the appropriate absolute file path for the
// UnitDropIn. The root argument indicates the effective base directory of the
// system (similar to a chroot) and the dropIn argument is the UnitDropIn for
// which the destination is being calculated.
func (u Unit) DropInDestination(root string, dropIn config.UnitDropIn) string {
return path.Join(u.prefix(root), fmt.Sprintf("%s.d", u.Name), dropIn.Name)
}
func (u Unit) prefix(root string) string {
dir := "etc"
if u.Runtime {
dir = "run"
}
if u.DropIn {
return path.Join(root, dir, "systemd", u.Group(), fmt.Sprintf("%s.d", u.Name), cloudConfigDropIn)
} else {
return path.Join(root, dir, "systemd", u.Group(), u.Name)
}
return path.Join(root, dir, "systemd", u.Group())
}

138
system/unit_test.go Normal file
View File

@@ -0,0 +1,138 @@
/*
Copyright 2014 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"testing"
"github.com/coreos/coreos-cloudinit/config"
)
func TestType(t *testing.T) {
tests := []struct {
name string
typ string
}{
{},
{"test.service", "service"},
{"hello", ""},
{"lots.of.dots", "dots"},
}
for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
}}
if typ := u.Type(); tt.typ != typ {
t.Errorf("bad type (%+v): want %q, got %q", tt, tt.typ, typ)
}
}
}
func TestGroup(t *testing.T) {
tests := []struct {
name string
group string
}{
{"test.service", "system"},
{"test.link", "network"},
{"test.network", "network"},
{"test.netdev", "network"},
{"test.conf", "system"},
}
for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
}}
if group := u.Group(); tt.group != group {
t.Errorf("bad group (%+v): want %q, got %q", tt, tt.group, group)
}
}
}
func TestDestination(t *testing.T) {
tests := []struct {
root string
name string
runtime bool
destination string
}{
{
root: "/some/dir",
name: "foobar.service",
destination: "/some/dir/etc/systemd/system/foobar.service",
},
{
root: "/some/dir",
name: "foobar.service",
runtime: true,
destination: "/some/dir/run/systemd/system/foobar.service",
},
}
for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
Runtime: tt.runtime,
}}
if d := u.Destination(tt.root); tt.destination != d {
t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
}
}
}
func TestDropInDestination(t *testing.T) {
tests := []struct {
root string
unitName string
dropInName string
runtime bool
destination string
}{
{
root: "/some/dir",
unitName: "foo.service",
dropInName: "bar.conf",
destination: "/some/dir/etc/systemd/system/foo.service.d/bar.conf",
},
{
root: "/some/dir",
unitName: "foo.service",
dropInName: "bar.conf",
runtime: true,
destination: "/some/dir/run/systemd/system/foo.service.d/bar.conf",
},
}
for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.unitName,
Runtime: tt.runtime,
DropIns: []config.UnitDropIn{{
Name: tt.dropInName,
}},
}}
if d := u.DropInDestination(tt.root, u.DropIns[0]); tt.destination != d {
t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
}
}
}

View File

@@ -126,7 +126,7 @@ func (uc Update) Units() []Unit {
Runtime: true,
}}
if uc.Update.RebootStrategy == "false" {
if uc.Update.RebootStrategy == "off" {
ls.Command = "stop"
ls.Mask = true
}

View File

@@ -72,7 +72,7 @@ func TestUpdateUnits(t *testing.T) {
}}},
},
{
config: config.Update{RebootStrategy: "false"},
config: config.Update{RebootStrategy: "off"},
units: []Unit{{config.Unit{
Name: "locksmithd.service",
Command: "stop",
@@ -100,7 +100,7 @@ func TestUpdateFile(t *testing.T) {
},
{
config: config.Update{RebootStrategy: "wizzlewazzle"},
err: &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: []string{"best-effort", "etcd-lock", "reboot", "false"}},
err: &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: "^(best-effort|etcd-lock|reboot|off)$"},
},
{
config: config.Update{Group: "master", Server: "http://foo.com"},
@@ -135,9 +135,9 @@ func TestUpdateFile(t *testing.T) {
}},
},
{
config: config.Update{RebootStrategy: "false"},
config: config.Update{RebootStrategy: "off"},
file: &File{config.File{
Content: "REBOOT_STRATEGY=false\n",
Content: "REBOOT_STRATEGY=off\n",
Path: "etc/coreos/update.conf",
RawFilePermissions: "0644",
}},