Compare commits
252 Commits
c8e864fef5
60a3377e7c
5527f09778
54a64454b9
0e70d4f01f
af8e590575
40d943fb7a
248536a5cd
4ed1d03c97
057ab37364
182241c8d3
edced59fa6
9be836df31
4e54447b8e
999c38b09b
06d13de5c3
5b0903d162
10669be7c0
2edae741e1
ea90e553d1
b0cfd86902
565a9540c9
fd10e27b99
39763d772c
ee69b77bfb
353444e56d
112ba1e31f
9c3cd9e69c
685d8317bc
f42d102b26
c944e9ef94
f10d6e8bef
f3f3af79fd
0e63aa0f6b
b254e17e89
5c059b66f0
c628bef666
2270db3f7a
d0d467813d
123f111efe
521ecfdab5
6d0fdf1a47
ffc54b028c
420f7cf202
624df676d0
75ed8dacf9
dcaabe4d4a
92c57423ba
7447e133c9
4e466c12da
333468dba3
55c3a793ad
eca51031c8
19522bcb82
62248ea33d
d2a19cc86d
08131ffab1
4a0019c669
3275ead1ec
32b6a55724
6c43644369
e6593d49e6
ab752b239f
0742e4d357
78f586ec9e
6f91b76d79
5c80ccacc4
44fdf95d99
0a62614eec
97758b343b
fb6f52b360
786cd2a539
45793f1254
b621756d92
a5b5c700a6
ea95920f31
d7602f3c08
a20addd05e
d9d89a6fa0
3c26376326
d3294bcb86
dda314b518
055a3c339a
51f37100a1
88e8265cd6
6e2db882e6
3e2823df1b
46cb51cf91
1a6cee5305
d02aa18839
e9bda98b54
badc874b74
c9e8c887b8
8be307de49
562c474275
b6062f0644
c5fada6e69
5c5834863b
44f0a949c5
106c4e7a2c
6c1ba590aa
45da664c59
2a71551ef2
84e1cb3242
5214ead926
e2c24c4cef
75e288c553
0785840fe3
c10bfc2f56
2f954dcdc2
cdfc94f4e9
18e2f98414
4b472795c4
85b8d804c8
1fbbaaec19
667dbd8fb7
6730cb7227
9454522033
c255739a93
2051cd3e1c
b52cb3fea3
da5f85b3fb
9999178538
8f766e4666
2d28d16c92
e9cd09dd7b
8370b30aa2
3e015cc3a1
a0fe6d0884
585ce5fcd9
72445796ca
7342d91a85
db1bc51c98
c1f373e648
db49a16002
a4a6c281d9
17f8733121
7dec922618
54d3ae27af
ee2416af64
cda037f9a5
549806cf64
56815a6756
24a6f7c49c
98484be434
9024659296
fc6940f7ba
f2fd95699b
65db96cc7c
c17b93b5c0
d352f8ce6a
78aa2c56ec
c5b3788282
5e98970bb5
cbdd446c55
316cadcf44
5a939be21b
8d76c64386
1b854eb51e
9fcf338bf3
fda72bdb5c
685a38c6c8
9d15f2cfaf
2134fce791
3abd6b2225
2a8e6c9566
abe43537da
3a550af651
61c3a0eb2d
480176bc11
01b18eb551
970ef435b6
e8d0021140
e9ec78ac6f
4a2e417781
604ef7ecb4
c39dd5cc67
a923161f4a
e59e2f6cd5
e90fe3eba8
fb0187b197
6babe74716
b1e88284ca
18a65f7dac
0c212c72c9
6a800d8cc0
5e112147bb
7e78b1563f
ecbe81f103
45c20c1dd3
8ce925a060
eadb6ef42c
7518f0ec93
f0b9eaf2fe
7320a2cbf2
57950b3ed9
85c6a2a16a
24b44e86a6
2f52ad4ef8
735d6c6161
1cf275bad6
f1c97cb4d5
d143904aa9
c428ce2cc5
dfb5b4fc3a
97d5538533
6b8f82b5d3
facde6609f
d68ae84b37
54aa39543b
8566a2c118
49ac083af5
5d65ca230a
38b3e1213a
4eedca26e9
f2b342c8be
c19d8f6b61
7913f74351
5593408be8
7fc67c2acf
b093094292
9a80fd714a
fef5473881
bf5a2b208f
364507fb75
08d4842502
21e32e44f8
7a06dee16f
ff9cf5743d
1b10a3a187
10838e001d
96370ac5b9
0b82cd074d
a974e85103
f0450662b0
03e29d1291
98ae5d88aa
bf5d3539c9
5e4cbcd909
a270c4c737
f356a8a690
b1a897d75c
be51f4eba0
a55e2cd49b
983501e43b
e3037f18a6
fe388a3ab6
c820f2b1cf
81824be3bf
98c26440be
3b5fcc393b
9528077340
.travis.yml (15 changed lines)
@@ -1,8 +1,17 @@
language: go
go: 1.2
sudo: false
matrix:
  include:
    - go: 1.4
      env: TOOLS_CMD=golang.org/x/tools/cmd
    - go: 1.3
      env: TOOLS_CMD=code.google.com/p/go.tools/cmd
    - go: 1.2
      env: TOOLS_CMD=code.google.com/p/go.tools/cmd

install:
  - go get code.google.com/p/go.tools/cmd/cover
  - go get ${TOOLS_CMD}/cover
  - go get ${TOOLS_CMD}/vet

script:
  - ./test
  - ./test
@@ -39,22 +39,25 @@ Thanks for your contributions!
### Format of the Commit Message

We follow a rough convention for commit messages borrowed from AngularJS. This
is an example of a commit:
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.

```
feat(scripts/test-cluster): add a cluster test command
environment: write new keys in consistent order

this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Go 1.3 randomizes the ordering of keys when iterating over a map.
Sort the keys to make this ordering consistent.

Fixes #38
```

The format can be described more formally as follows:

```
<type>(<scope>): <subject>
<subsystem>: <what changed>
<BLANK LINE>
<body>
<why this change was made>
<BLANK LINE>
<footer>
```
@@ -63,25 +66,3 @@ The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

#### Subject Line

The subject line contains a succinct description of the change.

#### Allowed `<type>`s
- *feat* (feature)
- *fix* (bug fix)
- *docs* (documentation)
- *style* (formatting, missing semi colons, …)
- *refactor*
- *test* (when adding missing tests)
- *chore* (maintain)

#### Allowed `<scope>`s

Scopes can anything specifying the place of the commit change in the code base -
for example, "api", "store", etc.

For more details on the commit format, see the [AngularJS commit style
guide](https://docs.google.com/a/coreos.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit#).
@@ -13,7 +13,7 @@ If no **id** field is provided, coreos-cloudinit will ignore this section.
For example, the following cloud-config document...

```
```yaml
#cloud-config
coreos:
  oem:
@@ -26,7 +26,7 @@ coreos:
...would be rendered to the following `/etc/oem-release`:

```
```yaml
ID=rackspace
NAME="Rackspace Cloud Servers"
VERSION_ID=168.0.0
@@ -1,10 +1,12 @@
# Using Cloud-Config

CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.

Your cloud-config is processed during each boot. Invalid cloud-config won't be processed but will be logged in the journal. You can validate your cloud-config with the [CoreOS validator]({{site.url}}/validate) or by running `coreos-cloudinit -validate`.
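A minimal sketch of running that validation from a shell, assuming a cloud-config saved locally as `user_data` (the `--from-file` flag is an assumption about the coreos-cloudinit CLI; only `-validate` is stated above):

```sh
# Check that the file parses as valid cloud-config without applying it.
# --from-file is assumed here; -validate comes from the paragraph above.
coreos-cloudinit -validate --from-file=./user_data

# A zero exit status indicates the document was accepted; errors are printed otherwise.
```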
## Configuration File

The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file. which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file, which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.

We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
@@ -16,7 +18,7 @@ We've designed our implementation to allow the same cloud-config file to work ac
The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.

A cloud-config file should contain an associative array which has zero or more of the following keys:
A cloud-config file must contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:

- `coreos`
- `ssh_authorized_keys`
@@ -40,24 +42,24 @@ CoreOS tries to conform to each platform's native method to provide user data. E
#### etcd

The `coreos.etcd.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
We can use the templating feature of coreos-cloudinit to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...

```
```yaml
#cloud-config

coreos:
  etcd:
    name: node001
    # generate a new token for each unique cluster from https://discovery.etcd.io/new
    discovery: https://discovery.etcd.io/<token>
    # multi-region and multi-cloud deployments need to use $public_ipv4
    addr: $public_ipv4:4001
    peer-addr: $private_ipv4:7001
```

...will generate a systemd unit drop-in like this:

```
```yaml
[Service]
Environment="ETCD_NAME=node001"
Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
@@ -66,7 +68,8 @@ Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
```

For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
Note that hyphens in the coreos.etcd.* keys are mapped to underscores.

_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._

[etcd-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
@@ -74,18 +77,18 @@ Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
The `coreos.fleet.*` parameters work very similarly to `coreos.etcd.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...

```
```yaml
#cloud-config

coreos:
  fleet:
    public-ip: $public_ipv4
    metadata: region=us-west
```

...will generate a systemd unit drop-in like this:

```
```yaml
[Service]
Environment="FLEET_PUBLIC_IP=203.0.113.29"
Environment="FLEET_METADATA=region=us-west"
@@ -93,7 +96,56 @@ Environment="FLEET_METADATA=region=us-west"
For more information on fleet configuration, see the [fleet documentation][fleet-config].

[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/configuration.md
[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration

#### flannel

The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd.*`
and `coreos.fleet.*`. They can be used to set environment variables for
flanneld. For example, the following cloud-config...

```yaml
#cloud-config

coreos:
  flannel:
    etcd-prefix: /coreos.com/network2
```

...will generate a systemd unit drop-in like so:

```
[Service]
Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
```

For the complete list of flannel configuration parameters, see the [flannel documentation][flannel-readme].

[flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md

#### locksmith

The `coreos.locksmith.*` parameters can be used to set environment variables
for locksmith. For example, the following cloud-config...

```yaml
#cloud-config

coreos:
  locksmith:
    endpoint: example.com:4001
```

...will generate a systemd unit drop-in like so:

```
[Service]
Environment="LOCKSMITHD_ENDPOINT=example.com:4001"
```

For the complete list of locksmith configuration parameters, see the [locksmith documentation][locksmith-readme].

[locksmith-readme]: https://github.com/coreos/locksmith/blob/master/README.md

#### update
@@ -114,7 +166,7 @@ The `reboot-strategy` parameter also affects the behaviour of [locksmith](https:
##### Example

```
```yaml
#cloud-config
coreos:
  update:
@@ -123,14 +175,20 @@ coreos:
#### units

The `coreos.units.*` parameters define a list of arbitrary systemd units to start. Each item is an object with the following fields:
The `coreos.units.*` parameters define a list of arbitrary systemd units to start after booting. This feature is intended to help you start essential services required to mount storage and configure networking in order to join the CoreOS cluster. It is not intended to be a Chef/Puppet replacement.

Each item is an object with the following fields:

- **name**: String representing unit's name. Required.
- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. Default value is false.
- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. Default value is false.
- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. The default value is false.
- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. The default value is false.
- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. Default value is restart.
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. Default value is false.
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
- **drop-ins**: A list of unit drop-ins with the following fields:
  - **name**: String representing unit's name. Required.
  - **content**: Plaintext string representing entire file. Required.

**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
@@ -138,36 +196,51 @@ The `coreos.units.*` parameters define a list of arbitrary systemd units to star
Write a unit to disk, automatically starting it.

```
```yaml
#cloud-config

coreos:
  units:
    - name: docker-redis.service
      command: start
      content: |
        [Unit]
        Description=Redis container
        Author=Me
        After=docker.service

        [Service]
        Restart=always
        ExecStart=/usr/bin/docker start -a redis_server
        ExecStop=/usr/bin/docker stop -t 2 redis_server
```

Add the DOCKER_OPTS environment variable to docker.service.

```yaml
#cloud-config

coreos:
  units:
    - name: docker.service
      drop-ins:
        - name: 50-insecure-registry.conf
          content: |
            [Service]
            Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
```

Start the built-in `etcd` and `fleet` services:

```
```yaml
#cloud-config

coreos:
  units:
    - name: etcd.service
      command: start
    - name: fleet.service
      command: start
```

### ssh_authorized_keys
@@ -177,7 +250,7 @@ The `ssh_authorized_keys` parameter adds public SSH keys which will be authorize
The keys will be named "coreos-cloudinit" by default.
Override this by using the `--ssh-key-name` flag when calling `coreos-cloudinit`.

```
```yaml
#cloud-config

ssh_authorized_keys:
@@ -189,7 +262,7 @@ ssh_authorized_keys:
The `hostname` parameter defines the system's hostname.
This is the local part of a fully-qualified domain name (i.e. `foo` in `foo.example.com`).

```
```yaml
#cloud-config

hostname: coreos1
@@ -203,7 +276,7 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
- **name**: Required. Login name of user
- **gecos**: GECOS comment of user
- **passwd**: Hash of the password to use for this user
- **homedir**: User's home directory. Defaults to /home/<name>
- **homedir**: User's home directory. Defaults to /home/\<name\>
- **no-create-home**: Boolean. Skip home directory creation.
- **primary-group**: Default group for the user. Defaults to a new group created named after the user.
- **groups**: Add user to these additional groups
@@ -222,7 +295,7 @@ The following fields are not yet implemented:
- **selinux-user**: Corresponding SELinux user
- **ssh-import-id**: Import SSH keys by ID from Launchpad.

```
```yaml
#cloud-config

users:
@@ -261,7 +334,7 @@ Using a higher number of rounds will help create more secure passwords, but give
Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.

```
```yaml
#cloud-config

users:
@@ -274,7 +347,7 @@ users:
We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:

```
```yaml
#cloud-config

users:
@@ -284,7 +357,7 @@ users:
You can also specify any URL whose response matches the JSON format for public keys:

```
```yaml
#cloud-config

users:
@@ -294,24 +367,50 @@ users:
### write_files

The `write-file` parameter defines a list of files to create on the local filesystem. Each file is represented as an associative array which has the following keys:
The `write_files` directive defines a set of files to create on the local filesystem.
Each item in the list may have the following keys:

- **path**: Absolute location on disk where contents should be written
- **content**: Data to write at the provided `path`
- **permissions**: String representing file permissions in octal notation (i.e. '0644')
- **permissions**: Integer representing file permissions, typically in octal notation (i.e. 0644)
- **owner**: User and group that should own the file written to disk. This is equivalent to the `<user>:<group>` argument to `chown <user>:<group> <path>`.
- **encoding**: Optional. The encoding of the data in content. If not specified this defaults to the yaml document encoding (usually utf-8). Supported encoding types are:
  - **b64, base64**: Base64 encoded content
  - **gz, gzip**: gzip encoded content, for use with the !!binary tag
  - **gz+b64, gz+base64, gzip+b64, gzip+base64**: Base64 encoded gzip content

Explicitly not implemented is the **encoding** attribute.
The **content** field must represent exactly what should be written to disk.

```
```yaml
#cloud-config
write_files:
  - path: /etc/fleet/fleet.conf
  - path: /etc/resolv.conf
    permissions: 0644
    owner: root
    content: |
      verbosity=1
      metadata="region=us-west,type=ssd"
      nameserver 8.8.8.8
  - path: /etc/motd
    permissions: 0644
    owner: root
    content: |
      Good news, everyone!
  - path: /tmp/like_this
    permissions: 0644
    owner: root
    encoding: gzip
    content: !!binary |
      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
  - path: /tmp/or_like_this
    permissions: 0644
    owner: root
    encoding: gzip+base64
    content: |
      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
  - path: /tmp/todolist
    permissions: 0644
    owner: root
    encoding: base64
    content: |
      UGFjayBteSBib3ggd2l0aCBmaXZlIGRvemVuIGxpcXVvciBqdWdz
```
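As a side note (not part of the diffed document), one way to produce base64 and gzip `content` values like those in the example above, assuming GNU `gzip` and `base64` are available; the input file names are placeholders:

```sh
# encoding: gzip+base64 -- compress, then base64-encode without line wrapping
# (-w0 is GNU coreutils specific)
gzip -c motd.txt | base64 -w0

# encoding: base64 -- plain base64, as used for /tmp/todolist above
base64 -w0 todolist.txt
```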
### manage_etc_hosts
@@ -321,7 +420,7 @@ Currently, the only supported value is "localhost" which will cause your system'
to resolve to "127.0.0.1". This is helpful when the host does not have DNS
infrastructure in place to resolve its own hostname, for example, when using Vagrant.

```
```yaml
#cloud-config

manage_etc_hosts: localhost
@@ -14,17 +14,21 @@ The image should be a single FAT or ISO9660 file system with the label
For example, to wrap up a config named `user_data` in a config drive image:

```sh
mkdir -p /tmp/new-drive/openstack/latest
cp user_data /tmp/new-drive/openstack/latest/user_data
mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
rm -r /tmp/new-drive
```

## QEMU virtfs

One exception to the above, when using QEMU it is possible to skip creating an
image and use a plain directory containing the same contents:

```sh
qemu-system-x86_64 \
    -fsdev local,id=conf,security_model=none,readonly,path=/tmp/new-drive \
    -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \
    [usual qemu options here...]
```
Godeps/Godeps.json (generated, new file, 34 lines)
@@ -0,0 +1,34 @@
{
  "ImportPath": "github.com/coreos/coreos-cloudinit",
  "GoVersion": "go1.3.3",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "github.com/cloudsigma/cepgo",
      "Rev": "1bfc4895bf5c4d3b599f3f6ee142299488c8739b"
    },
    {
      "ImportPath": "github.com/coreos/go-systemd/dbus",
      "Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
    },
    {
      "ImportPath": "github.com/coreos/yaml",
      "Rev": "6b16a5714269b2f70720a45406b1babd947a17ef"
    },
    {
      "ImportPath": "github.com/dotcloud/docker/pkg/netlink",
      "Comment": "v0.11.1-359-g55d41c3e21e1",
      "Rev": "55d41c3e21e1593b944c06196ffb2ac57ab7f653"
    },
    {
      "ImportPath": "github.com/guelfey/go.dbus",
      "Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda"
    },
    {
      "ImportPath": "github.com/tarm/goserial",
      "Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
    }
  ]
}
Godeps/Readme (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
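As an aside (not from the diff itself), a tree like this is what the godep tool referenced above produces; a typical invocation, assuming godep is installed and rewritten import paths (as seen elsewhere in this diff) are wanted, looks roughly like:

```sh
# Fetch the vendoring tool (assumed workflow, not stated in the diff).
go get github.com/tools/godep

# Record dependencies under Godeps/ and rewrite import paths to
# Godeps/_workspace/src/... (-r), matching the rewritten imports in this diff.
godep save -r ./...
```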
Godeps/_workspace/.gitignore (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
/pkg
/bin
Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE (generated, vendored, new file, 202 lines)
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md (generated, vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
cepgo
=====

Cepko implements easy-to-use communication with CloudSigma's VMs through a
virtual serial port without bothering with formatting the messages properly nor
parsing the output with the specific and sometimes confusing shell tools for
that purpose.

Having the server definition accessible by the VM can be useful in various
ways. For example it is possible to easily determine from within the VM, which
network interfaces are connected to public and which to private network.
Another use is to pass some data to initial VM setup scripts, like setting the
hostname to the VM name or passing ssh public keys through server meta.

Example usage:

    package main

    import (
        "fmt"

        "github.com/cloudsigma/cepgo"
    )

    func main() {
        c := cepgo.NewCepgo()
        result, err := c.Meta()
        if err != nil {
            panic(err)
        }
        fmt.Printf("%#v", result)
    }

Output:

    map[string]interface {}{
        "optimize_for":"custom",
        "ssh_public_key":"ssh-rsa AAA...",
        "description":"[...]",
    }

For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go (generated, vendored, new file, 186 lines)
@@ -0,0 +1,186 @@
// Cepko implements easy-to-use communication with CloudSigma's VMs through a
// virtual serial port without bothering with formatting the messages properly
// nor parsing the output with the specific and sometimes confusing shell tools
// for that purpose.
//
// Having the server definition accessible by the VM can be useful in various
// ways. For example it is possible to easily determine from within the VM,
// which network interfaces are connected to public and which to private
// network. Another use is to pass some data to initial VM setup scripts, like
// setting the hostname to the VM name or passing ssh public keys through
// server meta.
//
// Example usage:
//
//     package main
//
//     import (
//         "fmt"
//
//         "github.com/cloudsigma/cepgo"
//     )
//
//     func main() {
//         c := cepgo.NewCepgo()
//         result, err := c.Meta()
//         if err != nil {
//             panic(err)
//         }
//         fmt.Printf("%#v", result)
//     }
//
// Output:
//
//     map[string]string{
//         "optimize_for":"custom",
//         "ssh_public_key":"ssh-rsa AAA...",
//         "description":"[...]",
//     }
//
// For more information take a look at the Server Context section API Docs:
// http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
package cepgo

import (
    "bufio"
    "encoding/json"
    "errors"
    "fmt"
    "runtime"

    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/tarm/goserial"
)

const (
    requestPattern = "<\n%s\n>"
    EOT            = '\x04' // End Of Transmission
)

var (
    SerialPort string = "/dev/ttyS1"
    Baud       int    = 115200
)

// Sets the serial port. If the operating system is windows CloudSigma's server
// context is at COM2 port, otherwise (linux, freebsd, darwin) the port is
// being left to the default /dev/ttyS1.
func init() {
    if runtime.GOOS == "windows" {
        SerialPort = "COM2"
    }
}

// The default fetcher makes the connection to the serial port,
// writes given query and reads until the EOT symbol.
func fetchViaSerialPort(key string) ([]byte, error) {
    config := &serial.Config{Name: SerialPort, Baud: Baud}
    connection, err := serial.OpenPort(config)
    if err != nil {
        return nil, err
    }

    query := fmt.Sprintf(requestPattern, key)
    if _, err := connection.Write([]byte(query)); err != nil {
        return nil, err
    }

    reader := bufio.NewReader(connection)
    answer, err := reader.ReadBytes(EOT)
    if err != nil {
        return nil, err
    }

    return answer[0 : len(answer)-1], nil
}

// Queries to the serial port can be executed only from instance of this type.
// The result from each of them can be either interface{}, map[string]string or
// a single in case of single value is returned. There is also a public metod
// who directly calls the fetcher and returns raw []byte from the serial port.
type Cepgo struct {
    fetcher func(string) ([]byte, error)
}

// Creates a Cepgo instance with the default serial port fetcher.
func NewCepgo() *Cepgo {
    cepgo := new(Cepgo)
    cepgo.fetcher = fetchViaSerialPort
    return cepgo
}

// Creates a Cepgo instance with custom fetcher.
func NewCepgoFetcher(fetcher func(string) ([]byte, error)) *Cepgo {
    cepgo := new(Cepgo)
    cepgo.fetcher = fetcher
    return cepgo
}

// Fetches raw []byte from the serial port using directly the fetcher member.
func (c *Cepgo) FetchRaw(key string) ([]byte, error) {
    return c.fetcher(key)
}

// Fetches a single key and tries to unmarshal the result to json and returns
// it. If the unmarshalling fails it's safe to assume the result it's just a
// string and returns it.
func (c *Cepgo) Key(key string) (interface{}, error) {
    var result interface{}

    fetched, err := c.FetchRaw(key)
    if err != nil {
        return nil, err
    }

    err = json.Unmarshal(fetched, &result)
    if err != nil {
        return string(fetched), nil
    }
    return result, nil
}

// Fetches all the server context. Equivalent of c.Key("")
func (c *Cepgo) All() (interface{}, error) {
    return c.Key("")
}

// Fetches only the object meta field and makes sure to return a proper
// map[string]string
func (c *Cepgo) Meta() (map[string]string, error) {
    rawMeta, err := c.Key("/meta/")
    if err != nil {
        return nil, err
    }

    return typeAssertToMapOfStrings(rawMeta)
}

// Fetches only the global context and makes sure to return a proper
// map[string]string
func (c *Cepgo) GlobalContext() (map[string]string, error) {
    rawContext, err := c.Key("/global_context/")
    if err != nil {
        return nil, err
    }

    return typeAssertToMapOfStrings(rawContext)
}

// Just a little helper function that uses type assertions in order to convert
// a interface{} to map[string]string if this is possible.
func typeAssertToMapOfStrings(raw interface{}) (map[string]string, error) {
    result := make(map[string]string)

    dictionary, ok := raw.(map[string]interface{})
    if !ok {
        return nil, errors.New("Received bytes are formatted badly")
    }

    for key, rawValue := range dictionary {
        if value, ok := rawValue.(string); ok {
            result[key] = value
        } else {
            return nil, errors.New("Server context metadata is formatted badly")
        }
    }
    return result, nil
}
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go (generated, vendored, new file, 122 lines)
@@ -0,0 +1,122 @@
package cepgo

import (
    "encoding/json"
    "testing"
)

func fetchMock(key string) ([]byte, error) {
    context := []byte(`{
        "context": true,
        "cpu": 4000,
        "cpu_model": null,
        "cpus_instead_of_cores": false,
        "enable_numa": false,
        "global_context": {
            "some_global_key": "some_global_val"
        },
        "grantees": [],
        "hv_relaxed": false,
        "hv_tsc": false,
        "jobs": [],
        "mem": 4294967296,
        "meta": {
            "base64_fields": "cloudinit-user-data",
            "cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
            "ssh_public_key": "ssh-rsa AAAAB2NzaC1yc2E.../hQ5D5 john@doe"
        },
        "name": "coreos",
        "nics": [
            {
                "runtime": {
                    "interface_type": "public",
                    "ip_v4": {
                        "uuid": "31.171.251.74"
                    },
                    "ip_v6": null
                },
                "vlan": null
            }
        ],
        "smp": 2,
        "status": "running",
        "uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
    }`)

    if key == "" {
        return context, nil
    }

    var marshalledContext map[string]interface{}

    err := json.Unmarshal(context, &marshalledContext)
    if err != nil {
        return nil, err
    }

    if key[0] == '/' {
        key = key[1:]
    }
    if key[len(key)-1] == '/' {
        key = key[:len(key)-1]
    }

    return json.Marshal(marshalledContext[key])
}

func TestAll(t *testing.T) {
    cepgo := NewCepgoFetcher(fetchMock)

    result, err := cepgo.All()
    if err != nil {
        t.Error(err)
    }

    for _, key := range []string{"meta", "name", "uuid", "global_context"} {
        if _, ok := result.(map[string]interface{})[key]; !ok {
            t.Errorf("%s not in all keys", key)
        }
    }
}

func TestKey(t *testing.T) {
    cepgo := NewCepgoFetcher(fetchMock)

    result, err := cepgo.Key("uuid")
    if err != nil {
        t.Error(err)
    }

    if _, ok := result.(string); !ok {
        t.Errorf("%#v\n", result)

        t.Error("Fetching the uuid did not return a string")
    }
}

func TestMeta(t *testing.T) {
    cepgo := NewCepgoFetcher(fetchMock)

    meta, err := cepgo.Meta()
    if err != nil {
        t.Errorf("%#v\n", meta)
        t.Error(err)
    }

    if _, ok := meta["ssh_public_key"]; !ok {
        t.Error("ssh_public_key is not in the meta")
    }
}

func TestGlobalContext(t *testing.T) {
    cepgo := NewCepgoFetcher(fetchMock)

    result, err := cepgo.GlobalContext()
    if err != nil {
        t.Error(err)
    }

    if _, ok := result["some_global_key"]; !ok {
        t.Error("some_global_key is not in the global context")
    }
}
@@ -23,7 +23,7 @@ import (
    "strings"
    "sync"

    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
)

const signalBuffer = 100
@@ -18,7 +18,7 @@ package dbus
import (
    "errors"
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
)

func (c *Conn) initJobs() {
@@ -208,7 +208,7 @@ func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Proper
}

func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
    return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName)
    return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
}

// ListUnits returns an array with all currently loaded units. Note that
@@ -18,7 +18,7 @@ package dbus
import (
    "fmt"
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
    "math/rand"
    "os"
    "path/filepath"
@@ -17,7 +17,7 @@ limitations under the License.
package dbus

import (
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
)

// From the systemd docs:
@@ -20,7 +20,7 @@ import (
    "errors"
    "time"

    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
)

const (
@@ -101,7 +101,7 @@ func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitSt
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
// size of the channels, the comparison function for detecting changes and a filter
// function for cutting down on the noise that your channel receives.
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
    old := make(map[string]*UnitStatus)
    statusChan := make(chan map[string]*UnitStatus, buffer)
    errChan := make(chan error, buffer)
@@ -1,3 +1,6 @@
Copyright (c) 2011-2014 - Canonical Inc.

This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
@@ -1,3 +1,15 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:

    apic.go
    emitterc.go
    parserc.go
    readerc.go
    scannerc.go
    writerc.go
    yamlh.go
    yamlprivateh.go

Copyright (c) 2006 Kirill Simonov

Permission is hereby granted, free of charge, to any person obtaining a copy of
Godeps/_workspace/src/github.com/coreos/yaml/README.md (generated, vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
Note: This is a fork of https://github.com/go-yaml/yaml. The following README
doesn't necessarily apply to this fork.

# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v1*.

To install it, run:

    go get gopkg.in/yaml.v1

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

* [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)

API stability
-------------

The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
    "fmt"
    "log"

    "gopkg.in/yaml.v1"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

type T struct {
    A string
    B struct{C int; D []int ",flow"}
}

func main() {
    t := T{}

    err := yaml.Unmarshal([]byte(data), &t)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- t:\n%v\n\n", t)

    d, err := yaml.Marshal(&t)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- t dump:\n%s\n\n", string(d))

    m := make(map[interface{}]interface{})

    err = yaml.Unmarshal([]byte(data), &m)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- m:\n%v\n\n", m)

    d, err = yaml.Marshal(&m)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
@@ -1,8 +1,11 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -27,13 +30,15 @@ type node struct {
|
||||
// Parser, produces a node tree out of a libyaml event stream.
|
||||
|
||||
type parser struct {
|
||||
parser yaml_parser_t
|
||||
event yaml_event_t
|
||||
doc *node
|
||||
parser yaml_parser_t
|
||||
event yaml_event_t
|
||||
doc *node
|
||||
transform transformString
|
||||
}
|
||||
|
||||
func newParser(b []byte) *parser {
|
||||
p := parser{}
|
||||
func newParser(b []byte, t transformString) *parser {
|
||||
p := parser{transform: t}
|
||||
|
||||
if !yaml_parser_initialize(&p.parser) {
|
||||
panic("Failed to initialize YAML emitter")
|
||||
}
|
||||
@@ -62,7 +67,7 @@ func (p *parser) destroy() {
|
||||
func (p *parser) skip() {
|
||||
if p.event.typ != yaml_NO_EVENT {
|
||||
if p.event.typ == yaml_STREAM_END_EVENT {
|
||||
panic("Attempted to go past the end of stream. Corrupted value?")
|
||||
fail("Attempted to go past the end of stream. Corrupted value?")
|
||||
}
|
||||
yaml_event_delete(&p.event)
|
||||
}
|
||||
@@ -88,7 +93,7 @@ func (p *parser) fail() {
|
||||
} else {
|
||||
msg = "Unknown problem parsing YAML content"
|
||||
}
|
||||
panic(where + msg)
|
||||
fail(where + msg)
|
||||
}
|
||||
|
||||
func (p *parser) anchor(n *node, anchor []byte) {
|
||||
@@ -113,10 +118,9 @@ func (p *parser) parse() *node {
|
||||
// Happens when attempting to decode an empty buffer.
|
||||
return nil
|
||||
default:
|
||||
panic("Attempted to parse unknown event: " +
|
||||
strconv.Itoa(int(p.event.typ)))
|
||||
panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
|
||||
}
|
||||
panic("Unreachable")
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (p *parser) node(kind int) *node {
|
||||
@@ -134,8 +138,7 @@ func (p *parser) document() *node {
|
||||
p.skip()
|
||||
n.children = append(n.children, p.parse())
|
||||
if p.event.typ != yaml_DOCUMENT_END_EVENT {
|
||||
panic("Expected end of document event but got " +
|
||||
strconv.Itoa(int(p.event.typ)))
|
||||
panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
|
||||
}
|
||||
p.skip()
|
||||
return n
|
||||
@@ -174,7 +177,10 @@ func (p *parser) mapping() *node {
|
||||
p.anchor(n, p.event.anchor)
|
||||
p.skip()
|
||||
for p.event.typ != yaml_MAPPING_END_EVENT {
|
||||
n.children = append(n.children, p.parse(), p.parse())
|
||||
key := p.parse()
|
||||
key.value = p.transform(key.value)
|
||||
value := p.parse()
|
||||
n.children = append(n.children, key, value)
|
||||
}
|
||||
p.skip()
|
||||
return n
|
||||
@@ -211,11 +217,21 @@ func newDecoder() *decoder {
|
||||
// returned to call SetYAML() with the value of *out once it's defined.
|
||||
//
|
||||
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
|
||||
if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
|
||||
setter, _ := (*out).Addr().Interface().(Setter)
|
||||
if setter != nil {
|
||||
var arg interface{}
|
||||
*out = reflect.ValueOf(&arg).Elem()
|
||||
return func() {
|
||||
*good = setter.SetYAML(shortTag(tag), arg)
|
||||
}
|
||||
}
|
||||
}
|
||||
again := true
|
||||
for again {
|
||||
again = false
|
||||
setter, _ := (*out).Interface().(Setter)
|
||||
if tag != "!!null" || setter != nil {
|
||||
if tag != yaml_NULL_TAG || setter != nil {
|
||||
if pv := (*out); pv.Kind() == reflect.Ptr {
|
||||
if pv.IsNil() {
|
||||
*out = reflect.New(pv.Type().Elem()).Elem()
|
||||
@@ -231,7 +247,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
|
||||
var arg interface{}
|
||||
*out = reflect.ValueOf(&arg).Elem()
|
||||
return func() {
|
||||
*good = setter.SetYAML(tag, arg)
|
||||
*good = setter.SetYAML(shortTag(tag), arg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -268,10 +284,10 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
||||
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if !ok {
|
||||
panic("Unknown anchor '" + n.value + "' referenced")
|
||||
fail("Unknown anchor '" + n.value + "' referenced")
|
||||
}
|
||||
if d.aliases[n.value] {
|
||||
panic("Anchor '" + n.value + "' value contains itself")
|
||||
fail("Anchor '" + n.value + "' value contains itself")
|
||||
}
|
||||
d.aliases[n.value] = true
|
||||
good = d.unmarshal(an, out)
|
||||
@@ -279,20 +295,50 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||
return good
|
||||
}
|
||||
|
||||
var zeroValue reflect.Value
|
||||
|
||||
func resetMap(out reflect.Value) {
|
||||
for _, k := range out.MapKeys() {
|
||||
out.SetMapIndex(k, zeroValue)
|
||||
}
|
||||
}
|
||||
|
||||
var durationType = reflect.TypeOf(time.Duration(0))
|
||||
|
||||
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||
var tag string
|
||||
var resolved interface{}
|
||||
if n.tag == "" && !n.implicit {
|
||||
tag = yaml_STR_TAG
|
||||
resolved = n.value
|
||||
} else {
|
||||
tag, resolved = resolve(n.tag, n.value)
|
||||
if set := d.setter(tag, &out, &good); set != nil {
|
||||
defer set()
|
||||
if tag == yaml_BINARY_TAG {
|
||||
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
||||
if err != nil {
|
||||
fail("!!binary value contains invalid base64 data")
|
||||
}
|
||||
resolved = string(data)
|
||||
}
|
||||
}
|
||||
if set := d.setter(tag, &out, &good); set != nil {
|
||||
defer set()
|
||||
}
|
||||
if resolved == nil {
|
||||
if out.Kind() == reflect.Map && !out.CanAddr() {
|
||||
resetMap(out)
|
||||
} else {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
}
|
||||
good = true
|
||||
return
|
||||
}
|
||||
switch out.Kind() {
|
||||
case reflect.String:
|
||||
if resolved != nil {
|
||||
if tag == yaml_BINARY_TAG {
|
||||
out.SetString(resolved.(string))
|
||||
good = true
|
||||
} else if resolved != nil {
|
||||
out.SetString(n.value)
|
||||
good = true
|
||||
}
|
||||
@@ -320,6 +366,14 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||
out.SetInt(int64(resolved))
|
||||
good = true
|
||||
}
|
||||
case string:
|
||||
if out.Type() == durationType {
|
||||
d, err := time.ParseDuration(resolved)
|
||||
if err == nil {
|
||||
out.SetInt(int64(d))
|
||||
good = true
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch resolved := resolved.(type) {
|
||||
@@ -358,17 +412,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||
good = true
|
||||
}
|
||||
case reflect.Ptr:
|
||||
switch resolved.(type) {
|
||||
case nil:
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||
elem := reflect.New(out.Type().Elem())
|
||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||
out.Set(elem)
|
||||
good = true
|
||||
default:
|
||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||
elem := reflect.New(out.Type().Elem())
|
||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||
out.Set(elem)
|
||||
good = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return good
|
||||
@@ -382,7 +430,7 @@ func settableValueOf(i interface{}) reflect.Value {
|
||||
}
|
||||
|
||||
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||
if set := d.setter("!!seq", &out, &good); set != nil {
|
||||
if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil {
|
||||
defer set()
|
||||
}
|
||||
var iface reflect.Value
|
||||
@@ -411,7 +459,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||
}
|
||||
|
||||
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
if set := d.setter("!!map", &out, &good); set != nil {
|
||||
if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil {
|
||||
defer set()
|
||||
}
|
||||
if out.Kind() == reflect.Struct {
|
||||
@@ -437,8 +485,19 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
}
|
||||
l := len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
k := reflect.New(kt).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
kkind := k.Kind()
|
||||
if kkind == reflect.Interface {
|
||||
kkind = k.Elem().Kind()
|
||||
}
|
||||
if kkind == reflect.Map || kkind == reflect.Slice {
|
||||
fail(fmt.Sprintf("invalid map key: %#v", k.Interface()))
|
||||
}
|
||||
e := reflect.New(et).Elem()
|
||||
if d.unmarshal(n.children[i+1], e) {
|
||||
out.SetMapIndex(k, e)
|
||||
@@ -456,7 +515,12 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
name := settableValueOf("")
|
||||
l := len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if !d.unmarshal(n.children[i], name) {
|
||||
ni := n.children[i]
|
||||
if isMerge(ni) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
if !d.unmarshal(ni, name) {
|
||||
continue
|
||||
}
|
||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||
@@ -471,3 +535,37 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
const wantMap = "map merge requires map or sequence of maps as the value"
|
||||
switch n.kind {
|
||||
case mappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
fail(wantMap)
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
case sequenceNode:
|
||||
// Step backwards as earlier nodes take precedence.
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
an, ok := d.doc.anchors[ni.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
fail(wantMap)
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
fail(wantMap)
|
||||
}
|
||||
d.unmarshal(ni, out)
|
||||
}
|
||||
default:
|
||||
fail(wantMap)
|
||||
}
|
||||
}
|
||||
|
||||
func isMerge(n *node) bool {
|
||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||
}
|
@@ -1,10 +1,12 @@
|
||||
package goyaml_test
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
. "launchpad.net/gocheck"
|
||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
||||
"github.com/coreos/yaml"
|
||||
. "gopkg.in/check.v1"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var unmarshalIntTest = 123
|
||||
@@ -315,7 +317,10 @@ var unmarshalTests = []struct {
|
||||
map[string]*string{"foo": new(string)},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]string{},
|
||||
map[string]string{"foo": ""},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]interface{}{"foo": nil},
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
@@ -350,6 +355,50 @@ var unmarshalTests = []struct {
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
},
|
||||
|
||||
// bug 1243827
|
||||
{
|
||||
"a: -b_c",
|
||||
map[string]interface{}{"a": "-b_c"},
|
||||
},
|
||||
{
|
||||
"a: +b_c",
|
||||
map[string]interface{}{"a": "+b_c"},
|
||||
},
|
||||
{
|
||||
"a: 50cent_of_dollar",
|
||||
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
"a: 3s",
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
},
|
||||
|
||||
// Issue #24.
|
||||
{
|
||||
"a: <foo>",
|
||||
map[string]string{"a": "<foo>"},
|
||||
},
|
||||
|
||||
// Base 60 floats are obsolete and unsupported.
|
||||
{
|
||||
"a: 1:1\n",
|
||||
map[string]string{"a": "1:1"},
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
"a: !!binary gIGC\n",
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
|
||||
map[string]string{"a": strings.Repeat("\x00", 52)},
|
||||
},
|
||||
}
|
||||
|
||||
type inlineB struct {
|
||||
@@ -377,7 +426,7 @@ func (s *S) TestUnmarshal(c *C) {
|
||||
pv := reflect.New(pt.Elem())
|
||||
value = pv.Interface()
|
||||
}
|
||||
err := goyaml.Unmarshal([]byte(item.data), value)
|
||||
err := yaml.Unmarshal([]byte(item.data), value)
|
||||
c.Assert(err, IsNil, Commentf("Item #%d", i))
|
||||
if t.Kind() == reflect.String {
|
||||
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
|
||||
@@ -389,7 +438,7 @@ func (s *S) TestUnmarshal(c *C) {
|
||||
|
||||
func (s *S) TestUnmarshalNaN(c *C) {
|
||||
value := map[string]interface{}{}
|
||||
err := goyaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||
}
|
||||
@@ -397,18 +446,21 @@ func (s *S) TestUnmarshalNaN(c *C) {
|
||||
var unmarshalErrorTests = []struct {
|
||||
data, error string
|
||||
}{
|
||||
{"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
|
||||
{"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"},
|
||||
{"v: [A,", "YAML error: line 1: did not find expected node content"},
|
||||
{"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
|
||||
{"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
|
||||
{"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
|
||||
{"value: -", "YAML error: block sequence entries are not allowed in this context"},
|
||||
{"a: !!binary ==", "YAML error: !!binary value contains invalid base64 data"},
|
||||
{"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`},
|
||||
{"{{.}}", `YAML error: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalErrors(c *C) {
|
||||
for _, item := range unmarshalErrorTests {
|
||||
var value interface{}
|
||||
err := goyaml.Unmarshal([]byte(item.data), &value)
|
||||
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||
}
|
||||
}
|
||||
@@ -421,6 +473,8 @@ var setterTests = []struct {
|
||||
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||
{"_: 10", "!!int", 10},
|
||||
{"_: null", "!!null", nil},
|
||||
{`_: BAR!`, "!!str", "BAR!"},
|
||||
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||
}
|
||||
|
||||
@@ -442,17 +496,31 @@ func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
type typeWithSetterField struct {
|
||||
type setterPointerType struct {
|
||||
Field *typeWithSetter "_"
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalWithSetter(c *C) {
|
||||
type setterValueType struct {
|
||||
Field typeWithSetter "_"
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalWithPointerSetter(c *C) {
|
||||
for _, item := range setterTests {
|
||||
obj := &typeWithSetterField{}
|
||||
err := goyaml.Unmarshal([]byte(item.data), obj)
|
||||
obj := &setterPointerType{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(obj.Field, NotNil,
|
||||
Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalWithValueSetter(c *C) {
|
||||
for _, item := range setterTests {
|
||||
obj := &setterValueType{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
@@ -460,7 +528,7 @@ func (s *S) TestUnmarshalWithSetter(c *C) {
|
||||
|
||||
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
|
||||
obj := &typeWithSetter{}
|
||||
err := goyaml.Unmarshal([]byte(setterTests[0].data), obj)
|
||||
err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(obj.tag, Equals, setterTests[0].tag)
|
||||
value, ok := obj.value.(map[interface{}]interface{})
|
||||
@@ -477,8 +545,8 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
||||
}()
|
||||
|
||||
m := map[string]*typeWithSetter{}
|
||||
data := "{abc: 1, def: 2, ghi: 3, jkl: 4}"
|
||||
err := goyaml.Unmarshal([]byte(data), m)
|
||||
data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
|
||||
err := yaml.Unmarshal([]byte(data), m)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(m["abc"], NotNil)
|
||||
c.Assert(m["def"], IsNil)
|
||||
@@ -489,6 +557,139 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
||||
c.Assert(m["ghi"].value, Equals, 3)
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalWithTransform(c *C) {
|
||||
data := `{a_b: 1, c-d: 2, e-f_g: 3, h_i-j: 4}`
|
||||
expect := map[string]int{
|
||||
"a_b": 1,
|
||||
"c_d": 2,
|
||||
"e_f_g": 3,
|
||||
"h_i_j": 4,
|
||||
}
|
||||
m := map[string]int{}
|
||||
yaml.UnmarshalMappingKeyTransform = func(i string) string {
|
||||
return strings.Replace(i, "-", "_", -1)
|
||||
}
|
||||
err := yaml.Unmarshal([]byte(data), m)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(m, DeepEquals, expect)
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/merge.html
|
||||
var mergeTests = `
|
||||
anchors:
|
||||
- &CENTER { "x": 1, "y": 2 }
|
||||
- &LEFT { "x": 0, "y": 2 }
|
||||
- &BIG { "r": 10 }
|
||||
- &SMALL { "r": 1 }
|
||||
|
||||
# All the following maps are equal:
|
||||
|
||||
plain:
|
||||
# Explicit keys
|
||||
"x": 1
|
||||
"y": 2
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeOne:
|
||||
# Merge one map
|
||||
<< : *CENTER
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeMultiple:
|
||||
# Merge multiple maps
|
||||
<< : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
override:
|
||||
# Override
|
||||
<< : [ *BIG, *LEFT, *SMALL ]
|
||||
"x": 1
|
||||
label: center/big
|
||||
|
||||
shortTag:
|
||||
# Explicit short merge tag
|
||||
!!merge "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
longTag:
|
||||
# Explicit merge long tag
|
||||
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
inlineMap:
|
||||
# Inlined map
|
||||
<< : {"x": 1, "y": 2, "r": 10}
|
||||
label: center/big
|
||||
|
||||
inlineSequenceMap:
|
||||
# Inlined map in sequence
|
||||
<< : [ *CENTER, {"r": 10} ]
|
||||
label: center/big
|
||||
`
|
||||
|
||||
func (s *S) TestMerge(c *C) {
|
||||
var want = map[interface{}]interface{}{
|
||||
"x": 1,
|
||||
"y": 2,
|
||||
"r": 10,
|
||||
"label": "center/big",
|
||||
}
|
||||
|
||||
var m map[string]interface{}
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMergeStruct(c *C) {
|
||||
type Data struct {
|
||||
X, Y, R int
|
||||
Label string
|
||||
}
|
||||
want := Data{1, 2, 10, "center/big"}
|
||||
|
||||
var m map[string]Data
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
var unmarshalNullTests = []func() interface{}{
|
||||
func() interface{} { var v interface{}; v = "v"; return &v },
|
||||
func() interface{} { var s = "s"; return &s },
|
||||
func() interface{} { var s = "s"; sptr := &s; return &sptr },
|
||||
func() interface{} { var i = 1; return &i },
|
||||
func() interface{} { var i = 1; iptr := &i; return &iptr },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return &m },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return m },
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalNull(c *C) {
|
||||
for _, test := range unmarshalNullTests {
|
||||
item := test()
|
||||
zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
|
||||
err := yaml.Unmarshal([]byte("null"), item)
|
||||
c.Assert(err, IsNil)
|
||||
if reflect.TypeOf(item).Kind() == reflect.Map {
|
||||
c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
|
||||
} else {
|
||||
c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//var data []byte
|
||||
//func init() {
|
||||
// var err error
|
||||
@@ -502,7 +703,7 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
||||
// var err error
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// var v map[string]interface{}
|
||||
// err = goyaml.Unmarshal(data, &v)
|
||||
// err = yaml.Unmarshal(data, &v)
|
||||
// }
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
@@ -511,9 +712,9 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
||||
//
|
||||
//func (s *S) BenchmarkMarshal(c *C) {
|
||||
// var v map[string]interface{}
|
||||
// goyaml.Unmarshal(data, &v)
|
||||
// yaml.Unmarshal(data, &v)
|
||||
// c.ResetTimer()
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// goyaml.Marshal(&v)
|
||||
// yaml.Marshal(&v)
|
||||
// }
|
||||
//}
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -973,8 +973,8 @@ func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
|
||||
if bytes.HasPrefix(tag, tag_directive.prefix) {
|
||||
emitter.tag_data.handle = tag_directive.handle
|
||||
emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
emitter.tag_data.suffix = tag
|
||||
return true
|
||||
@@ -1279,6 +1279,9 @@ func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_
|
||||
for k := 0; k < w; k++ {
|
||||
octet := value[i]
|
||||
i++
|
||||
if !put(emitter, '%') {
|
||||
return false
|
||||
}
|
||||
|
||||
c := octet >> 4
|
||||
if c < 10 {
|
@@ -1,9 +1,12 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
@@ -49,14 +52,19 @@ func (e *encoder) must(ok bool) {
|
||||
if msg == "" {
|
||||
msg = "Unknown problem generating YAML content"
|
||||
}
|
||||
panic(msg)
|
||||
fail(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
if !in.IsValid() {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
var value interface{}
|
||||
if getter, ok := in.Interface().(Getter); ok {
|
||||
tag, value = getter.GetYAML()
|
||||
tag = longTag(tag)
|
||||
if value == nil {
|
||||
e.nilv()
|
||||
return
|
||||
@@ -85,7 +93,11 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
case reflect.String:
|
||||
e.stringv(tag, in)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
e.intv(tag, in)
|
||||
if in.Type() == durationType {
|
||||
e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
|
||||
} else {
|
||||
e.intv(tag, in)
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
e.uintv(tag, in)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
@@ -93,7 +105,7 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
case reflect.Bool:
|
||||
e.boolv(tag, in)
|
||||
default:
|
||||
panic("Can't marshal type yet: " + in.Type().String())
|
||||
panic("Can't marshal type: " + in.Type().String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,11 +174,46 @@ func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||
e.emit()
|
||||
}
|
||||
|
||||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||||
//
|
||||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||||
// the time being for compatibility with other parsers.
|
||||
func isBase60Float(s string) (result bool) {
|
||||
// Fast path.
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
c := s[0]
|
||||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||||
return false
|
||||
}
|
||||
// Do the full match.
|
||||
return base60float.MatchString(s)
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/float.html, except the regular expression there
|
||||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||||
|
||||
func (e *encoder) stringv(tag string, in reflect.Value) {
|
||||
var style yaml_scalar_style_t
|
||||
s := in.String()
|
||||
if rtag, _ := resolve("", s); rtag != "!!str" {
|
||||
rtag, rs := resolve("", s)
|
||||
if rtag == yaml_BINARY_TAG {
|
||||
if tag == "" || tag == yaml_STR_TAG {
|
||||
tag = rtag
|
||||
s = rs.(string)
|
||||
} else if tag == yaml_BINARY_TAG {
|
||||
fail("explicitly tagged !!binary data must be base64-encoded")
|
||||
} else {
|
||||
fail("cannot marshal invalid UTF-8 data as " + shortTag(tag))
|
||||
}
|
||||
}
|
||||
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
|
||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||
} else if strings.Contains(s, "\n") {
|
||||
style = yaml_LITERAL_SCALAR_STYLE
|
||||
} else {
|
||||
style = yaml_PLAIN_SCALAR_STYLE
|
||||
}
|
||||
@@ -213,9 +260,6 @@ func (e *encoder) nilv() {
|
||||
|
||||
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
||||
implicit := tag == ""
|
||||
if !implicit {
|
||||
style = yaml_PLAIN_SCALAR_STYLE
|
||||
}
|
||||
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
|
||||
e.emit()
|
||||
}
|
@@ -1,12 +1,14 @@
|
||||
package goyaml_test
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
. "launchpad.net/gocheck"
|
||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/yaml"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var marshalIntTest = 123
|
||||
@@ -16,6 +18,9 @@ var marshalTests = []struct {
|
||||
data string
|
||||
}{
|
||||
{
|
||||
nil,
|
||||
"null\n",
|
||||
}, {
|
||||
&struct{}{},
|
||||
"{}\n",
|
||||
}, {
|
||||
@@ -86,7 +91,7 @@ var marshalTests = []struct {
|
||||
"v:\n- A\n- B\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
"v:\n- A\n- 'B\n\n C'\n",
|
||||
"v:\n- A\n- |-\n B\n C\n",
|
||||
}, {
|
||||
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
|
||||
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
|
||||
@@ -212,11 +217,51 @@ var marshalTests = []struct {
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
"a: 3s\n",
|
||||
},
|
||||
|
||||
// Issue #24: bug in map merging logic.
|
||||
{
|
||||
map[string]string{"a": "<foo>"},
|
||||
"a: <foo>\n",
|
||||
},
|
||||
|
||||
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
|
||||
// with old YAML 1.1 parsers.
|
||||
{
|
||||
map[string]string{"a": "1:1"},
|
||||
"a: \"1:1\"\n",
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
map[string]string{"a": "\x00"},
|
||||
"a: \"\\0\"\n",
|
||||
}, {
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
"a: !!binary gIGC\n",
|
||||
}, {
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}},
|
||||
"a: !!binary gIGC\n",
|
||||
},
|
||||
|
||||
// Escaping of tags.
|
||||
{
|
||||
map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}},
|
||||
"a: !<foo%21bar> 1\n",
|
||||
},
|
||||
}
|
||||
|
||||
func (s *S) TestMarshal(c *C) {
|
||||
for _, item := range marshalTests {
|
||||
data, err := goyaml.Marshal(item.value)
|
||||
data, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, item.data)
|
||||
}
|
||||
@@ -225,20 +270,29 @@ func (s *S) TestMarshal(c *C) {
|
||||
var marshalErrorTests = []struct {
|
||||
value interface{}
|
||||
error string
|
||||
}{
|
||||
{
|
||||
&struct {
|
||||
B int
|
||||
inlineB ",inline"
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
`Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||
},
|
||||
}
|
||||
panic string
|
||||
}{{
|
||||
value: &struct {
|
||||
B int
|
||||
inlineB ",inline"
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||
}, {
|
||||
value: typeWithGetter{"!!binary", "\x80"},
|
||||
error: "YAML error: explicitly tagged !!binary data must be base64-encoded",
|
||||
}, {
|
||||
value: typeWithGetter{"!!float", "\x80"},
|
||||
error: `YAML error: cannot marshal invalid UTF-8 data as !!float`,
|
||||
}}
|
||||
|
||||
func (s *S) TestMarshalErrors(c *C) {
|
||||
for _, item := range marshalErrorTests {
|
||||
_, err := goyaml.Marshal(item.value)
|
||||
c.Assert(err, ErrorMatches, item.error)
|
||||
if item.panic != "" {
|
||||
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
|
||||
} else {
|
||||
_, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, ErrorMatches, item.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,12 +323,12 @@ func (s *S) TestMarshalTypeCache(c *C) {
|
||||
var err error
|
||||
func() {
|
||||
type T struct{ A int }
|
||||
data, err = goyaml.Marshal(&T{})
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
func() {
|
||||
type T struct{ B int }
|
||||
data, err = goyaml.Marshal(&T{})
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
c.Assert(string(data), Equals, "b: 0\n")
|
||||
@@ -298,7 +352,7 @@ func (s *S) TestMashalWithGetter(c *C) {
|
||||
obj := &typeWithGetterField{}
|
||||
obj.Field.tag = item.tag
|
||||
obj.Field.value = item.value
|
||||
data, err := goyaml.Marshal(obj)
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, string(item.data))
|
||||
}
|
||||
@@ -308,7 +362,7 @@ func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
|
||||
obj := &typeWithGetter{}
|
||||
obj.tag = ""
|
||||
obj.value = map[string]string{"hello": "world!"}
|
||||
data, err := goyaml.Marshal(obj)
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, "hello: world!\n")
|
||||
}
|
||||
@@ -356,7 +410,7 @@ func (s *S) TestSortedOutput(c *C) {
|
||||
for _, k := range order {
|
||||
m[k] = 1
|
||||
}
|
||||
data, err := goyaml.Marshal(m)
|
||||
data, err := yaml.Marshal(m)
|
||||
c.Assert(err, IsNil)
|
||||
out := "\n" + string(data)
|
||||
last := 0
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
Godeps/_workspace/src/github.com/coreos/yaml/resolve.go (generated, vendored, new file, 190 lines)
@@ -0,0 +1,190 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// TODO: merge, timestamps, base 60 floats, omap.
|
||||
|
||||
type resolveMapItem struct {
|
||||
value interface{}
|
||||
tag string
|
||||
}
|
||||
|
||||
var resolveTable = make([]byte, 256)
|
||||
var resolveMap = make(map[string]resolveMapItem)
|
||||
|
||||
func init() {
|
||||
t := resolveTable
|
||||
t[int('+')] = 'S' // Sign
|
||||
t[int('-')] = 'S'
|
||||
for _, c := range "0123456789" {
|
||||
t[int(c)] = 'D' // Digit
|
||||
}
|
||||
for _, c := range "yYnNtTfFoO~" {
|
||||
t[int(c)] = 'M' // In map
|
||||
}
|
||||
t[int('.')] = '.' // Float (potentially in map)
|
||||
|
||||
var resolveMapList = []struct {
|
||||
v interface{}
|
||||
tag string
|
||||
l []string
|
||||
}{
|
||||
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
||||
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
||||
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
||||
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
||||
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
||||
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
||||
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
||||
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||
{"<<", yaml_MERGE_TAG, []string{"<<"}},
|
||||
}
|
||||
|
||||
m := resolveMap
|
||||
for _, item := range resolveMapList {
|
||||
for _, s := range item.l {
|
||||
m[s] = resolveMapItem{item.v, item.tag}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const longTagPrefix = "tag:yaml.org,2002:"
|
||||
|
||||
func shortTag(tag string) string {
|
||||
// TODO This can easily be made faster and produce less garbage.
|
||||
if strings.HasPrefix(tag, longTagPrefix) {
|
||||
return "!!" + tag[len(longTagPrefix):]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func longTag(tag string) string {
|
||||
if strings.HasPrefix(tag, "!!") {
|
||||
return longTagPrefix + tag[2:]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func resolvableTag(tag string) bool {
|
||||
switch tag {
|
||||
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
||||
if !resolvableTag(tag) {
|
||||
return tag, in
|
||||
}
|
||||
|
||||
defer func() {
|
||||
switch tag {
|
||||
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
|
||||
return
|
||||
}
|
||||
fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)))
|
||||
}()
|
||||
|
||||
// Any data is accepted as a !!str or !!binary.
|
||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||
hint := byte('N')
|
||||
if in != "" {
|
||||
hint = resolveTable[in[0]]
|
||||
}
|
||||
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
||||
// Handle things we can lookup in a map.
|
||||
if item, ok := resolveMap[in]; ok {
|
||||
return item.tag, item.value
|
||||
}
|
||||
|
||||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
||||
// are purposefully unsupported here. They're still quoted on
|
||||
// the way out for compatibility with other parsers, though.
|
||||
|
||||
switch hint {
|
||||
case 'M':
|
||||
// We've already checked the map above.
|
||||
|
||||
case '.':
|
||||
// Not in the map, so maybe a normal float.
|
||||
floatv, err := strconv.ParseFloat(in, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
|
||||
case 'D', 'S':
|
||||
// Int, float, or timestamp.
|
||||
plain := strings.Replace(in, "_", "", -1)
|
||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
floatv, err := strconv.ParseFloat(plain, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0b") {
|
||||
intv, err := strconv.ParseInt(plain[3:], 2, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, -int(intv)
|
||||
}
|
||||
}
|
||||
// XXX Handle timestamps here.
|
||||
|
||||
default:
|
||||
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
|
||||
}
|
||||
}
|
||||
if tag == yaml_BINARY_TAG {
|
||||
return yaml_BINARY_TAG, in
|
||||
}
|
||||
if utf8.ValidString(in) {
|
||||
return yaml_STR_TAG, in
|
||||
}
|
||||
return yaml_BINARY_TAG, encodeBase64(in)
|
||||
}
|
||||
|
||||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
||||
// as appropriate for the resulting length.
|
||||
func encodeBase64(s string) string {
|
||||
const lineLen = 70
|
||||
encLen := base64.StdEncoding.EncodedLen(len(s))
|
||||
lines := encLen/lineLen + 1
|
||||
buf := make([]byte, encLen*2+lines)
|
||||
in := buf[0:encLen]
|
||||
out := buf[encLen:]
|
||||
base64.StdEncoding.Encode(in, []byte(s))
|
||||
k := 0
|
||||
for i := 0; i < len(in); i += lineLen {
|
||||
j := i + lineLen
|
||||
if j > len(in) {
|
||||
j = len(in)
|
||||
}
|
||||
k += copy(out[k:], in[i:j])
|
||||
if lines > 1 {
|
||||
out[k] = '\n'
|
||||
k++
|
||||
}
|
||||
}
|
||||
return string(out[:k])
|
||||
}
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"reflect"
|
@@ -1,7 +1,7 @@
|
||||
package goyaml_test
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
. "launchpad.net/gocheck"
|
||||
. "gopkg.in/check.v1"
|
||||
"testing"
|
||||
)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
// Set the writer error and return false.
|
||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
@@ -1,59 +1,60 @@
|
||||
// Package goyaml implements YAML support for the Go language.
|
||||
package goyaml
|
||||
// Package yaml implements YAML support for the Go language.
|
||||
//
|
||||
// Source code and other details for the project are available at GitHub:
|
||||
//
|
||||
// https://github.com/go-yaml/yaml
|
||||
//
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type yamlError string
|
||||
|
||||
func fail(msg string) {
|
||||
panic(yamlError(msg))
|
||||
}
|
||||
|
||||
func handleErr(err *error) {
|
||||
if r := recover(); r != nil {
|
||||
if _, ok := r.(runtime.Error); ok {
|
||||
panic(r)
|
||||
} else if _, ok := r.(*reflect.ValueError); ok {
|
||||
panic(r)
|
||||
} else if _, ok := r.(externalPanic); ok {
|
||||
panic(r)
|
||||
} else if s, ok := r.(string); ok {
|
||||
*err = errors.New("YAML error: " + s)
|
||||
} else if e, ok := r.(error); ok {
|
||||
*err = e
|
||||
if e, ok := r.(yamlError); ok {
|
||||
*err = errors.New("YAML error: " + string(e))
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Objects implementing the goyaml.Setter interface will receive the YAML
// tag and value via the SetYAML method during unmarshaling, rather than
// being implicitly assigned by the goyaml machinery. If setting the value
// works, the method should return true. If it returns false, the given
// value will be omitted from maps and slices.
// The Setter interface may be implemented by types to do their own custom
// unmarshalling of YAML values, rather than being implicitly assigned by
// the yaml package machinery. If setting the value works, the method should
// return true. If it returns false, the value is considered unsupported
// and is omitted from maps and slices.
type Setter interface {
    SetYAML(tag string, value interface{}) bool
}

// Objects implementing the goyaml.Getter interface will get the GetYAML()
// method called when goyaml is requested to marshal the given value, and
// the result of this method will be marshaled in place of the actual object.
// The Getter interface is implemented by types to do their own custom
// marshalling into a YAML tag and value.
type Getter interface {
    GetYAML() (tag string, value interface{})
}
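A small sketch of the Setter and Getter contracts described above, modelled on the typeWithSetter and typeWithGetter test types elsewhere in this change; the Celsius type is purely illustrative and the import path is the one used by the updated tests:

```go
package main

import (
    "fmt"

    "github.com/coreos/yaml" // import path as used by the updated tests
)

// Celsius is an illustrative type that handles its own YAML conversion.
type Celsius struct {
    Degrees int
}

// SetYAML receives the resolved tag and value during Unmarshal; returning
// false marks the value as unsupported so it is dropped from maps and slices.
func (c *Celsius) SetYAML(tag string, value interface{}) bool {
    n, ok := value.(int)
    if !ok {
        return false
    }
    c.Degrees = n
    return true
}

// GetYAML is called during Marshal and its result is marshalled in place of
// the actual object; an empty tag lets the encoder choose one.
func (c Celsius) GetYAML() (tag string, value interface{}) {
    return "", c.Degrees
}

func main() {
    var cfg struct {
        Temp Celsius
    }
    if err := yaml.Unmarshal([]byte("temp: 21"), &cfg); err != nil {
        panic(err)
    }
    fmt.Println(cfg.Temp.Degrees) // 21

    out, err := yaml.Marshal(&cfg)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out)) // temp: 21
}
```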
|
||||
|
||||
// Unmarshal decodes the first document found within the in byte slice
|
||||
// and assigns decoded values into the object pointed by out.
|
||||
// and assigns decoded values into the out value.
|
||||
//
|
||||
// Maps, pointers to structs and ints, etc, may all be used as out values.
|
||||
// If an internal pointer within a struct is not initialized, goyaml
|
||||
// will initialize it if necessary for unmarshalling the provided data,
|
||||
// but the struct provided as out must not be a nil pointer.
|
||||
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
||||
// values. If an internal pointer within a struct is not initialized,
|
||||
// the yaml package will initialize it if necessary for unmarshalling
|
||||
// the provided data. The out parameter must not be nil.
|
||||
//
|
||||
// The type of the decoded values and the type of out will be considered,
|
||||
// and Unmarshal() will do the best possible job to unmarshal values
|
||||
// and Unmarshal will do the best possible job to unmarshal values
|
||||
// appropriately. It is NOT considered an error, though, to skip values
|
||||
// because they are not available in the decoded YAML, or if they are not
|
||||
// compatible with the out value. To ensure something was properly
|
||||
@@ -61,11 +62,11 @@ type Getter interface {
|
||||
// field (usually the zero value).
|
||||
//
|
||||
// Struct fields are only unmarshalled if they are exported (have an
|
||||
// upper case first letter), and will be unmarshalled using the field
|
||||
// name lowercased by default. When custom field names are desired, the
|
||||
// tag value may be used to tweak the name. Everything before the first
|
||||
// comma in the field tag will be used as the name. The values following
|
||||
// the comma are used to tweak the marshalling process (see Marshal).
|
||||
// upper case first letter), and are unmarshalled using the field name
|
||||
// lowercased as the default key. Custom keys may be defined via the
|
||||
// "yaml" name in the field tag: the content preceding the first comma
|
||||
// is used as the key, and the following comma-separated options are
|
||||
// used to tweak the marshalling process (see Marshal).
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// For example:
|
||||
@@ -74,8 +75,8 @@ type Getter interface {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// var T t
|
||||
// goyaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
// var t T
|
||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
//
|
||||
// See the documentation of Marshal for the format of tags and a list of
|
||||
// supported tag options.
|
||||
@@ -83,25 +84,31 @@ type Getter interface {
|
||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
d := newDecoder()
|
||||
p := newParser(in)
|
||||
p := newParser(in, UnmarshalMappingKeyTransform)
|
||||
defer p.destroy()
|
||||
node := p.parse()
|
||||
if node != nil {
|
||||
d.unmarshal(node, reflect.ValueOf(out))
|
||||
v := reflect.ValueOf(out)
|
||||
if v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
d.unmarshal(node, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal serializes the value provided into a YAML document. The structure
|
||||
// of the generated document will reflect the structure of the value itself.
|
||||
// Maps, pointers to structs and ints, etc, may all be used as the in value.
|
||||
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||
//
|
||||
// In the case of struct values, only exported fields will be serialized.
|
||||
// The lowercased field name is used as the key for each exported field,
|
||||
// but this behavior may be changed using the respective field tag.
|
||||
// The tag may also contain flags to tweak the marshalling behavior for
|
||||
// the field. Conflicting names result in a runtime error. The tag format
|
||||
// accepted is:
|
||||
// Struct fields are only unmarshalled if they are exported (have an upper case
|
||||
// first letter), and are unmarshalled using the field name lowercased as the
|
||||
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||
// tag: the content preceding the first comma is used as the key, and the
|
||||
// following comma-separated options are used to tweak the marshalling process.
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// The field tag format accepted is:
|
||||
//
|
||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
@@ -126,8 +133,8 @@ func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
// F int "a,omitempty"
|
||||
// B int
|
||||
// }
|
||||
// goyaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// goyaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||
//
|
||||
func Marshal(in interface{}) (out []byte, err error) {
|
||||
defer handleErr(&err)
|
||||
@@ -139,10 +146,21 @@ func Marshal(in interface{}) (out []byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMappingKeyTransform is a string transformation that is applied to
// each mapping key in a YAML document before it is unmarshalled. By default,
// UnmarshalMappingKeyTransform is an identity transform (no modification).
var UnmarshalMappingKeyTransform transformString = identityTransform

type transformString func(in string) (out string)

func identityTransform(in string) (out string) {
    return in
}
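The new UnmarshalMappingKeyTransform hook is exercised by TestUnmarshalWithTransform elsewhere in this change; a condensed version of that usage, with the import path taken from the updated tests, looks like this:

```go
package main

import (
    "fmt"
    "strings"

    "github.com/coreos/yaml"
)

func main() {
    // Normalize dashes in mapping keys to underscores before decoding.
    yaml.UnmarshalMappingKeyTransform = func(in string) string {
        return strings.Replace(in, "-", "_", -1)
    }
    m := map[string]int{}
    if err := yaml.Unmarshal([]byte("{a_b: 1, c-d: 2}"), m); err != nil {
        panic(err)
    }
    fmt.Println(m) // map[a_b:1 c_d:2]
}
```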
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Maintain a mapping of keys to structure field indexes
|
||||
|
||||
// The code in this section was copied from gobson.
|
||||
// The code in this section was copied from mgo/bson.
|
||||
|
||||
// structInfo holds details for the serialization of fields of
|
||||
// a given struct.
|
||||
@@ -168,12 +186,6 @@ type fieldInfo struct {
|
||||
var structMap = make(map[reflect.Type]*structInfo)
|
||||
var fieldMapMutex sync.RWMutex
|
||||
|
||||
type externalPanic string
|
||||
|
||||
func (e externalPanic) String() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
@@ -214,8 +226,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
|
||||
panic(externalPanic(msg))
|
||||
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
@@ -223,6 +234,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
// TODO: Implement support for inline maps.
|
||||
//case reflect.Map:
|
||||
// if inlineMap >= 0 {
|
||||
// return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||
@@ -250,8 +262,8 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
default:
|
||||
//panic("Option ,inline needs a struct value or map field")
|
||||
panic("Option ,inline needs a struct value field")
|
||||
//return nil, errors.New("Option ,inline needs a struct value or map field")
|
||||
return nil, errors.New("Option ,inline needs a struct value field")
|
||||
}
|
||||
continue
|
||||
}
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
@@ -294,6 +294,10 @@ const (
|
||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
|
||||
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
|
||||
|
||||
// Not in original libyaml.
|
||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
||||
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
|
||||
|
||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
|
||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
|
||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
|
@@ -1,4 +1,4 @@
|
||||
package goyaml
|
||||
package yaml
|
||||
|
||||
const (
|
||||
// The size of the input raw buffer.
|
@@ -2,7 +2,7 @@ package introspect

import (
    "encoding/xml"
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
    "strings"
)

@@ -2,7 +2,7 @@ package introspect

import (
    "encoding/xml"
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
    "reflect"
)

@@ -3,8 +3,8 @@
package prop

import (
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus/introspect"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect"
    "sync"
)
|
||||
|
Godeps/_workspace/src/github.com/tarm/goserial/LICENSE (generated, vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
Godeps/_workspace/src/github.com/tarm/goserial/README.md (generated, vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
GoSerial
========
A simple Go package to allow you to read and write from the
serial port as a stream of bytes.

Details
-------
It aims to have the same API on all platforms, including Windows. As
an added bonus, the Windows package does not use cgo, so you can
cross-compile for Windows from another platform. Unfortunately
goinstall does not currently let you cross-compile, so you will have
to do it manually:

    GOOS=windows make clean install

Currently there is very little in the way of configurability. You can
set the baud rate. Then you can Read(), Write(), or Close() the
connection. Read() will block until at least one byte is returned;
Write() behaves the same way. There is currently no exposed way to set
the timeouts, though patches are welcome.

Currently all ports are opened with 8 data bits, 1 stop bit, no
parity, no hardware flow control, and no software flow control. This
works fine for many real devices and many faux serial devices,
including USB-to-serial converters and Bluetooth serial ports.

You may Read() and Write() simultaneously on the same connection (from
different goroutines).

Usage
-----
```go
package main

import (
	"github.com/tarm/goserial"
	"log"
)

func main() {
	c := &serial.Config{Name: "COM45", Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}

	n, err := s.Write([]byte("test"))
	if err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 128)
	n, err = s.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%q", buf[:n])
}
```

Possible Future Work
--------------------
- better tests (loopback etc)
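Since Read() returns as soon as at least one byte is available and there is no exposed timeout, callers that want line-oriented input usually wrap the port in a buffered reader. Below is a minimal sketch of that pattern; the device path and the newline-terminated protocol are assumptions for illustration, not part of the package.

```go
package main

import (
	"bufio"
	"log"

	"github.com/tarm/goserial"
)

func main() {
	// Device path and baud rate are illustrative.
	c := &serial.Config{Name: "/dev/ttyUSB0", Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Read() only guarantees "at least one byte", so accumulate a full
	// newline-terminated response with a buffered reader.
	r := bufio.NewReader(s)
	line, err := r.ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("device said: %q", line)
}
```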
61
Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package serial
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestConnection(t *testing.T) {
|
||||
c0 := &Config{Name: "/dev/ttyUSB0", Baud: 115200}
|
||||
c1 := &Config{Name: "/dev/ttyUSB1", Baud: 115200}
|
||||
|
||||
s1, err := OpenPort(c0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s2, err := OpenPort(c1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ch := make(chan int, 1)
|
||||
go func() {
|
||||
buf := make([]byte, 128)
|
||||
var readCount int
|
||||
for {
|
||||
n, err := s2.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
readCount++
|
||||
t.Logf("Read %v %v bytes: % 02x %s", readCount, n, buf[:n], buf[:n])
|
||||
select {
|
||||
case <-ch:
|
||||
ch <- readCount
|
||||
close(ch)
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err = s1.Write([]byte("hello")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = s1.Write([]byte(" ")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
if _, err = s1.Write([]byte("world")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second / 10)
|
||||
|
||||
ch <- 0
|
||||
s1.Write([]byte(" ")) // We could be blocked in the read without this
|
||||
c := <-ch
|
||||
exp := 5
|
||||
if c >= exp {
|
||||
t.Fatalf("Expected less than %v read, got %v", exp, c)
|
||||
}
|
||||
}
|
99
Godeps/_workspace/src/github.com/tarm/goserial/serial.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
/*
|
||||
Goserial is a simple go package to allow you to read and write from
|
||||
the serial port as a stream of bytes.
|
||||
|
||||
It aims to have the same API on all platforms, including windows. As
|
||||
an added bonus, the windows package does not use cgo, so you can cross
|
||||
compile for windows from another platform. Unfortunately goinstall
|
||||
does not currently let you cross compile so you will have to do it
|
||||
manually:
|
||||
|
||||
GOOS=windows make clean install
|
||||
|
||||
Currently there is very little in the way of configurability. You can
|
||||
set the baud rate. Then you can Read(), Write(), or Close() the
|
||||
connection. Read() will block until at least one byte is returned.
|
||||
Write is the same. There is currently no exposed way to set the
|
||||
timeouts, though patches are welcome.
|
||||
|
||||
Currently all ports are opened with 8 data bits, 1 stop bit, no
|
||||
parity, no hardware flow control, and no software flow control. This
|
||||
works fine for many real devices and many faux serial devices
|
||||
including usb-to-serial converters and bluetooth serial ports.
|
||||
|
||||
You may Read() and Write() simultaneously on the same connection (from
|
||||
different goroutines).
|
||||
|
||||
Example usage:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/tarm/goserial"
|
||||
"log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := &serial.Config{Name: "COM5", Baud: 115200}
|
||||
s, err := serial.OpenPort(c)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
n, err := s.Write([]byte("test"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 128)
|
||||
n, err = s.Read(buf)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Printf("%q", buf[:n])
|
||||
}
|
||||
*/
|
||||
package serial
|
||||
|
||||
import "io"
|
||||
|
||||
// Config contains the information needed to open a serial port.
|
||||
//
|
||||
// Currently few options are implemented, but more may be added in the
|
||||
// future (patches welcome), so it is recommended that you create a
|
||||
// new config addressing the fields by name rather than by order.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// c0 := &serial.Config{Name: "COM45", Baud: 115200}
|
||||
// or
|
||||
// c1 := new(serial.Config)
|
||||
// c1.Name = "/dev/tty.usbserial"
|
||||
// c1.Baud = 115200
|
||||
//
|
||||
type Config struct {
|
||||
Name string
|
||||
Baud int
|
||||
|
||||
// Size int // 0 get translated to 8
|
||||
// Parity SomeNewTypeToGetCorrectDefaultOf_None
|
||||
// StopBits SomeNewTypeToGetCorrectDefaultOf_1
|
||||
|
||||
// RTSFlowControl bool
|
||||
// DTRFlowControl bool
|
||||
// XONFlowControl bool
|
||||
|
||||
// CRLFTranslate bool
|
||||
// TimeoutStuff int
|
||||
}
|
||||
|
||||
// OpenPort opens a serial port with the specified configuration
|
||||
func OpenPort(c *Config) (io.ReadWriteCloser, error) {
|
||||
return openPort(c.Name, c.Baud)
|
||||
}
|
||||
|
||||
// func Flush()
|
||||
|
||||
// func SendBreak()
|
||||
|
||||
// func RegisterBreakHandler(func())
|
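Because OpenPort returns a plain io.ReadWriteCloser, the port composes directly with the standard library. A small sketch follows, constructing the Config by field name as the comment above recommends; the device path is an assumed example.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/tarm/goserial"
)

func main() {
	// Address Config fields by name so the code keeps working if more
	// options are added later, as the package docs suggest.
	c := new(serial.Config)
	c.Name = "/dev/tty.usbserial" // illustrative device path
	c.Baud = 115200

	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// The port is just an io.ReadWriteCloser: echo everything the device
	// sends to stdout with the standard io.Copy helper.
	if _, err := io.Copy(os.Stdout, s); err != nil {
		log.Fatal(err)
	}
}
```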
90
Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// +build linux,!cgo
|
||||
|
||||
package serial
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||
|
||||
var bauds = map[int]uint32{
|
||||
50: syscall.B50,
|
||||
75: syscall.B75,
|
||||
110: syscall.B110,
|
||||
134: syscall.B134,
|
||||
150: syscall.B150,
|
||||
200: syscall.B200,
|
||||
300: syscall.B300,
|
||||
600: syscall.B600,
|
||||
1200: syscall.B1200,
|
||||
1800: syscall.B1800,
|
||||
2400: syscall.B2400,
|
||||
4800: syscall.B4800,
|
||||
9600: syscall.B9600,
|
||||
19200: syscall.B19200,
|
||||
38400: syscall.B38400,
|
||||
57600: syscall.B57600,
|
||||
115200: syscall.B115200,
|
||||
230400: syscall.B230400,
|
||||
460800: syscall.B460800,
|
||||
500000: syscall.B500000,
|
||||
576000: syscall.B576000,
|
||||
921600: syscall.B921600,
|
||||
1000000: syscall.B1000000,
|
||||
1152000: syscall.B1152000,
|
||||
1500000: syscall.B1500000,
|
||||
2000000: syscall.B2000000,
|
||||
2500000: syscall.B2500000,
|
||||
3000000: syscall.B3000000,
|
||||
3500000: syscall.B3500000,
|
||||
4000000: syscall.B4000000,
|
||||
}
|
||||
|
||||
rate := bauds[baud]
|
||||
|
||||
if rate == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil && f != nil {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
fd := f.Fd()
|
||||
t := syscall.Termios{
|
||||
Iflag: syscall.IGNPAR,
|
||||
Cflag: syscall.CS8 | syscall.CREAD | syscall.CLOCAL | rate,
|
||||
Cc: [32]uint8{syscall.VMIN: 1},
|
||||
Ispeed: rate,
|
||||
Ospeed: rate,
|
||||
}
|
||||
|
||||
if _, _, errno := syscall.Syscall6(
|
||||
syscall.SYS_IOCTL,
|
||||
uintptr(fd),
|
||||
uintptr(syscall.TCSETS),
|
||||
uintptr(unsafe.Pointer(&t)),
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
); errno != 0 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
if err = syscall.SetNonblock(int(fd), false); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
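One caller-facing quirk of this implementation: when the requested baud rate is not in the lookup table, `rate` is zero and the bare `return` hands back a nil port with a nil error. The sketch below is a hedged caller-side guard; the wrapper function is illustrative, not part of the package.

```go
package main

import (
	"fmt"
	"io"
	"log"

	"github.com/tarm/goserial"
)

// openSerial is an illustrative wrapper: on Linux, openPort silently returns
// nil, nil when the baud rate is missing from its table, so treat a nil port
// without an error as a failure.
func openSerial(name string, baud int) (io.ReadWriteCloser, error) {
	s, err := serial.OpenPort(&serial.Config{Name: name, Baud: baud})
	if err != nil {
		return nil, err
	}
	if s == nil {
		return nil, fmt.Errorf("unsupported baud rate %d", baud)
	}
	return s, nil
}

func main() {
	// 31250 (MIDI) is not in the lookup table, so this reports an error
	// instead of returning a nil port.
	if _, err := openSerial("/dev/ttyUSB0", 31250); err != nil {
		log.Fatal(err)
	}
}
```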
107
Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
// +build !windows,cgo
|
||||
|
||||
package serial
|
||||
|
||||
// #include <termios.h>
|
||||
// #include <unistd.h>
|
||||
import "C"
|
||||
|
||||
// TODO: Maybe change to using syscall package + ioctl instead of cgo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
//"unsafe"
|
||||
)
|
||||
|
||||
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fd := C.int(f.Fd())
|
||||
if C.isatty(fd) != 1 {
|
||||
f.Close()
|
||||
return nil, errors.New("File is not a tty")
|
||||
}
|
||||
|
||||
var st C.struct_termios
|
||||
_, err = C.tcgetattr(fd, &st)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
var speed C.speed_t
|
||||
switch baud {
|
||||
case 115200:
|
||||
speed = C.B115200
|
||||
case 57600:
|
||||
speed = C.B57600
|
||||
case 38400:
|
||||
speed = C.B38400
|
||||
case 19200:
|
||||
speed = C.B19200
|
||||
case 9600:
|
||||
speed = C.B9600
|
||||
case 4800:
|
||||
speed = C.B4800
|
||||
case 2400:
|
||||
speed = C.B2400
|
||||
default:
|
||||
f.Close()
|
||||
return nil, fmt.Errorf("Unknown baud rate %v", baud)
|
||||
}
|
||||
|
||||
_, err = C.cfsetispeed(&st, speed)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
_, err = C.cfsetospeed(&st, speed)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Select local mode
|
||||
st.c_cflag |= (C.CLOCAL | C.CREAD)
|
||||
|
||||
// Select raw mode
|
||||
st.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ISIG)
|
||||
st.c_oflag &= ^C.tcflag_t(C.OPOST)
|
||||
|
||||
_, err = C.tcsetattr(fd, C.TCSANOW, &st)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//fmt.Println("Tweaking", name)
|
||||
r1, _, e := syscall.Syscall(syscall.SYS_FCNTL,
|
||||
uintptr(f.Fd()),
|
||||
uintptr(syscall.F_SETFL),
|
||||
uintptr(0))
|
||||
if e != 0 || r1 != 0 {
|
||||
s := fmt.Sprint("Clearing NONBLOCK syscall error:", e, r1)
|
||||
f.Close()
|
||||
return nil, errors.New(s)
|
||||
}
|
||||
|
||||
/*
|
||||
r1, _, e = syscall.Syscall(syscall.SYS_IOCTL,
|
||||
uintptr(f.Fd()),
|
||||
uintptr(0x80045402), // IOSSIOSPEED
|
||||
uintptr(unsafe.Pointer(&baud)));
|
||||
if e != 0 || r1 != 0 {
|
||||
s := fmt.Sprint("Baudrate syscall error:", e, r1)
|
||||
f.Close()
|
||||
return nil, os.NewError(s)
|
||||
}
|
||||
*/
|
||||
|
||||
return f, nil
|
||||
}
|
263
Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
// +build windows
|
||||
|
||||
package serial
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type serialPort struct {
|
||||
f *os.File
|
||||
fd syscall.Handle
|
||||
rl sync.Mutex
|
||||
wl sync.Mutex
|
||||
ro *syscall.Overlapped
|
||||
wo *syscall.Overlapped
|
||||
}
|
||||
|
||||
type structDCB struct {
|
||||
DCBlength, BaudRate uint32
|
||||
flags [4]byte
|
||||
wReserved, XonLim, XoffLim uint16
|
||||
ByteSize, Parity, StopBits byte
|
||||
XonChar, XoffChar, ErrorChar, EofChar, EvtChar byte
|
||||
wReserved1 uint16
|
||||
}
|
||||
|
||||
type structTimeouts struct {
|
||||
ReadIntervalTimeout uint32
|
||||
ReadTotalTimeoutMultiplier uint32
|
||||
ReadTotalTimeoutConstant uint32
|
||||
WriteTotalTimeoutMultiplier uint32
|
||||
WriteTotalTimeoutConstant uint32
|
||||
}
|
||||
|
||||
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||
if len(name) > 0 && name[0] != '\\' {
|
||||
name = "\\\\.\\" + name
|
||||
}
|
||||
|
||||
h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
|
||||
syscall.GENERIC_READ|syscall.GENERIC_WRITE,
|
||||
0,
|
||||
nil,
|
||||
syscall.OPEN_EXISTING,
|
||||
syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
|
||||
0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := os.NewFile(uintptr(h), name)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err = setCommState(h, baud); err != nil {
|
||||
return
|
||||
}
|
||||
if err = setupComm(h, 64, 64); err != nil {
|
||||
return
|
||||
}
|
||||
if err = setCommTimeouts(h); err != nil {
|
||||
return
|
||||
}
|
||||
if err = setCommMask(h); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ro, err := newOverlapped()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
wo, err := newOverlapped()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
port := new(serialPort)
|
||||
port.f = f
|
||||
port.fd = h
|
||||
port.ro = ro
|
||||
port.wo = wo
|
||||
|
||||
return port, nil
|
||||
}
|
||||
|
||||
func (p *serialPort) Close() error {
|
||||
return p.f.Close()
|
||||
}
|
||||
|
||||
func (p *serialPort) Write(buf []byte) (int, error) {
|
||||
p.wl.Lock()
|
||||
defer p.wl.Unlock()
|
||||
|
||||
if err := resetEvent(p.wo.HEvent); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var n uint32
|
||||
err := syscall.WriteFile(p.fd, buf, &n, p.wo)
|
||||
if err != nil && err != syscall.ERROR_IO_PENDING {
|
||||
return int(n), err
|
||||
}
|
||||
return getOverlappedResult(p.fd, p.wo)
|
||||
}
|
||||
|
||||
func (p *serialPort) Read(buf []byte) (int, error) {
|
||||
if p == nil || p.f == nil {
|
||||
return 0, fmt.Errorf("Invalid port on read %v %v", p, p.f)
|
||||
}
|
||||
|
||||
p.rl.Lock()
|
||||
defer p.rl.Unlock()
|
||||
|
||||
if err := resetEvent(p.ro.HEvent); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var done uint32
|
||||
err := syscall.ReadFile(p.fd, buf, &done, p.ro)
|
||||
if err != nil && err != syscall.ERROR_IO_PENDING {
|
||||
return int(done), err
|
||||
}
|
||||
return getOverlappedResult(p.fd, p.ro)
|
||||
}
|
||||
|
||||
var (
|
||||
nSetCommState,
|
||||
nSetCommTimeouts,
|
||||
nSetCommMask,
|
||||
nSetupComm,
|
||||
nGetOverlappedResult,
|
||||
nCreateEvent,
|
||||
nResetEvent uintptr
|
||||
)
|
||||
|
||||
func init() {
|
||||
k32, err := syscall.LoadLibrary("kernel32.dll")
|
||||
if err != nil {
|
||||
panic("LoadLibrary " + err.Error())
|
||||
}
|
||||
defer syscall.FreeLibrary(k32)
|
||||
|
||||
nSetCommState = getProcAddr(k32, "SetCommState")
|
||||
nSetCommTimeouts = getProcAddr(k32, "SetCommTimeouts")
|
||||
nSetCommMask = getProcAddr(k32, "SetCommMask")
|
||||
nSetupComm = getProcAddr(k32, "SetupComm")
|
||||
nGetOverlappedResult = getProcAddr(k32, "GetOverlappedResult")
|
||||
nCreateEvent = getProcAddr(k32, "CreateEventW")
|
||||
nResetEvent = getProcAddr(k32, "ResetEvent")
|
||||
}
|
||||
|
||||
func getProcAddr(lib syscall.Handle, name string) uintptr {
|
||||
addr, err := syscall.GetProcAddress(lib, name)
|
||||
if err != nil {
|
||||
panic(name + " " + err.Error())
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
func setCommState(h syscall.Handle, baud int) error {
|
||||
var params structDCB
|
||||
params.DCBlength = uint32(unsafe.Sizeof(params))
|
||||
|
||||
params.flags[0] = 0x01 // fBinary
|
||||
params.flags[0] |= 0x10 // Assert DSR
|
||||
|
||||
params.BaudRate = uint32(baud)
|
||||
params.ByteSize = 8
|
||||
|
||||
r, _, err := syscall.Syscall(nSetCommState, 2, uintptr(h), uintptr(unsafe.Pointer(¶ms)), 0)
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setCommTimeouts(h syscall.Handle) error {
|
||||
var timeouts structTimeouts
|
||||
const MAXDWORD = 1<<32 - 1
|
||||
timeouts.ReadIntervalTimeout = MAXDWORD
|
||||
timeouts.ReadTotalTimeoutMultiplier = MAXDWORD
|
||||
timeouts.ReadTotalTimeoutConstant = MAXDWORD - 1
|
||||
|
||||
/* From http://msdn.microsoft.com/en-us/library/aa363190(v=VS.85).aspx
|
||||
|
||||
For blocking I/O see below:
|
||||
|
||||
Remarks:
|
||||
|
||||
If an application sets ReadIntervalTimeout and
|
||||
ReadTotalTimeoutMultiplier to MAXDWORD and sets
|
||||
ReadTotalTimeoutConstant to a value greater than zero and
|
||||
less than MAXDWORD, one of the following occurs when the
|
||||
ReadFile function is called:
|
||||
|
||||
If there are any bytes in the input buffer, ReadFile returns
|
||||
immediately with the bytes in the buffer.
|
||||
|
||||
If there are no bytes in the input buffer, ReadFile waits
|
||||
until a byte arrives and then returns immediately.
|
||||
|
||||
If no bytes arrive within the time specified by
|
||||
ReadTotalTimeoutConstant, ReadFile times out.
|
||||
*/
|
||||
|
||||
r, _, err := syscall.Syscall(nSetCommTimeouts, 2, uintptr(h), uintptr(unsafe.Pointer(&timeouts)), 0)
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupComm(h syscall.Handle, in, out int) error {
|
||||
r, _, err := syscall.Syscall(nSetupComm, 3, uintptr(h), uintptr(in), uintptr(out))
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setCommMask(h syscall.Handle) error {
|
||||
const EV_RXCHAR = 0x0001
|
||||
r, _, err := syscall.Syscall(nSetCommMask, 2, uintptr(h), EV_RXCHAR, 0)
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func resetEvent(h syscall.Handle) error {
|
||||
r, _, err := syscall.Syscall(nResetEvent, 1, uintptr(h), 0, 0)
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newOverlapped() (*syscall.Overlapped, error) {
|
||||
var overlapped syscall.Overlapped
|
||||
r, _, err := syscall.Syscall6(nCreateEvent, 4, 0, 1, 0, 0, 0, 0)
|
||||
if r == 0 {
|
||||
return nil, err
|
||||
}
|
||||
overlapped.HEvent = syscall.Handle(r)
|
||||
return &overlapped, nil
|
||||
}
|
||||
|
||||
func getOverlappedResult(h syscall.Handle, overlapped *syscall.Overlapped) (int, error) {
|
||||
var n int
|
||||
r, _, err := syscall.Syscall6(nGetOverlappedResult, 4,
|
||||
uintptr(h),
|
||||
uintptr(unsafe.Pointer(overlapped)),
|
||||
uintptr(unsafe.Pointer(&n)), 1, 0, 0)
|
||||
if r == 0 {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
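The separate read/write locks and overlapped events above are what back the documented guarantee that Read() and Write() may be used from different goroutines. Below is a rough sketch of that usage; the port name and the written command are assumptions for illustration.

```go
package main

import (
	"log"
	"time"

	"github.com/tarm/goserial"
)

func main() {
	// Port name and command are illustrative.
	s, err := serial.OpenPort(&serial.Config{Name: "COM4", Baud: 9600})
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// One goroutine reads while main writes; each direction is guarded by
	// its own lock in the port type, which is what makes this pattern safe.
	go func() {
		buf := make([]byte, 128)
		for {
			n, err := s.Read(buf)
			if err != nil {
				log.Println("read:", err)
				return
			}
			log.Printf("recv %q", buf[:n])
		}
	}()

	if _, err := s.Write([]byte("AT\r\n")); err != nil {
		log.Fatal(err)
	}
	time.Sleep(2 * time.Second) // give the reader a moment before exiting
}
```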
3
MAINTAINERS
Normal file
@@ -0,0 +1,3 @@
|
||||
Alex Crawford <alex.crawford@coreos.com> (@crawford)
|
||||
Jonathan Boulle <jonathan.boulle@coreos.com> (@jonboulle)
|
||||
Brian Waldon <brian.waldon@coreos.com> (@bcwaldon)
|
158
config/config.go
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
|
||||
)
|
||||
|
||||
// CloudConfig encapsulates the entire cloud-config configuration file and maps
|
||||
// directly to YAML. Fields that cannot be set in the cloud-config (fields
|
||||
// used internally) have the YAML tag '-' so that they aren't marshalled.
|
||||
type CloudConfig struct {
|
||||
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
|
||||
CoreOS CoreOS `yaml:"coreos"`
|
||||
WriteFiles []File `yaml:"write_files"`
|
||||
Hostname string `yaml:"hostname"`
|
||||
Users []User `yaml:"users"`
|
||||
ManageEtcHosts EtcHosts `yaml:"manage_etc_hosts"`
|
||||
NetworkConfigPath string `yaml:"-"`
|
||||
NetworkConfig string `yaml:"-"`
|
||||
}
|
||||
|
||||
type CoreOS struct {
|
||||
Etcd Etcd `yaml:"etcd"`
|
||||
Flannel Flannel `yaml:"flannel"`
|
||||
Fleet Fleet `yaml:"fleet"`
|
||||
Locksmith Locksmith `yaml:"locksmith"`
|
||||
OEM OEM `yaml:"oem"`
|
||||
Update Update `yaml:"update"`
|
||||
Units []Unit `yaml:"units"`
|
||||
}
|
||||
|
||||
func IsCloudConfig(userdata string) bool {
|
||||
header := strings.SplitN(userdata, "\n", 2)[0]
|
||||
|
||||
// Explicitly trim the header so we can handle user-data from
|
||||
// non-unix operating systems. The rest of the file is parsed
|
||||
// by yaml, which correctly handles CRLF.
|
||||
header = strings.TrimSuffix(header, "\r")
|
||||
|
||||
return (header == "#cloud-config")
|
||||
}
|
||||
|
||||
// NewCloudConfig instantiates a new CloudConfig from the given contents (a
|
||||
// string of YAML), returning any error encountered. It will ignore unknown
|
||||
// fields but log encountering them.
|
||||
func NewCloudConfig(contents string) (*CloudConfig, error) {
|
||||
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
|
||||
return strings.Replace(nameIn, "-", "_", -1)
|
||||
}
|
||||
var cfg CloudConfig
|
||||
err := yaml.Unmarshal([]byte(contents), &cfg)
|
||||
return &cfg, err
|
||||
}
|
||||
|
||||
func (cc CloudConfig) String() string {
|
||||
bytes, err := yaml.Marshal(cc)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
stringified := string(bytes)
|
||||
stringified = fmt.Sprintf("#cloud-config\n%s", stringified)
|
||||
|
||||
return stringified
|
||||
}
|
||||
|
||||
// IsZero returns whether or not the parameter is the zero value for its type.
|
||||
// If the parameter is a struct, only the exported fields are considered.
|
||||
func IsZero(c interface{}) bool {
|
||||
return isZero(reflect.ValueOf(c))
|
||||
}
|
||||
|
||||
type ErrorValid struct {
|
||||
Value string
|
||||
Valid string
|
||||
Field string
|
||||
}
|
||||
|
||||
func (e ErrorValid) Error() string {
|
||||
return fmt.Sprintf("invalid value %q for option %q (valid options: %q)", e.Value, e.Field, e.Valid)
|
||||
}
|
||||
|
||||
// AssertStructValid checks the fields in the structure and makes sure that
|
||||
// they contain valid values as specified by the 'valid' tag. Empty fields are
|
||||
// implicitly valid.
|
||||
func AssertStructValid(c interface{}) error {
|
||||
ct := reflect.TypeOf(c)
|
||||
cv := reflect.ValueOf(c)
|
||||
for i := 0; i < ct.NumField(); i++ {
|
||||
ft := ct.Field(i)
|
||||
if !isFieldExported(ft) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := AssertValid(cv.Field(i), ft.Tag.Get("valid")); err != nil {
|
||||
err.Field = ft.Name
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AssertValid checks to make sure that the given value is in the list of
|
||||
// valid values. Zero values are implicitly valid.
|
||||
func AssertValid(value reflect.Value, valid string) *ErrorValid {
|
||||
if valid == "" || isZero(value) {
|
||||
return nil
|
||||
}
|
||||
|
||||
vs := fmt.Sprintf("%v", value.Interface())
|
||||
if m, _ := regexp.MatchString(valid, vs); m {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ErrorValid{
|
||||
Value: vs,
|
||||
Valid: valid,
|
||||
}
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
vt := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if isFieldExported(vt.Field(i)) && !isZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
default:
|
||||
return v.Interface() == reflect.Zero(v.Type()).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
func isFieldExported(f reflect.StructField) bool {
|
||||
return f.PkgPath == ""
|
||||
}
|
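Putting the pieces above together: IsCloudConfig checks the `#cloud-config` header, NewCloudConfig normalizes hyphenated keys to underscores before unmarshalling, and String() re-serializes with the header prepended. Below is a small round-trip sketch; the import path follows the repository layout shown in this diff, and the user-data document is an invented example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	userdata := `#cloud-config
hostname: node-1
write-files:            # hyphenated key; normalized to write_files on parse
  - path: /etc/motd
    content: hello
`
	if !config.IsCloudConfig(userdata) {
		log.Fatal("not a cloud-config document")
	}

	cfg, err := config.NewCloudConfig(userdata)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Hostname)           // "node-1"
	fmt.Println(cfg.WriteFiles[0].Path) // "/etc/motd"
	fmt.Print(cfg.String())             // re-serialized with the #cloud-config header
}
```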
499
config/config_test.go
Normal file
@@ -0,0 +1,499 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
contents string
|
||||
|
||||
config CloudConfig
|
||||
}{
|
||||
{},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - path: underscore",
|
||||
config: CloudConfig{WriteFiles: []File{File{Path: "underscore"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite-files:\n - path: hyphen",
|
||||
config: CloudConfig{WriteFiles: []File{File{Path: "hyphen"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: off",
|
||||
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "off"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: false",
|
||||
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "false"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: 0744",
|
||||
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: 744",
|
||||
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: '0744'",
|
||||
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: '744'",
|
||||
config: CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
config, err := NewCloudConfig(tt.contents)
|
||||
if err != nil {
|
||||
t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(&tt.config, config) {
|
||||
t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsZero(t *testing.T) {
|
||||
tests := []struct {
|
||||
c interface{}
|
||||
|
||||
empty bool
|
||||
}{
|
||||
{struct{}{}, true},
|
||||
{struct{ a, b string }{}, true},
|
||||
{struct{ A, b string }{}, true},
|
||||
{struct{ A, B string }{}, true},
|
||||
{struct{ A string }{A: "hello"}, false},
|
||||
{struct{ A int }{}, true},
|
||||
{struct{ A int }{A: 1}, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if empty := IsZero(tt.c); tt.empty != empty {
|
||||
t.Errorf("bad result (%q): want %t, got %t", tt.c, tt.empty, empty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssertStructValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
c interface{}
|
||||
|
||||
err error
|
||||
}{
|
||||
{struct{}{}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "1", b: "2"}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "1", b: "hello"}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: "^1|2$"}},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 1, b: 2}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 1, b: 9}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: "^1|2$"}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if err := AssertStructValid(tt.c); !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad result (%q): want %q, got %q", tt.c, tt.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigCompile(t *testing.T) {
|
||||
tests := []interface{}{
|
||||
Etcd{},
|
||||
File{},
|
||||
Flannel{},
|
||||
Fleet{},
|
||||
Locksmith{},
|
||||
OEM{},
|
||||
Unit{},
|
||||
Update{},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
ttt := reflect.TypeOf(tt)
|
||||
for i := 0; i < ttt.NumField(); i++ {
|
||||
ft := ttt.Field(i)
|
||||
if !isFieldExported(ft) {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := regexp.Compile(ft.Tag.Get("valid")); err != nil {
|
||||
t.Errorf("bad regexp(%s.%s): want %v, got %s", ttt.Name(), ft.Name, nil, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUnknownKeys(t *testing.T) {
|
||||
contents := `
|
||||
coreos:
|
||||
etcd:
|
||||
discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877"
|
||||
coreos_unknown:
|
||||
foo: "bar"
|
||||
section_unknown:
|
||||
dunno:
|
||||
something
|
||||
bare_unknown:
|
||||
bar
|
||||
write_files:
|
||||
- content: fun
|
||||
path: /var/party
|
||||
file_unknown: nofun
|
||||
users:
|
||||
- name: fry
|
||||
passwd: somehash
|
||||
user_unknown: philip
|
||||
hostname:
|
||||
foo
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("error instantiating CloudConfig with unknown keys: %v", err)
|
||||
}
|
||||
if cfg.Hostname != "foo" {
|
||||
t.Fatalf("hostname not correctly set when invalid keys are present")
|
||||
}
|
||||
if cfg.CoreOS.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
|
||||
t.Fatalf("etcd section not correctly set when invalid keys are present")
|
||||
}
|
||||
if len(cfg.WriteFiles) < 1 || cfg.WriteFiles[0].Content != "fun" || cfg.WriteFiles[0].Path != "/var/party" {
|
||||
t.Fatalf("write_files section not correctly set when invalid keys are present")
|
||||
}
|
||||
if len(cfg.Users) < 1 || cfg.Users[0].Name != "fry" || cfg.Users[0].PasswordHash != "somehash" {
|
||||
t.Fatalf("users section not correctly set when invalid keys are present")
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that the parsing of a cloud config file "generally works"
|
||||
func TestCloudConfigEmpty(t *testing.T) {
|
||||
cfg, err := NewCloudConfig("")
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error :%v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 0 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
}
|
||||
|
||||
if len(cfg.WriteFiles) != 0 {
|
||||
t.Error("Expected zero WriteFiles")
|
||||
}
|
||||
|
||||
if cfg.Hostname != "" {
|
||||
t.Errorf("Expected hostname to be empty, got '%s'", cfg.Hostname)
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that the parsing of a cloud config file "generally works"
|
||||
func TestCloudConfig(t *testing.T) {
|
||||
contents := `
|
||||
coreos:
|
||||
etcd:
|
||||
discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877"
|
||||
update:
|
||||
reboot_strategy: reboot
|
||||
units:
|
||||
- name: 50-eth0.network
|
||||
runtime: yes
|
||||
content: '[Match]
|
||||
|
||||
Name=eth47
|
||||
|
||||
|
||||
[Network]
|
||||
|
||||
Address=10.209.171.177/19
|
||||
|
||||
'
|
||||
oem:
|
||||
id: rackspace
|
||||
name: Rackspace Cloud Servers
|
||||
version_id: 168.0.0
|
||||
home_url: https://www.rackspace.com/cloud/servers/
|
||||
bug_report_url: https://github.com/coreos/coreos-overlay
|
||||
ssh_authorized_keys:
|
||||
- foobar
|
||||
- foobaz
|
||||
write_files:
|
||||
- content: |
|
||||
penny
|
||||
elroy
|
||||
path: /etc/dogepack.conf
|
||||
permissions: '0644'
|
||||
owner: root:dogepack
|
||||
hostname: trontastic
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error :%v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 2 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
} else if keys[0] != "foobar" {
|
||||
t.Error("Expected first SSH key to be 'foobar'")
|
||||
} else if keys[1] != "foobaz" {
|
||||
t.Error("Expected first SSH key to be 'foobaz'")
|
||||
}
|
||||
|
||||
if len(cfg.WriteFiles) != 1 {
|
||||
t.Error("Failed to parse correct number of write_files")
|
||||
} else {
|
||||
wf := cfg.WriteFiles[0]
|
||||
if wf.Content != "penny\nelroy\n" {
|
||||
t.Errorf("WriteFile has incorrect contents '%s'", wf.Content)
|
||||
}
|
||||
if wf.Encoding != "" {
|
||||
t.Errorf("WriteFile has incorrect encoding %s", wf.Encoding)
|
||||
}
|
||||
if wf.RawFilePermissions != "0644" {
|
||||
t.Errorf("WriteFile has incorrect permissions %s", wf.RawFilePermissions)
|
||||
}
|
||||
if wf.Path != "/etc/dogepack.conf" {
|
||||
t.Errorf("WriteFile has incorrect path %s", wf.Path)
|
||||
}
|
||||
if wf.Owner != "root:dogepack" {
|
||||
t.Errorf("WriteFile has incorrect owner %s", wf.Owner)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cfg.CoreOS.Units) != 1 {
|
||||
t.Error("Failed to parse correct number of units")
|
||||
} else {
|
||||
u := cfg.CoreOS.Units[0]
|
||||
expect := `[Match]
|
||||
Name=eth47
|
||||
|
||||
[Network]
|
||||
Address=10.209.171.177/19
|
||||
`
|
||||
if u.Content != expect {
|
||||
t.Errorf("Unit has incorrect contents '%s'.\nExpected '%s'.", u.Content, expect)
|
||||
}
|
||||
if u.Runtime != true {
|
||||
t.Errorf("Unit has incorrect runtime value")
|
||||
}
|
||||
if u.Name != "50-eth0.network" {
|
||||
t.Errorf("Unit has incorrect name %s", u.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.CoreOS.OEM.ID != "rackspace" {
|
||||
t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.CoreOS.OEM.ID)
|
||||
}
|
||||
|
||||
if cfg.Hostname != "trontastic" {
|
||||
t.Errorf("Failed to parse hostname")
|
||||
}
|
||||
if cfg.CoreOS.Update.RebootStrategy != "reboot" {
|
||||
t.Errorf("Failed to parse locksmith strategy")
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that our interface conversion doesn't panic
|
||||
func TestCloudConfigKeysNotList(t *testing.T) {
|
||||
contents := `
|
||||
ssh_authorized_keys:
|
||||
- foo: bar
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 0 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigSerializationHeader(t *testing.T) {
|
||||
cfg, _ := NewCloudConfig("")
|
||||
contents := cfg.String()
|
||||
header := strings.SplitN(contents, "\n", 2)[0]
|
||||
if header != "#cloud-config" {
|
||||
t.Fatalf("Serialized config did not have expected header")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsers(t *testing.T) {
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
passwd: somehash
|
||||
ssh_authorized_keys:
|
||||
- somekey
|
||||
gecos: arbitrary comment
|
||||
homedir: /home/place
|
||||
no_create_home: yes
|
||||
primary_group: things
|
||||
groups:
|
||||
- ping
|
||||
- pong
|
||||
no_user_group: true
|
||||
system: y
|
||||
no_log_init: True
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.PasswordHash != "somehash" {
|
||||
t.Errorf("User passwd is %q, expected 'somehash'", user.PasswordHash)
|
||||
}
|
||||
|
||||
if keys := user.SSHAuthorizedKeys; len(keys) != 1 {
|
||||
t.Errorf("Parsed %d ssh keys, expected 1", len(keys))
|
||||
} else {
|
||||
key := user.SSHAuthorizedKeys[0]
|
||||
if key != "somekey" {
|
||||
t.Errorf("User SSH key is %q, expected 'somekey'", key)
|
||||
}
|
||||
}
|
||||
|
||||
if user.GECOS != "arbitrary comment" {
|
||||
t.Errorf("Failed to parse gecos field, got %q", user.GECOS)
|
||||
}
|
||||
|
||||
if user.Homedir != "/home/place" {
|
||||
t.Errorf("Failed to parse homedir field, got %q", user.Homedir)
|
||||
}
|
||||
|
||||
if !user.NoCreateHome {
|
||||
t.Errorf("Failed to parse no_create_home field")
|
||||
}
|
||||
|
||||
if user.PrimaryGroup != "things" {
|
||||
t.Errorf("Failed to parse primary_group field, got %q", user.PrimaryGroup)
|
||||
}
|
||||
|
||||
if len(user.Groups) != 2 {
|
||||
t.Errorf("Failed to parse 2 goups, got %d", len(user.Groups))
|
||||
} else {
|
||||
if user.Groups[0] != "ping" {
|
||||
t.Errorf("First group was %q, not expected value 'ping'", user.Groups[0])
|
||||
}
|
||||
if user.Groups[1] != "pong" {
|
||||
t.Errorf("First group was %q, not expected value 'pong'", user.Groups[1])
|
||||
}
|
||||
}
|
||||
|
||||
if !user.NoUserGroup {
|
||||
t.Errorf("Failed to parse no_user_group field")
|
||||
}
|
||||
|
||||
if !user.System {
|
||||
t.Errorf("Failed to parse system field")
|
||||
}
|
||||
|
||||
if !user.NoLogInit {
|
||||
t.Errorf("Failed to parse no_log_init field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsersGithubUser(t *testing.T) {
|
||||
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
coreos_ssh_import_github: bcwaldon
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.SSHImportGithubUser != "bcwaldon" {
|
||||
t.Errorf("github user is %q, expected 'bcwaldon'", user.SSHImportGithubUser)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsersSSHImportURL(t *testing.T) {
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
coreos_ssh_import_url: https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.SSHImportURL != "https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys" {
|
||||
t.Errorf("ssh import url is %q, expected 'https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys'", user.SSHImportURL)
|
||||
}
|
||||
}
|
19
config/etc_hosts.go
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type EtcHosts string
|
53
config/etcd.go
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type Etcd struct {
|
||||
Addr string `yaml:"addr" env:"ETCD_ADDR"`
|
||||
BindAddr string `yaml:"bind_addr" env:"ETCD_BIND_ADDR"`
|
||||
CAFile string `yaml:"ca_file" env:"ETCD_CA_FILE"`
|
||||
CertFile string `yaml:"cert_file" env:"ETCD_CERT_FILE"`
|
||||
ClusterActiveSize int `yaml:"cluster_active_size" env:"ETCD_CLUSTER_ACTIVE_SIZE"`
|
||||
ClusterRemoveDelay float64 `yaml:"cluster_remove_delay" env:"ETCD_CLUSTER_REMOVE_DELAY"`
|
||||
ClusterSyncInterval float64 `yaml:"cluster_sync_interval" env:"ETCD_CLUSTER_SYNC_INTERVAL"`
|
||||
CorsOrigins string `yaml:"cors" env:"ETCD_CORS"`
|
||||
DataDir string `yaml:"data_dir" env:"ETCD_DATA_DIR"`
|
||||
Discovery string `yaml:"discovery" env:"ETCD_DISCOVERY"`
|
||||
GraphiteHost string `yaml:"graphite_host" env:"ETCD_GRAPHITE_HOST"`
|
||||
HTTPReadTimeout float64 `yaml:"http_read_timeout" env:"ETCD_HTTP_READ_TIMEOUT"`
|
||||
HTTPWriteTimeout float64 `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
|
||||
KeyFile string `yaml:"key_file" env:"ETCD_KEY_FILE"`
|
||||
MaxResultBuffer int `yaml:"max_result_buffer" env:"ETCD_MAX_RESULT_BUFFER"`
|
||||
MaxRetryAttempts int `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
|
||||
Name string `yaml:"name" env:"ETCD_NAME"`
|
||||
PeerAddr string `yaml:"peer_addr" env:"ETCD_PEER_ADDR"`
|
||||
PeerBindAddr string `yaml:"peer_bind_addr" env:"ETCD_PEER_BIND_ADDR"`
|
||||
PeerCAFile string `yaml:"peer_ca_file" env:"ETCD_PEER_CA_FILE"`
|
||||
PeerCertFile string `yaml:"peer_cert_file" env:"ETCD_PEER_CERT_FILE"`
|
||||
PeerElectionTimeout int `yaml:"peer_election_timeout" env:"ETCD_PEER_ELECTION_TIMEOUT"`
|
||||
PeerHeartbeatInterval int `yaml:"peer_heartbeat_interval" env:"ETCD_PEER_HEARTBEAT_INTERVAL"`
|
||||
PeerKeyFile string `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
|
||||
Peers string `yaml:"peers" env:"ETCD_PEERS"`
|
||||
PeersFile string `yaml:"peers_file" env:"ETCD_PEERS_FILE"`
|
||||
RetryInterval float64 `yaml:"retry_interval" env:"ETCD_RETRY_INTERVAL"`
|
||||
Snapshot bool `yaml:"snapshot" env:"ETCD_SNAPSHOT"`
|
||||
SnapshotCount int `yaml:"snapshot_count" env:"ETCD_SNAPSHOTCOUNT"`
|
||||
StrTrace string `yaml:"trace" env:"ETCD_TRACE"`
|
||||
Verbose bool `yaml:"verbose" env:"ETCD_VERBOSE"`
|
||||
VeryVerbose bool `yaml:"very_verbose" env:"ETCD_VERY_VERBOSE"`
|
||||
VeryVeryVerbose bool `yaml:"very_very_verbose" env:"ETCD_VERY_VERY_VERBOSE"`
|
||||
}
|
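Each field above carries both a `yaml` tag (the cloud-config key) and an `env` tag (the corresponding ETCD_* variable). The sketch below shows one way those env tags could be consumed via reflection; it is only an illustration of what the tags encode, not the mechanism coreos-cloudinit itself uses, and the etcd values are invented.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/coreos/coreos-cloudinit/config"
)

// envPairs is an illustrative helper: it walks a struct's exported fields and
// emits NAME=value pairs from the `env` tags, skipping zero values.
func envPairs(v interface{}) []string {
	var out []string
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		name := rt.Field(i).Tag.Get("env")
		if name == "" || config.IsZero(rv.Field(i).Interface()) {
			continue
		}
		out = append(out, fmt.Sprintf("%s=%v", name, rv.Field(i).Interface()))
	}
	return out
}

func main() {
	etcd := config.Etcd{Name: "node-1", Discovery: "https://discovery.etcd.io/abc123"}
	for _, kv := range envPairs(etcd) {
		fmt.Println(kv) // ETCD_DISCOVERY=... and ETCD_NAME=node-1
	}
}
```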
25
config/file.go
Normal file
@@ -0,0 +1,25 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type File struct {
|
||||
Encoding string `yaml:"encoding" valid:"^(base64|b64|gz|gzip|gz\\+base64|gzip\\+base64|gz\\+b64|gzip\\+b64)$"`
|
||||
Content string `yaml:"content"`
|
||||
Owner string `yaml:"owner"`
|
||||
Path string `yaml:"path"`
|
||||
RawFilePermissions string `yaml:"permissions" valid:"^0?[0-7]{3,4}$"`
|
||||
}
|
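RawFilePermissions stays a string and is only validated against the octal pattern above; converting it into an os.FileMode is left to the consumer. The following is a hedged sketch of that conversion, where the helper and the default mode are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strconv"

	"github.com/coreos/coreos-cloudinit/config"
)

// permissions is an illustrative helper that turns the validated octal string
// (e.g. "0644" or "644") into an os.FileMode.
func permissions(f config.File) (os.FileMode, error) {
	if err := config.AssertStructValid(f); err != nil {
		return 0, err
	}
	if f.RawFilePermissions == "" {
		return 0644, nil // assumed default for this sketch
	}
	n, err := strconv.ParseUint(f.RawFilePermissions, 8, 32)
	if err != nil {
		return 0, err
	}
	return os.FileMode(n), nil
}

func main() {
	mode, err := permissions(config.File{Path: "/etc/motd", RawFilePermissions: "0644"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mode) // -rw-r--r--
}
```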
71
config/file_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncodingValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "base64", isValid: true},
|
||||
{value: "b64", isValid: true},
|
||||
{value: "gz", isValid: true},
|
||||
{value: "gzip", isValid: true},
|
||||
{value: "gz+base64", isValid: true},
|
||||
{value: "gzip+base64", isValid: true},
|
||||
{value: "gz+b64", isValid: true},
|
||||
{value: "gzip+b64", isValid: true},
|
||||
{value: "gzzzzbase64", isValid: false},
|
||||
{value: "gzipppbase64", isValid: false},
|
||||
{value: "unknown", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(File{Encoding: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRawFilePermissionsValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "744", isValid: true},
|
||||
{value: "0744", isValid: true},
|
||||
{value: "1744", isValid: true},
|
||||
{value: "01744", isValid: true},
|
||||
{value: "11744", isValid: false},
|
||||
{value: "rwxr--r--", isValid: false},
|
||||
{value: "800", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(File{RawFilePermissions: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
9
config/flannel.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package config
|
||||
|
||||
type Flannel struct {
|
||||
EtcdEndpoint string `yaml:"etcd_endpoint" env:"FLANNELD_ETCD_ENDPOINT"`
|
||||
EtcdPrefix string `yaml:"etcd_prefix" env:"FLANNELD_ETCD_PREFIX"`
|
||||
IPMasq string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
|
||||
SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
|
||||
Iface string `yaml:"interface" env:"FLANNELD_IFACE"`
|
||||
}
|
31
config/fleet.go
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type Fleet struct {
|
||||
AgentTTL string `yaml:"agent_ttl" env:"FLEET_AGENT_TTL"`
|
||||
EngineReconcileInterval float64 `yaml:"engine_reconcile_interval" env:"FLEET_ENGINE_RECONCILE_INTERVAL"`
|
||||
EtcdCAFile string `yaml:"etcd_cafile" env:"FLEET_ETCD_CAFILE"`
|
||||
EtcdCertFile string `yaml:"etcd_certfile" env:"FLEET_ETCD_CERTFILE"`
|
||||
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLEET_ETCD_KEYFILE"`
|
||||
EtcdKeyPrefix string `yaml:"etcd_key_prefix" env:"FLEET_ETCD_KEY_PREFIX"`
|
||||
EtcdRequestTimeout float64 `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
|
||||
EtcdServers string `yaml:"etcd_servers" env:"FLEET_ETCD_SERVERS"`
|
||||
Metadata string `yaml:"metadata" env:"FLEET_METADATA"`
|
||||
PublicIP string `yaml:"public_ip" env:"FLEET_PUBLIC_IP"`
|
||||
Verbosity int `yaml:"verbosity" env:"FLEET_VERBOSITY"`
|
||||
}
|
8
config/locksmith.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package config
|
||||
|
||||
type Locksmith struct {
|
||||
Endpoint string `yaml:"endpoint" env:"LOCKSMITHD_ENDPOINT"`
|
||||
EtcdCAFile string `yaml:"etcd_cafile" env:"LOCKSMITHD_ETCD_CAFILE"`
|
||||
EtcdCertFile string `yaml:"etcd_certfile" env:"LOCKSMITHD_ETCD_CERTFILE"`
|
||||
EtcdKeyFile string `yaml:"etcd_keyfile" env:"LOCKSMITHD_ETCD_KEYFILE"`
|
||||
}
|
25
config/oem.go
Normal file
@@ -0,0 +1,25 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type OEM struct {
|
||||
ID string `yaml:"id"`
|
||||
Name string `yaml:"name"`
|
||||
VersionID string `yaml:"version_id"`
|
||||
HomeURL string `yaml:"home_url"`
|
||||
BugReportURL string `yaml:"bug_report_url"`
|
||||
}
|
32
config/script.go
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Script []byte
|
||||
|
||||
func IsScript(userdata string) bool {
|
||||
header := strings.SplitN(userdata, "\n", 2)[0]
|
||||
return strings.HasPrefix(header, "#!")
|
||||
}
|
||||
|
||||
func NewScript(userdata string) (Script, error) {
|
||||
return Script(userdata), nil
|
||||
}
|
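Together with IsCloudConfig from config.go, IsScript lets user-data be dispatched on its first line: a `#cloud-config` header selects YAML parsing, while a `#!` shebang marks a raw script. A minimal dispatch sketch follows; the user-data string is an invented example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	userdata := "#!/bin/bash\necho hello\n"

	switch {
	case config.IsCloudConfig(userdata):
		cfg, err := config.NewCloudConfig(userdata)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("cloud-config for host", cfg.Hostname)
	case config.IsScript(userdata):
		script, err := config.NewScript(userdata)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("script, %d bytes\n", len(script))
	default:
		fmt.Println("unrecognized user-data")
	}
}
```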
32
config/unit.go
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
type Unit struct {
|
||||
Name string `yaml:"name"`
|
||||
Mask bool `yaml:"mask"`
|
||||
Enable bool `yaml:"enable"`
|
||||
Runtime bool `yaml:"runtime"`
|
||||
Content string `yaml:"content"`
|
||||
Command string `yaml:"command" valid:"^(start|stop|restart|reload|try-restart|reload-or-restart|reload-or-try-restart)$"`
|
||||
DropIns []UnitDropIn `yaml:"drop_ins"`
|
||||
}
|
||||
|
||||
type UnitDropIn struct {
|
||||
Name string `yaml:"name"`
|
||||
Content string `yaml:"content"`
|
||||
}
|
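Unit.Command is constrained by its `valid` tag to the systemd verbs listed above, and AssertStructValid from config.go enforces that constraint. A short sketch, where the unit values are invented examples:

```go
package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	good := config.Unit{Name: "etcd.service", Command: "start"}
	bad := config.Unit{Name: "etcd.service", Command: "launch"} // not an accepted verb

	fmt.Println(config.AssertStructValid(good)) // <nil>
	fmt.Println(config.AssertStructValid(bad))  // invalid value "launch" for option "Command" ...
}
```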
Some files were not shown because too many files have changed in this diff.