Compare commits

233 commits are included in this comparison. The rendered commit table lost its Author and Date columns; only the abbreviated SHA-1 of each commit survives, listed here in page order:

92eb5eb48b, ea1e4c38fa, 46cb51cf91, 1a6cee5305, e9bda98b54, badc874b74, c9e8c887b8, 8be307de49,
562c474275, 5c5834863b, 44f0a949c5, 106c4e7a2c, 6c1ba590aa, 45da664c59, 2a71551ef2, 84e1cb3242,
5214ead926, e2c24c4cef, 75e288c553, 0785840fe3, c10bfc2f56, 2f954dcdc2, cdfc94f4e9, 18e2f98414,
4b472795c4, 85b8d804c8, 1fbbaaec19, 667dbd8fb7, 6730cb7227, 9454522033, c255739a93, 2051cd3e1c,
b52cb3fea3, da5f85b3fb, 9999178538, 8f766e4666, 2d28d16c92, e9cd09dd7b, 8370b30aa2, 3e015cc3a1,
a0fe6d0884, 585ce5fcd9, 72445796ca, 7342d91a85, db1bc51c98, c1f373e648, db49a16002, a4a6c281d9,
17f8733121, 7dec922618, 54d3ae27af, ee2416af64, cda037f9a5, 549806cf64, 56815a6756, 24a6f7c49c,
98484be434, 9024659296, fc6940f7ba, f2fd95699b, 65db96cc7c, c17b93b5c0, d352f8ce6a, 78aa2c56ec,
c5b3788282, 5e98970bb5, cbdd446c55, 316cadcf44, 5a939be21b, 8d76c64386, 1b854eb51e, 9fcf338bf3,
fda72bdb5c, 685a38c6c8, 9d15f2cfaf, 2134fce791, 3abd6b2225, 2a8e6c9566, abe43537da, 3a550af651,
61c3a0eb2d, 480176bc11, 01b18eb551, 970ef435b6, e8d0021140, e9ec78ac6f, 4a2e417781, 604ef7ecb4,
c39dd5cc67, a923161f4a, e59e2f6cd5, e90fe3eba8, fb0187b197, 6babe74716, b1e88284ca, 18a65f7dac,
0c212c72c9, 6a800d8cc0, 5e112147bb, 7e78b1563f, ecbe81f103, 45c20c1dd3, 8ce925a060, eadb6ef42c,
7518f0ec93, f0b9eaf2fe, 7320a2cbf2, 57950b3ed9, 85c6a2a16a, 24b44e86a6, 2f52ad4ef8, 735d6c6161,
1cf275bad6, f1c97cb4d5, d143904aa9, c428ce2cc5, dfb5b4fc3a, 97d5538533, 6b8f82b5d3, facde6609f,
d68ae84b37, 54aa39543b, 8566a2c118, 49ac083af5, 5d65ca230a, 38b3e1213a, 4eedca26e9, f2b342c8be,
c19d8f6b61, 7913f74351, 5593408be8, 7fc67c2acf, b093094292, 9a80fd714a, fef5473881, bf5a2b208f,
364507fb75, 08d4842502, 21e32e44f8, 7a06dee16f, ff9cf5743d, 1b10a3a187, 10838e001d, 96370ac5b9,
0b82cd074d, a974e85103, f0450662b0, 03e29d1291, 98ae5d88aa, bf5d3539c9, 5e4cbcd909, a270c4c737,
f356a8a690, b1a897d75c, be51f4eba0, a55e2cd49b, 983501e43b, e3037f18a6, fe388a3ab6, c820f2b1cf,
81824be3bf, 98c26440be, 3b5fcc393b, 9528077340, 4355a05d55, 52c44923dd, 47748ef4b6, 8eca10200e,
43be8c8996, 19b4b1160e, ce6fccfb3c, 7d89aefb82, 2369e2a920, 6d808048d3, 276f0b5d99, 92bd5ca5d4,
5b5ffea126, 18068e9375, 1b3cabb035, 1be2bec1c2, f3bd5f543e, 660feb59b9, 9673dbe12b, 2be435dd83,
2d91369596, d8d3928978, 7fcc540154, cb7fbd4668, d4e048a1f4, 231c0fa20b, 1aabacc769, 6a2927d701,
126188510b, 4627ccb444, aff372111a, c7081b9918, 9ba3b18b59, 099de62e9a, c089216cb5, 68dc902ed1,
ad66b1c92f, fbdece2762, f85eafb7ca, f0dba2294e, bda3948382, fae81c78f3, a5dec7d7bd, e1222c9885,
ded3bcf122, 80d00cde94, 2805d70ece, 439b7e8b98, ba1c1e97d0, 8a50fd8595, 465bcce72c, 361edeebc6,
29a7b0e34f, 8496ffb53a, 2c717a6cd1, 13a91c9181, 338e1b64ab, 8eb0636034, f7c25a1b83, d6a0d0908c,
5c89afc18a, 376cc4bcac, e6cf83a2e5, 840c208b60, 29ed6b38bd, 259c7e1fe2, 033c8d352f, 16d7e8af48,
159f4a2c7c
````diff
@@ -1,8 +1,11 @@
 language: go
-go: 1.2
+go:
+- 1.3
+- 1.2
 
 install:
 - go get code.google.com/p/go.tools/cmd/cover
+- go get code.google.com/p/go.tools/cmd/vet
 
 script:
 - ./test
````
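The same tools the CI change installs can be run locally. This is a minimal sketch under the assumption that the Go 1.2/1.3-era tool import paths shown in the diff above are what you want; the repository's own `./test` script is what CI actually runs.

```sh
# Fetch the same tools the CI config installs (paths as used in the diff above).
go get code.google.com/p/go.tools/cmd/cover
go get code.google.com/p/go.tools/cmd/vet

# Run the static checks and tests locally.
go vet ./...
go test -cover ./...
```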
````diff
@@ -39,22 +39,25 @@ Thanks for your contributions!
 
 ### Format of the Commit Message
 
-We follow a rough convention for commit messages borrowed from AngularJS. This
-is an example of a commit:
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
 
 ```
-feat(scripts/test-cluster): add a cluster test command
+environment: write new keys in consistent order
 
-this uses tmux to setup a test cluster that you can easily kill and
-start for debugging.
+Go 1.3 randomizes the ordering of keys when iterating over a map.
+Sort the keys to make this ordering consistent.
+
+Fixes #38
 ```
 
 The format can be described more formally as follows:
 
 ```
-<type>(<scope>): <subject>
+<subsystem>: <what changed>
 <BLANK LINE>
-<body>
+<why this change was made>
 <BLANK LINE>
 <footer>
 ```
````
````diff
@@ -63,25 +66,3 @@ The first line is the subject and should be no longer than 70 characters, the
 second line is always blank, and other lines should be wrapped at 80 characters.
 This allows the message to be easier to read on GitHub as well as in various
 git tools.
-
-#### Subject Line
-
-The subject line contains a succinct description of the change.
-
-#### Allowed `<type>`s
-- *feat* (feature)
-- *fix* (bug fix)
-- *docs* (documentation)
-- *style* (formatting, missing semi colons, …)
-- *refactor*
-- *test* (when adding missing tests)
-- *chore* (maintain)
-
-#### Allowed `<scope>`s
-
-Scopes can anything specifying the place of the commit change in the code base -
-for example, "api", "store", etc.
-
-
-For more details on the commit format, see the [AngularJS commit style
-guide](https://docs.google.com/a/coreos.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit#).
````
````diff
@@ -13,7 +13,7 @@ If no **id** field is provided, coreos-cloudinit will ignore this section.
 
 For example, the following cloud-config document...
 
-```
+```yaml
 #cloud-config
 coreos:
   oem:
@@ -26,7 +26,7 @@ coreos:
 
 ...would be rendered to the following `/etc/oem-release`:
 
-```
+```yaml
 ID=rackspace
 NAME="Rackspace Cloud Servers"
 VERSION_ID=168.0.0
````
````diff
@@ -1,10 +1,10 @@
 # Using Cloud-Config
 
-CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.
+CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime. Your cloud-config is processed during each boot.
 
 ## Configuration File
 
-The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file. which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
+The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file, which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
 
 We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
 
@@ -16,7 +16,7 @@ We've designed our implementation to allow the same cloud-config file to work ac
 
 The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.
 
-A cloud-config file should contain an associative array which has zero or more of the following keys:
+A cloud-config file should contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:
 
 - `coreos`
 - `ssh_authorized_keys`
@@ -40,9 +40,9 @@ CoreOS tries to conform to each platform's native method to provide user data. E
 #### etcd
 
 The `coreos.etcd.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
-We can use the templating feature of coreos-cloudinit to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
+If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
 
-```
+```yaml
 #cloud-config
 
 coreos:
@@ -57,7 +57,7 @@ coreos:
 
 ...will generate a systemd unit drop-in like this:
 
-```
+```yaml
 [Service]
 Environment="ETCD_NAME=node001"
 Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
@@ -68,13 +68,15 @@ Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
 For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
 Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
 
+_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
+
 [etcd-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
 
 #### fleet
 
 The `coreos.fleet.*` parameters work very similarly to `coreos.etcd.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...
 
-```
+```yaml
 #cloud-config
 
 coreos:
@@ -85,7 +87,7 @@ coreos:
 
 ...will generate a systemd unit drop-in like this:
 
-```
+```yaml
 [Service]
 Environment="FLEET_PUBLIC_IP=203.0.113.29"
 Environment="FLEET_METADATA=region=us-west"
@@ -93,7 +95,7 @@ Environment="FLEET_METADATA=region=us-west"
 
 For more information on fleet configuration, see the [fleet documentation][fleet-config].
 
-[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/configuration.md
+[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration
 
 #### update
 
@@ -114,7 +116,7 @@ The `reboot-strategy` parameter also affects the behaviour of [locksmith](https:
 
 ##### Example
 
-```
+```yaml
 #cloud-config
 coreos:
   update:
@@ -123,14 +125,16 @@ coreos:
 
 #### units
 
-The `coreos.units.*` parameters define a list of arbitrary systemd units to start. Each item is an object with the following fields:
+The `coreos.units.*` parameters define a list of arbitrary systemd units to start after booting. This feature is intended to help you start essential services required to mount storage and configure networking in order to join the CoreOS cluster. It is not intended to be a Chef/Puppet replacement.
+
+Each item is an object with the following fields:
 
 - **name**: String representing unit's name. Required.
-- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. Default value is false.
-- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. Default value is false.
+- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. The default value is false.
+- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. The default value is false.
 - **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
-- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. Default value is restart.
-- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. Default value is false.
+- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
+- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
 
 **NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
 
@@ -138,7 +142,7 @@ The `coreos.units.*` parameters define a list of arbitrary systemd units to star
 
 Write a unit to disk, automatically starting it.
 
-```
+```yaml
 #cloud-config
 
 coreos:
@@ -159,7 +163,7 @@ coreos:
 
 Start the built-in `etcd` and `fleet` services:
 
-```
+```yaml
 #cloud-config
 
 coreos:
@@ -177,7 +181,7 @@ The `ssh_authorized_keys` parameter adds public SSH keys which will be authorize
 The keys will be named "coreos-cloudinit" by default.
 Override this by using the `--ssh-key-name` flag when calling `coreos-cloudinit`.
 
-```
+```yaml
 #cloud-config
 
 ssh_authorized_keys:
@@ -189,7 +193,7 @@ ssh_authorized_keys:
 The `hostname` parameter defines the system's hostname.
 This is the local part of a fully-qualified domain name (i.e. `foo` in `foo.example.com`).
 
-```
+```yaml
 #cloud-config
 
 hostname: coreos1
@@ -203,7 +207,7 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
 - **name**: Required. Login name of user
 - **gecos**: GECOS comment of user
 - **passwd**: Hash of the password to use for this user
-- **homedir**: User's home directory. Defaults to /home/<name>
+- **homedir**: User's home directory. Defaults to /home/\<name\>
 - **no-create-home**: Boolean. Skip home directory creation.
 - **primary-group**: Default group for the user. Defaults to a new group created named after the user.
 - **groups**: Add user to these additional groups
@@ -222,7 +226,7 @@ The following fields are not yet implemented:
 - **selinux-user**: Corresponding SELinux user
 - **ssh-import-id**: Import SSH keys by ID from Launchpad.
 
-```
+```yaml
 #cloud-config
 
 users:
@@ -261,7 +265,7 @@ Using a higher number of rounds will help create more secure passwords, but give
 
 Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.
 
-```
+```yaml
 #cloud-config
 
 users:
@@ -274,7 +278,7 @@ users:
 We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
 For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:
 
-```
+```yaml
 #cloud-config
 
 users:
@@ -284,7 +288,7 @@ users:
 
 You can also specify any URL whose response matches the JSON format for public keys:
 
-```
+```yaml
 #cloud-config
 
 users:
@@ -294,7 +298,8 @@ users:
 
 ### write_files
 
-The `write-file` parameter defines a list of files to create on the local filesystem. Each file is represented as an associative array which has the following keys:
+The `write_files` directive defines a set of files to create on the local filesystem.
+Each item in the list may have the following keys:
 
 - **path**: Absolute location on disk where contents should be written
 - **content**: Data to write at the provided `path`
@@ -304,14 +309,19 @@ The `write-file` parameter defines a list of files to create on the local filesy
 Explicitly not implemented is the **encoding** attribute.
 The **content** field must represent exactly what should be written to disk.
 
-```
+```yaml
 #cloud-config
 write_files:
-  - path: /etc/fleet/fleet.conf
+  - path: /etc/resolv.conf
     permissions: 0644
+    owner: root
     content: |
-      verbosity=1
-      metadata="region=us-west,type=ssd"
+      nameserver 8.8.8.8
+  - path: /etc/motd
+    permissions: 0644
+    owner: root
+    content: |
+      Good news, everyone!
 ```
 
 ### manage_etc_hosts
@@ -321,7 +331,7 @@ Currently, the only supported value is "localhost" which will cause your system'
 to resolve to "127.0.0.1". This is helpful when the host does not have DNS
 infrastructure in place to resolve its own hostname, for example, when using Vagrant.
 
-```
+```yaml
 #cloud-config
 
 manage_etc_hosts: localhost
````
````diff
@@ -14,17 +14,21 @@ The image should be a single FAT or ISO9660 file system with the label
 
 For example, to wrap up a config named `user_data` in a config drive image:
 
-    mkdir -p /tmp/new-drive/openstack/latest
-    cp user_data /tmp/new-drive/openstack/latest/user_data
-    mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
-    rm -r /tmp/new-drive
+```sh
+mkdir -p /tmp/new-drive/openstack/latest
+cp user_data /tmp/new-drive/openstack/latest/user_data
+mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
+rm -r /tmp/new-drive
+```
 
 ## QEMU virtfs
 
 One exception to the above, when using QEMU it is possible to skip creating an
 image and use a plain directory containing the same contents:
 
-    qemu-system-x86_64 \
-        -fsdev local,id=conf,security_model=none,readonly,path=/tmp/new-drive \
-        -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \
-        [usual qemu options here...]
+```sh
+qemu-system-x86_64 \
+    -fsdev local,id=conf,security_model=none,readonly,path=/tmp/new-drive \
+    -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \
+    [usual qemu options here...]
+```
````
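As a usage illustration (not part of the change above), the generated `configdrive.iso` can also be attached to a VM as an ordinary CD-ROM. This is only a sketch; the CoreOS disk image filename is an assumption and will vary with your download.

```sh
# Hypothetical invocation: boot a CoreOS QEMU image with the config drive attached.
qemu-system-x86_64 -m 1024 \
    -drive file=coreos_production_qemu_image.img,if=virtio \
    -cdrom configdrive.iso \
    -nographic
```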
Documentation/debian-interfaces.md (new file, 27 lines)

```markdown
#Debian Interfaces#
**WARNING**: This option is EXPERIMENTAL and may change or be removed at any
point.
There is basic support for converting from a Debian network configuration to
networkd unit files. The -convert-netconf=debian option is used to activate
this feature.

#convert-netconf#
Default: ""
Read the network config provided in cloud-drive and translate it from the
specified format into networkd unit files (requires the -from-configdrive
flag). Currently only supports "debian" which provides support for a small
subset of the [Debian network configuration]
(https://wiki.debian.org/NetworkConfiguration). These options include:

- interface config methods
  - static
    - address/netmask
    - gateway
    - hwaddress
    - dns-nameservers
  - dhcp
    - hwaddress
  - manual
  - loopback
- vlan_raw_device
- bond-slaves
```
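For illustration only, here is a sketch of the kind of Debian-style configuration the converter described above is meant to read. The interface name and addresses are invented, only options listed in the new document are used, and the location the file must occupy inside the config drive is not specified here.

```sh
# Hypothetical /etc/network/interfaces-style input for -convert-netconf=debian.
cat > interfaces <<'EOF'
auto eth0
iface eth0 inet static
    address 203.0.113.10
    netmask 255.255.255.0
    gateway 203.0.113.1
    dns-nameservers 8.8.8.8
EOF
```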
Godeps/Godeps.json (generated, new file, 34 lines)

```json
{
  "ImportPath": "github.com/coreos/coreos-cloudinit",
  "GoVersion": "go1.3.1",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "github.com/cloudsigma/cepgo",
      "Rev": "1bfc4895bf5c4d3b599f3f6ee142299488c8739b"
    },
    {
      "ImportPath": "github.com/coreos/go-systemd/dbus",
      "Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
    },
    {
      "ImportPath": "github.com/dotcloud/docker/pkg/netlink",
      "Comment": "v0.11.1-359-g55d41c3e21e1",
      "Rev": "55d41c3e21e1593b944c06196ffb2ac57ab7f653"
    },
    {
      "ImportPath": "github.com/guelfey/go.dbus",
      "Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda"
    },
    {
      "ImportPath": "github.com/tarm/goserial",
      "Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
    },
    {
      "ImportPath": "gopkg.in/yaml.v1",
      "Rev": "feb4ca79644e8e7e39c06095246ee54b1282c118"
    }
  ]
}
```
Godeps/Readme (generated, new file, 5 lines)

```
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
```
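A minimal sketch of how a tree like this is typically produced with the godep tool referenced above. The exact command used for this change is not recorded in the comparison, so treat the `-r` import-rewriting flag as an assumption; it matches the rewritten `Godeps/_workspace/...` import paths that appear in the hunks at the end of this comparison.

```sh
# Install godep (assumed workflow, not taken from this diff).
go get github.com/tools/godep

# Vendor dependencies under Godeps/ and, with -r, rewrite imports to
# github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/... paths.
godep save -r ./...
```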
Godeps/_workspace/.gitignore (generated, vendored, new file, 2 lines)

```
/pkg
/bin
```
Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore (generated, vendored, new file, 23 lines)

```
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
```
Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE (generated, vendored, new file, 202 lines): the unmodified text of the Apache License, Version 2.0, January 2004 (http://www.apache.org/licenses/).
Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md (generated, vendored, new file, 43 lines)

```markdown
cepgo
=====

Cepko implements easy-to-use communication with CloudSigma's VMs through a
virtual serial port without bothering with formatting the messages properly nor
parsing the output with the specific and sometimes confusing shell tools for
that purpose.

Having the server definition accessible by the VM can be useful in various
ways. For example it is possible to easily determine from within the VM, which
network interfaces are connected to public and which to private network.
Another use is to pass some data to initial VM setup scripts, like setting the
hostname to the VM name or passing ssh public keys through server meta.

Example usage:

    package main

    import (
        "fmt"

        "github.com/cloudsigma/cepgo"
    )

    func main() {
        c := cepgo.NewCepgo()
        result, err := c.Meta()
        if err != nil {
            panic(err)
        }
        fmt.Printf("%#v", result)
    }

Output:

    map[string]interface {}{
        "optimize_for":"custom",
        "ssh_public_key":"ssh-rsa AAA...",
        "description":"[...]",
    }

For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
```
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go (generated, vendored, new file, 186 lines): the cepgo package source. It reads CloudSigma's server context over a virtual serial port (/dev/ttyS1 by default, COM2 on Windows, at 115200 baud) via the vendored github.com/tarm/goserial package. fetchViaSerialPort writes the query wrapped in the request pattern and reads up to the EOT byte; the Cepgo type (constructed with NewCepgo or NewCepgoFetcher) exposes FetchRaw, Key, All, Meta, and GlobalContext, JSON-decoding results with a typeAssertToMapOfStrings helper.
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go (generated, vendored, new file, 122 lines): unit tests for the package. A fetchMock fixture returns a canned CloudSigma server-context JSON document, and TestAll, TestKey, TestMeta, and TestGlobalContext exercise the corresponding Cepgo methods against it.
````diff
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 )
 
 const signalBuffer = 100
@@ -18,7 +18,7 @@ package dbus
 
 import (
 	"errors"
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 )
 
 func (c *Conn) initJobs() {
@@ -208,7 +208,7 @@ func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Proper
 }
 
 func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
-	return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName)
+	return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
 }
 
 // ListUnits returns an array with all currently loaded units. Note that
@@ -18,7 +18,7 @@ package dbus
 
 import (
 	"fmt"
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -17,7 +17,7 @@ limitations under the License.
 package dbus
 
 import (
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 )
 
 // From the systemd docs:
@@ -20,7 +20,7 @@ import (
 	"errors"
 	"time"
 
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 )
 
 const (
@@ -101,7 +101,7 @@ func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitSt
 // SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
 // size of the channels, the comparison function for detecting changes and a filter
 // function for cutting down on the noise that your channel receives.
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
 	old := make(map[string]*UnitStatus)
 	statusChan := make(chan map[string]*UnitStatus, buffer)
 	errChan := make(chan error, buffer)
@@ -2,7 +2,7 @@ package introspect
 
 import (
 	"encoding/xml"
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 	"strings"
 )
 
@@ -2,7 +2,7 @@ package introspect
 
 import (
 	"encoding/xml"
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
 	"reflect"
 )
 
@@ -3,8 +3,8 @@
 package prop
 
 import (
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
-	"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus/introspect"
````
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
Godeps/_workspace/src/github.com/tarm/goserial/LICENSE (new file, 27 lines, generated, vendored)
@@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
Godeps/_workspace/src/github.com/tarm/goserial/README.md (new file, 63 lines, generated, vendored)
@@ -0,0 +1,63 @@
|
|||||||
|
GoSerial
|
||||||
|
========
|
||||||
|
A simple go package to allow you to read and write from the
|
||||||
|
serial port as a stream of bytes.
|
||||||
|
|
||||||
|
Details
|
||||||
|
-------
|
||||||
|
It aims to have the same API on all platforms, including windows. As
|
||||||
|
an added bonus, the windows package does not use cgo, so you can cross
|
||||||
|
compile for windows from another platform. Unfortunately goinstall
|
||||||
|
does not currently let you cross compile so you will have to do it
|
||||||
|
manually:
|
||||||
|
|
||||||
|
GOOS=windows make clean install
|
||||||
|
|
||||||
|
Currently there is very little in the way of configurability. You can
|
||||||
|
set the baud rate. Then you can Read(), Write(), or Close() the
|
||||||
|
connection. Read() will block until at least one byte is returned.
|
||||||
|
Write is the same. There is currently no exposed way to set the
|
||||||
|
timeouts, though patches are welcome.
|
||||||
|
|
||||||
|
Currently all ports are opened with 8 data bits, 1 stop bit, no
|
||||||
|
parity, no hardware flow control, and no software flow control. This
|
||||||
|
works fine for many real devices and many faux serial devices
|
||||||
|
including usb-to-serial converters and bluetooth serial ports.
|
||||||
|
|
||||||
|
You may Read() and Write() simultaneously on the same connection (from
|
||||||
|
different goroutines).
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/tarm/goserial"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
c := &serial.Config{Name: "COM45", Baud: 115200}
|
||||||
|
s, err := serial.OpenPort(c)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := s.Write([]byte("test"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
n, err = s.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Printf("%q", buf[:n])
|
||||||
|
}
|
||||||
|
```
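
Since OpenPort returns an io.ReadWriteCloser, the standard library wrappers compose with it. A minimal sketch (not part of the upstream README; the device path is only an example):

```go
package main

import (
	"bufio"
	"log"

	"github.com/tarm/goserial"
)

func main() {
	// Assumed device path for illustration; use whatever your platform exposes.
	s, err := serial.OpenPort(&serial.Config{Name: "/dev/ttyUSB0", Baud: 9600})
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Wrap the port so newline-terminated responses can be read line by line.
	r := bufio.NewReader(s)
	line, err := r.ReadString('\n') // blocks until a newline arrives
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("device said: %q", line)
}
```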
|
||||||
|
|
||||||
|
Possible Future Work
|
||||||
|
--------------------
|
||||||
|
- better tests (loopback etc)
|
Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go (new file, 61 lines, generated, vendored)
@@ -0,0 +1,61 @@
|
|||||||
|
package serial
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConnection(t *testing.T) {
|
||||||
|
c0 := &Config{Name: "/dev/ttyUSB0", Baud: 115200}
|
||||||
|
c1 := &Config{Name: "/dev/ttyUSB1", Baud: 115200}
|
||||||
|
|
||||||
|
s1, err := OpenPort(c0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s2, err := OpenPort(c1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := make(chan int, 1)
|
||||||
|
go func() {
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
var readCount int
|
||||||
|
for {
|
||||||
|
n, err := s2.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
readCount++
|
||||||
|
t.Logf("Read %v %v bytes: % 02x %s", readCount, n, buf[:n], buf[:n])
|
||||||
|
select {
|
||||||
|
case <-ch:
|
||||||
|
ch <- readCount
|
||||||
|
close(ch)
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _, err = s1.Write([]byte("hello")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err = s1.Write([]byte(" ")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
if _, err = s1.Write([]byte("world")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second / 10)
|
||||||
|
|
||||||
|
ch <- 0
|
||||||
|
s1.Write([]byte(" ")) // We could be blocked in the read without this
|
||||||
|
c := <-ch
|
||||||
|
exp := 5
|
||||||
|
if c >= exp {
|
||||||
|
t.Fatalf("Expected less than %v read, got %v", exp, c)
|
||||||
|
}
|
||||||
|
}
|
Godeps/_workspace/src/github.com/tarm/goserial/serial.go (new file, 99 lines, generated, vendored)
@@ -0,0 +1,99 @@
|
|||||||
|
/*
|
||||||
|
Goserial is a simple go package to allow you to read and write from
|
||||||
|
the serial port as a stream of bytes.
|
||||||
|
|
||||||
|
It aims to have the same API on all platforms, including windows. As
|
||||||
|
an added bonus, the windows package does not use cgo, so you can cross
|
||||||
|
compile for windows from another platform. Unfortunately goinstall
|
||||||
|
does not currently let you cross compile so you will have to do it
|
||||||
|
manually:
|
||||||
|
|
||||||
|
GOOS=windows make clean install
|
||||||
|
|
||||||
|
Currently there is very little in the way of configurability. You can
|
||||||
|
set the baud rate. Then you can Read(), Write(), or Close() the
|
||||||
|
connection. Read() will block until at least one byte is returned.
|
||||||
|
Write is the same. There is currently no exposed way to set the
|
||||||
|
timeouts, though patches are welcome.
|
||||||
|
|
||||||
|
Currently all ports are opened with 8 data bits, 1 stop bit, no
|
||||||
|
parity, no hardware flow control, and no software flow control. This
|
||||||
|
works fine for many real devices and many faux serial devices
|
||||||
|
including usb-to-serial converters and bluetooth serial ports.
|
||||||
|
|
||||||
|
You may Read() and Write() simultaneously on the same connection (from
|
||||||
|
different goroutines).
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/tarm/goserial"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
c := &serial.Config{Name: "COM5", Baud: 115200}
|
||||||
|
s, err := serial.OpenPort(c)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := s.Write([]byte("test"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
n, err = s.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Printf("%q", buf[:n])
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
package serial
|
||||||
|
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
// Config contains the information needed to open a serial port.
|
||||||
|
//
|
||||||
|
// Currently few options are implemented, but more may be added in the
|
||||||
|
// future (patches welcome), so it is recommended that you create a
|
||||||
|
// new config addressing the fields by name rather than by order.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// c0 := &serial.Config{Name: "COM45", Baud: 115200}
|
||||||
|
// or
|
||||||
|
// c1 := new(serial.Config)
|
||||||
|
// c1.Name = "/dev/tty.usbserial"
|
||||||
|
// c1.Baud = 115200
|
||||||
|
//
|
||||||
|
type Config struct {
|
||||||
|
Name string
|
||||||
|
Baud int
|
||||||
|
|
||||||
|
// Size int // 0 get translated to 8
|
||||||
|
// Parity SomeNewTypeToGetCorrectDefaultOf_None
|
||||||
|
// StopBits SomeNewTypeToGetCorrectDefaultOf_1
|
||||||
|
|
||||||
|
// RTSFlowControl bool
|
||||||
|
// DTRFlowControl bool
|
||||||
|
// XONFlowControl bool
|
||||||
|
|
||||||
|
// CRLFTranslate bool
|
||||||
|
// TimeoutStuff int
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenPort opens a serial port with the specified configuration
|
||||||
|
func OpenPort(c *Config) (io.ReadWriteCloser, error) {
|
||||||
|
return openPort(c.Name, c.Baud)
|
||||||
|
}
|
||||||
|
|
||||||
|
// func Flush()
|
||||||
|
|
||||||
|
// func SendBreak()
|
||||||
|
|
||||||
|
// func RegisterBreakHandler(func())
|
Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go (new file, 90 lines, generated, vendored)
@@ -0,0 +1,90 @@
|
|||||||
|
// +build linux,!cgo
|
||||||
|
|
||||||
|
package serial
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||||
|
|
||||||
|
var bauds = map[int]uint32{
|
||||||
|
50: syscall.B50,
|
||||||
|
75: syscall.B75,
|
||||||
|
110: syscall.B110,
|
||||||
|
134: syscall.B134,
|
||||||
|
150: syscall.B150,
|
||||||
|
200: syscall.B200,
|
||||||
|
300: syscall.B300,
|
||||||
|
600: syscall.B600,
|
||||||
|
1200: syscall.B1200,
|
||||||
|
1800: syscall.B1800,
|
||||||
|
2400: syscall.B2400,
|
||||||
|
4800: syscall.B4800,
|
||||||
|
9600: syscall.B9600,
|
||||||
|
19200: syscall.B19200,
|
||||||
|
38400: syscall.B38400,
|
||||||
|
57600: syscall.B57600,
|
||||||
|
115200: syscall.B115200,
|
||||||
|
230400: syscall.B230400,
|
||||||
|
460800: syscall.B460800,
|
||||||
|
500000: syscall.B500000,
|
||||||
|
576000: syscall.B576000,
|
||||||
|
921600: syscall.B921600,
|
||||||
|
1000000: syscall.B1000000,
|
||||||
|
1152000: syscall.B1152000,
|
||||||
|
1500000: syscall.B1500000,
|
||||||
|
2000000: syscall.B2000000,
|
||||||
|
2500000: syscall.B2500000,
|
||||||
|
3000000: syscall.B3000000,
|
||||||
|
3500000: syscall.B3500000,
|
||||||
|
4000000: syscall.B4000000,
|
||||||
|
}
|
||||||
|
|
||||||
|
rate := bauds[baud]
|
||||||
|
|
||||||
|
if rate == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if err != nil && f != nil {
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
fd := f.Fd()
|
||||||
|
t := syscall.Termios{
|
||||||
|
Iflag: syscall.IGNPAR,
|
||||||
|
Cflag: syscall.CS8 | syscall.CREAD | syscall.CLOCAL | rate,
|
||||||
|
Cc: [32]uint8{syscall.VMIN: 1},
|
||||||
|
Ispeed: rate,
|
||||||
|
Ospeed: rate,
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, _, errno := syscall.Syscall6(
|
||||||
|
syscall.SYS_IOCTL,
|
||||||
|
uintptr(fd),
|
||||||
|
uintptr(syscall.TCSETS),
|
||||||
|
uintptr(unsafe.Pointer(&t)),
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
); errno != 0 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = syscall.SetNonblock(int(fd), false); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go (new file, 107 lines, generated, vendored)
@@ -0,0 +1,107 @@
|
|||||||
|
// +build !windows,cgo
|
||||||
|
|
||||||
|
package serial
|
||||||
|
|
||||||
|
// #include <termios.h>
|
||||||
|
// #include <unistd.h>
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
// TODO: Maybe change to using syscall package + ioctl instead of cgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
//"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||||
|
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fd := C.int(f.Fd())
|
||||||
|
if C.isatty(fd) != 1 {
|
||||||
|
f.Close()
|
||||||
|
return nil, errors.New("File is not a tty")
|
||||||
|
}
|
||||||
|
|
||||||
|
var st C.struct_termios
|
||||||
|
_, err = C.tcgetattr(fd, &st)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var speed C.speed_t
|
||||||
|
switch baud {
|
||||||
|
case 115200:
|
||||||
|
speed = C.B115200
|
||||||
|
case 57600:
|
||||||
|
speed = C.B57600
|
||||||
|
case 38400:
|
||||||
|
speed = C.B38400
|
||||||
|
case 19200:
|
||||||
|
speed = C.B19200
|
||||||
|
case 9600:
|
||||||
|
speed = C.B9600
|
||||||
|
case 4800:
|
||||||
|
speed = C.B4800
|
||||||
|
case 2400:
|
||||||
|
speed = C.B2400
|
||||||
|
default:
|
||||||
|
f.Close()
|
||||||
|
return nil, fmt.Errorf("Unknown baud rate %v", baud)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = C.cfsetispeed(&st, speed)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_, err = C.cfsetospeed(&st, speed)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select local mode
|
||||||
|
st.c_cflag |= (C.CLOCAL | C.CREAD)
|
||||||
|
|
||||||
|
// Select raw mode
|
||||||
|
st.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ISIG)
|
||||||
|
st.c_oflag &= ^C.tcflag_t(C.OPOST)
|
||||||
|
|
||||||
|
_, err = C.tcsetattr(fd, C.TCSANOW, &st)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
//fmt.Println("Tweaking", name)
|
||||||
|
r1, _, e := syscall.Syscall(syscall.SYS_FCNTL,
|
||||||
|
uintptr(f.Fd()),
|
||||||
|
uintptr(syscall.F_SETFL),
|
||||||
|
uintptr(0))
|
||||||
|
if e != 0 || r1 != 0 {
|
||||||
|
s := fmt.Sprint("Clearing NONBLOCK syscall error:", e, r1)
|
||||||
|
f.Close()
|
||||||
|
return nil, errors.New(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
r1, _, e = syscall.Syscall(syscall.SYS_IOCTL,
|
||||||
|
uintptr(f.Fd()),
|
||||||
|
uintptr(0x80045402), // IOSSIOSPEED
|
||||||
|
uintptr(unsafe.Pointer(&baud)));
|
||||||
|
if e != 0 || r1 != 0 {
|
||||||
|
s := fmt.Sprint("Baudrate syscall error:", e, r1)
|
||||||
|
f.Close()
|
||||||
|
return nil, os.NewError(s)
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go (new file, 263 lines, generated, vendored)
@@ -0,0 +1,263 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package serial
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
type serialPort struct {
|
||||||
|
f *os.File
|
||||||
|
fd syscall.Handle
|
||||||
|
rl sync.Mutex
|
||||||
|
wl sync.Mutex
|
||||||
|
ro *syscall.Overlapped
|
||||||
|
wo *syscall.Overlapped
|
||||||
|
}
|
||||||
|
|
||||||
|
type structDCB struct {
|
||||||
|
DCBlength, BaudRate uint32
|
||||||
|
flags [4]byte
|
||||||
|
wReserved, XonLim, XoffLim uint16
|
||||||
|
ByteSize, Parity, StopBits byte
|
||||||
|
XonChar, XoffChar, ErrorChar, EofChar, EvtChar byte
|
||||||
|
wReserved1 uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
type structTimeouts struct {
|
||||||
|
ReadIntervalTimeout uint32
|
||||||
|
ReadTotalTimeoutMultiplier uint32
|
||||||
|
ReadTotalTimeoutConstant uint32
|
||||||
|
WriteTotalTimeoutMultiplier uint32
|
||||||
|
WriteTotalTimeoutConstant uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
|
||||||
|
if len(name) > 0 && name[0] != '\\' {
|
||||||
|
name = "\\\\.\\" + name
|
||||||
|
}
|
||||||
|
|
||||||
|
h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
|
||||||
|
syscall.GENERIC_READ|syscall.GENERIC_WRITE,
|
||||||
|
0,
|
||||||
|
nil,
|
||||||
|
syscall.OPEN_EXISTING,
|
||||||
|
syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
|
||||||
|
0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f := os.NewFile(uintptr(h), name)
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err = setCommState(h, baud); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = setupComm(h, 64, 64); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = setCommTimeouts(h); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = setCommMask(h); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ro, err := newOverlapped()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wo, err := newOverlapped()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
port := new(serialPort)
|
||||||
|
port.f = f
|
||||||
|
port.fd = h
|
||||||
|
port.ro = ro
|
||||||
|
port.wo = wo
|
||||||
|
|
||||||
|
return port, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *serialPort) Close() error {
|
||||||
|
return p.f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *serialPort) Write(buf []byte) (int, error) {
|
||||||
|
p.wl.Lock()
|
||||||
|
defer p.wl.Unlock()
|
||||||
|
|
||||||
|
if err := resetEvent(p.wo.HEvent); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
var n uint32
|
||||||
|
err := syscall.WriteFile(p.fd, buf, &n, p.wo)
|
||||||
|
if err != nil && err != syscall.ERROR_IO_PENDING {
|
||||||
|
return int(n), err
|
||||||
|
}
|
||||||
|
return getOverlappedResult(p.fd, p.wo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *serialPort) Read(buf []byte) (int, error) {
|
||||||
|
if p == nil || p.f == nil {
|
||||||
|
return 0, fmt.Errorf("Invalid port on read %v %v", p, p.f)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.rl.Lock()
|
||||||
|
defer p.rl.Unlock()
|
||||||
|
|
||||||
|
if err := resetEvent(p.ro.HEvent); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
var done uint32
|
||||||
|
err := syscall.ReadFile(p.fd, buf, &done, p.ro)
|
||||||
|
if err != nil && err != syscall.ERROR_IO_PENDING {
|
||||||
|
return int(done), err
|
||||||
|
}
|
||||||
|
return getOverlappedResult(p.fd, p.ro)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
nSetCommState,
|
||||||
|
nSetCommTimeouts,
|
||||||
|
nSetCommMask,
|
||||||
|
nSetupComm,
|
||||||
|
nGetOverlappedResult,
|
||||||
|
nCreateEvent,
|
||||||
|
nResetEvent uintptr
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
k32, err := syscall.LoadLibrary("kernel32.dll")
|
||||||
|
if err != nil {
|
||||||
|
panic("LoadLibrary " + err.Error())
|
||||||
|
}
|
||||||
|
defer syscall.FreeLibrary(k32)
|
||||||
|
|
||||||
|
nSetCommState = getProcAddr(k32, "SetCommState")
|
||||||
|
nSetCommTimeouts = getProcAddr(k32, "SetCommTimeouts")
|
||||||
|
nSetCommMask = getProcAddr(k32, "SetCommMask")
|
||||||
|
nSetupComm = getProcAddr(k32, "SetupComm")
|
||||||
|
nGetOverlappedResult = getProcAddr(k32, "GetOverlappedResult")
|
||||||
|
nCreateEvent = getProcAddr(k32, "CreateEventW")
|
||||||
|
nResetEvent = getProcAddr(k32, "ResetEvent")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProcAddr(lib syscall.Handle, name string) uintptr {
|
||||||
|
addr, err := syscall.GetProcAddress(lib, name)
|
||||||
|
if err != nil {
|
||||||
|
panic(name + " " + err.Error())
|
||||||
|
}
|
||||||
|
return addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func setCommState(h syscall.Handle, baud int) error {
|
||||||
|
var params structDCB
|
||||||
|
params.DCBlength = uint32(unsafe.Sizeof(params))
|
||||||
|
|
||||||
|
params.flags[0] = 0x01 // fBinary
|
||||||
|
params.flags[0] |= 0x10 // Assert DSR
|
||||||
|
|
||||||
|
params.BaudRate = uint32(baud)
|
||||||
|
params.ByteSize = 8
|
||||||
|
|
||||||
|
r, _, err := syscall.Syscall(nSetCommState, 2, uintptr(h), uintptr(unsafe.Pointer(¶ms)), 0)
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setCommTimeouts(h syscall.Handle) error {
|
||||||
|
var timeouts structTimeouts
|
||||||
|
const MAXDWORD = 1<<32 - 1
|
||||||
|
timeouts.ReadIntervalTimeout = MAXDWORD
|
||||||
|
timeouts.ReadTotalTimeoutMultiplier = MAXDWORD
|
||||||
|
timeouts.ReadTotalTimeoutConstant = MAXDWORD - 1
|
||||||
|
|
||||||
|
/* From http://msdn.microsoft.com/en-us/library/aa363190(v=VS.85).aspx
|
||||||
|
|
||||||
|
For blocking I/O see below:
|
||||||
|
|
||||||
|
Remarks:
|
||||||
|
|
||||||
|
If an application sets ReadIntervalTimeout and
|
||||||
|
ReadTotalTimeoutMultiplier to MAXDWORD and sets
|
||||||
|
ReadTotalTimeoutConstant to a value greater than zero and
|
||||||
|
less than MAXDWORD, one of the following occurs when the
|
||||||
|
ReadFile function is called:
|
||||||
|
|
||||||
|
If there are any bytes in the input buffer, ReadFile returns
|
||||||
|
immediately with the bytes in the buffer.
|
||||||
|
|
||||||
|
If there are no bytes in the input buffer, ReadFile waits
|
||||||
|
until a byte arrives and then returns immediately.
|
||||||
|
|
||||||
|
If no bytes arrive within the time specified by
|
||||||
|
ReadTotalTimeoutConstant, ReadFile times out.
|
||||||
|
*/
|
||||||
|
|
||||||
|
r, _, err := syscall.Syscall(nSetCommTimeouts, 2, uintptr(h), uintptr(unsafe.Pointer(&timeouts)), 0)
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupComm(h syscall.Handle, in, out int) error {
|
||||||
|
r, _, err := syscall.Syscall(nSetupComm, 3, uintptr(h), uintptr(in), uintptr(out))
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setCommMask(h syscall.Handle) error {
|
||||||
|
const EV_RXCHAR = 0x0001
|
||||||
|
r, _, err := syscall.Syscall(nSetCommMask, 2, uintptr(h), EV_RXCHAR, 0)
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resetEvent(h syscall.Handle) error {
|
||||||
|
r, _, err := syscall.Syscall(nResetEvent, 1, uintptr(h), 0, 0)
|
||||||
|
if r == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOverlapped() (*syscall.Overlapped, error) {
|
||||||
|
var overlapped syscall.Overlapped
|
||||||
|
r, _, err := syscall.Syscall6(nCreateEvent, 4, 0, 1, 0, 0, 0, 0)
|
||||||
|
if r == 0 {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
overlapped.HEvent = syscall.Handle(r)
|
||||||
|
return &overlapped, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOverlappedResult(h syscall.Handle, overlapped *syscall.Overlapped) (int, error) {
|
||||||
|
var n int
|
||||||
|
r, _, err := syscall.Syscall6(nGetOverlappedResult, 4,
|
||||||
|
uintptr(h),
|
||||||
|
uintptr(unsafe.Pointer(overlapped)),
|
||||||
|
uintptr(unsafe.Pointer(&n)), 1, 0, 0)
|
||||||
|
if r == 0 {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, nil
|
||||||
|
}
|
@@ -1,3 +1,15 @@
|
|||||||
|
The following files were ported to Go from C files of libyaml, and thus
|
||||||
|
are still covered by their original copyright and license:
|
||||||
|
|
||||||
|
apic.go
|
||||||
|
emitterc.go
|
||||||
|
parserc.go
|
||||||
|
readerc.go
|
||||||
|
scannerc.go
|
||||||
|
writerc.go
|
||||||
|
yamlh.go
|
||||||
|
yamlprivateh.go
|
||||||
|
|
||||||
Copyright (c) 2006 Kirill Simonov
|
Copyright (c) 2006 Kirill Simonov
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
Godeps/_workspace/src/gopkg.in/yaml.v1/README.md (new file, 128 lines, generated, vendored)
@@ -0,0 +1,128 @@
|
|||||||
|
# YAML support for the Go language
|
||||||
|
|
||||||
|
Introduction
|
||||||
|
------------
|
||||||
|
|
||||||
|
The yaml package enables Go programs to comfortably encode and decode YAML
|
||||||
|
values. It was developed within [Canonical](https://www.canonical.com) as
|
||||||
|
part of the [juju](https://juju.ubuntu.com) project, and is based on a
|
||||||
|
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
|
||||||
|
C library to parse and generate YAML data quickly and reliably.
|
||||||
|
|
||||||
|
Compatibility
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The yaml package is almost compatible with YAML 1.1, including support for
|
||||||
|
anchors, tags, etc. There are still a few missing bits, such as document
|
||||||
|
merging, base-60 floats (huh?), and multi-document unmarshalling. These
|
||||||
|
features are not hard to add, and will be introduced as necessary.
|
||||||
|
|
||||||
|
Installation and usage
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
The import path for the package is *gopkg.in/yaml.v1*.
|
||||||
|
|
||||||
|
To install it, run:
|
||||||
|
|
||||||
|
go get gopkg.in/yaml.v1
|
||||||
|
|
||||||
|
API documentation
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
If opened in a browser, the import path itself leads to the API documentation:
|
||||||
|
|
||||||
|
* [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)
|
||||||
|
|
||||||
|
API stability
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).
|
||||||
|
|
||||||
|
|
||||||
|
License
|
||||||
|
-------
|
||||||
|
|
||||||
|
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
|
||||||
|
|
||||||
|
|
||||||
|
Example
|
||||||
|
-------
|
||||||
|
|
||||||
|
```Go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var data = `
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d: [3, 4]
|
||||||
|
`
|
||||||
|
|
||||||
|
type T struct {
|
||||||
|
A string
|
||||||
|
B struct{C int; D []int ",flow"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
t := T{}
|
||||||
|
|
||||||
|
err := yaml.Unmarshal([]byte(data), &t)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- t:\n%v\n\n", t)
|
||||||
|
|
||||||
|
d, err := yaml.Marshal(&t)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- t dump:\n%s\n\n", string(d))
|
||||||
|
|
||||||
|
m := make(map[interface{}]interface{})
|
||||||
|
|
||||||
|
err = yaml.Unmarshal([]byte(data), &m)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- m:\n%v\n\n", m)
|
||||||
|
|
||||||
|
d, err = yaml.Marshal(&m)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- m dump:\n%s\n\n", string(d))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This example will generate the following output:
|
||||||
|
|
||||||
|
```
|
||||||
|
--- t:
|
||||||
|
{Easy! {2 [3 4]}}
|
||||||
|
|
||||||
|
--- t dump:
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d: [3, 4]
|
||||||
|
|
||||||
|
|
||||||
|
--- m:
|
||||||
|
map[a:Easy! b:map[c:2 d:[3 4]]]
|
||||||
|
|
||||||
|
--- m dump:
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d:
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
```
|
||||||
|
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
@@ -1,8 +1,9 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -211,6 +212,16 @@ func newDecoder() *decoder {
|
|||||||
// returned to call SetYAML() with the value of *out once it's defined.
|
// returned to call SetYAML() with the value of *out once it's defined.
|
||||||
//
|
//
|
||||||
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
|
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
|
||||||
|
if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
|
||||||
|
setter, _ := (*out).Addr().Interface().(Setter)
|
||||||
|
if setter != nil {
|
||||||
|
var arg interface{}
|
||||||
|
*out = reflect.ValueOf(&arg).Elem()
|
||||||
|
return func() {
|
||||||
|
*good = setter.SetYAML(tag, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
again := true
|
again := true
|
||||||
for again {
|
for again {
|
||||||
again = false
|
again = false
|
||||||
@@ -279,16 +290,19 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
|||||||
return good
|
return good
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var durationType = reflect.TypeOf(time.Duration(0))
|
||||||
|
|
||||||
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||||
var tag string
|
var tag string
|
||||||
var resolved interface{}
|
var resolved interface{}
|
||||||
if n.tag == "" && !n.implicit {
|
if n.tag == "" && !n.implicit {
|
||||||
|
tag = "!!str"
|
||||||
resolved = n.value
|
resolved = n.value
|
||||||
} else {
|
} else {
|
||||||
tag, resolved = resolve(n.tag, n.value)
|
tag, resolved = resolve(n.tag, n.value)
|
||||||
if set := d.setter(tag, &out, &good); set != nil {
|
}
|
||||||
defer set()
|
if set := d.setter(tag, &out, &good); set != nil {
|
||||||
}
|
defer set()
|
||||||
}
|
}
|
||||||
switch out.Kind() {
|
switch out.Kind() {
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
@@ -320,6 +334,14 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
|||||||
out.SetInt(int64(resolved))
|
out.SetInt(int64(resolved))
|
||||||
good = true
|
good = true
|
||||||
}
|
}
|
||||||
|
case string:
|
||||||
|
if out.Type() == durationType {
|
||||||
|
d, err := time.ParseDuration(resolved)
|
||||||
|
if err == nil {
|
||||||
|
out.SetInt(int64(d))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
switch resolved := resolved.(type) {
|
switch resolved := resolved.(type) {
|
||||||
@@ -437,6 +459,10 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
|||||||
}
|
}
|
||||||
l := len(n.children)
|
l := len(n.children)
|
||||||
for i := 0; i < l; i += 2 {
|
for i := 0; i < l; i += 2 {
|
||||||
|
if isMerge(n.children[i]) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
k := reflect.New(kt).Elem()
|
k := reflect.New(kt).Elem()
|
||||||
if d.unmarshal(n.children[i], k) {
|
if d.unmarshal(n.children[i], k) {
|
||||||
e := reflect.New(et).Elem()
|
e := reflect.New(et).Elem()
|
||||||
@@ -456,7 +482,12 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
|||||||
name := settableValueOf("")
|
name := settableValueOf("")
|
||||||
l := len(n.children)
|
l := len(n.children)
|
||||||
for i := 0; i < l; i += 2 {
|
for i := 0; i < l; i += 2 {
|
||||||
if !d.unmarshal(n.children[i], name) {
|
ni := n.children[i]
|
||||||
|
if isMerge(ni) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !d.unmarshal(ni, name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||||
@@ -471,3 +502,37 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||||
|
const wantMap = "map merge requires map or sequence of maps as the value"
|
||||||
|
switch n.kind {
|
||||||
|
case mappingNode:
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case aliasNode:
|
||||||
|
an, ok := d.doc.anchors[n.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case sequenceNode:
|
||||||
|
// Step backwards as earlier nodes take precedence.
|
||||||
|
for i := len(n.children)-1; i >= 0; i-- {
|
||||||
|
ni := n.children[i]
|
||||||
|
if ni.kind == aliasNode {
|
||||||
|
an, ok := d.doc.anchors[ni.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
} else if ni.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
d.unmarshal(ni, out)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMerge(n *node) bool {
|
||||||
|
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
|
||||||
|
}
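
A minimal sketch of what this merge handling enables, mirroring the behaviour the mergeTests further down exercise (the document, variable, and key names here are illustrative only, not part of the change):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v1"
)

// doc uses the YAML merge key ("<<"): the &CENTER anchor is merged into
// "point", and the explicit "y" key then overrides the merged value.
var doc = `
base: &CENTER {x: 1, y: 2}
point:
  <<: *CENTER
  y: 5
`

func main() {
	var m map[string]map[string]int
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["point"]["x"], m["point"]["y"]) // 1 5
}
```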
|
@@ -1,10 +1,11 @@
|
|||||||
package goyaml_test
|
package yaml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
. "launchpad.net/gocheck"
|
. "gopkg.in/check.v1"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
|
||||||
"math"
|
"math"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var unmarshalIntTest = 123
|
var unmarshalIntTest = 123
|
||||||
@@ -350,6 +351,32 @@ var unmarshalTests = []struct {
|
|||||||
C inlineB `yaml:",inline"`
|
C inlineB `yaml:",inline"`
|
||||||
}{1, inlineB{2, inlineC{3}}},
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// bug 1243827
|
||||||
|
{
|
||||||
|
"a: -b_c",
|
||||||
|
map[string]interface{}{"a": "-b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: +b_c",
|
||||||
|
map[string]interface{}{"a": "+b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: 50cent_of_dollar",
|
||||||
|
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
"a: 3s",
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24.
|
||||||
|
{
|
||||||
|
"a: <foo>",
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
type inlineB struct {
|
type inlineB struct {
|
||||||
@@ -377,7 +404,7 @@ func (s *S) TestUnmarshal(c *C) {
|
|||||||
pv := reflect.New(pt.Elem())
|
pv := reflect.New(pt.Elem())
|
||||||
value = pv.Interface()
|
value = pv.Interface()
|
||||||
}
|
}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), value)
|
err := yaml.Unmarshal([]byte(item.data), value)
|
||||||
c.Assert(err, IsNil, Commentf("Item #%d", i))
|
c.Assert(err, IsNil, Commentf("Item #%d", i))
|
||||||
if t.Kind() == reflect.String {
|
if t.Kind() == reflect.String {
|
||||||
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
|
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
|
||||||
@@ -389,7 +416,7 @@ func (s *S) TestUnmarshal(c *C) {
|
|||||||
|
|
||||||
func (s *S) TestUnmarshalNaN(c *C) {
|
func (s *S) TestUnmarshalNaN(c *C) {
|
||||||
value := map[string]interface{}{}
|
value := map[string]interface{}{}
|
||||||
err := goyaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||||
}
|
}
|
||||||
@@ -408,7 +435,7 @@ var unmarshalErrorTests = []struct {
|
|||||||
func (s *S) TestUnmarshalErrors(c *C) {
|
func (s *S) TestUnmarshalErrors(c *C) {
|
||||||
for _, item := range unmarshalErrorTests {
|
for _, item := range unmarshalErrorTests {
|
||||||
var value interface{}
|
var value interface{}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), &value)
|
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||||
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -421,6 +448,8 @@ var setterTests = []struct {
|
|||||||
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||||
{"_: 10", "!!int", 10},
|
{"_: 10", "!!int", 10},
|
||||||
{"_: null", "!!null", nil},
|
{"_: null", "!!null", nil},
|
||||||
|
{`_: BAR!`, "!!str", "BAR!"},
|
||||||
|
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||||
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -442,17 +471,31 @@ func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
type typeWithSetterField struct {
|
type setterPointerType struct {
|
||||||
Field *typeWithSetter "_"
|
Field *typeWithSetter "_"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) TestUnmarshalWithSetter(c *C) {
|
type setterValueType struct {
|
||||||
|
Field typeWithSetter "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalWithPointerSetter(c *C) {
|
||||||
for _, item := range setterTests {
|
for _, item := range setterTests {
|
||||||
obj := &typeWithSetterField{}
|
obj := &setterPointerType{}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), obj)
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(obj.Field, NotNil,
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
Commentf("Pointer not initialized (%#v)", item.value))
|
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||||
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalWithValueSetter(c *C) {
|
||||||
|
for _, item := range setterTests {
|
||||||
|
obj := &setterValueType{}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
c.Assert(obj.Field.tag, Equals, item.tag)
|
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
}
|
}
|
||||||
@@ -460,7 +503,7 @@ func (s *S) TestUnmarshalWithSetter(c *C) {
|
|||||||
|
|
||||||
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
|
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
|
||||||
obj := &typeWithSetter{}
|
obj := &typeWithSetter{}
|
||||||
err := goyaml.Unmarshal([]byte(setterTests[0].data), obj)
|
err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(obj.tag, Equals, setterTests[0].tag)
|
c.Assert(obj.tag, Equals, setterTests[0].tag)
|
||||||
value, ok := obj.value.(map[interface{}]interface{})
|
value, ok := obj.value.(map[interface{}]interface{})
|
||||||
@@ -477,8 +520,8 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
m := map[string]*typeWithSetter{}
|
m := map[string]*typeWithSetter{}
|
||||||
data := "{abc: 1, def: 2, ghi: 3, jkl: 4}"
|
data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
|
||||||
err := goyaml.Unmarshal([]byte(data), m)
|
err := yaml.Unmarshal([]byte(data), m)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(m["abc"], NotNil)
|
c.Assert(m["abc"], NotNil)
|
||||||
c.Assert(m["def"], IsNil)
|
c.Assert(m["def"], IsNil)
|
||||||
@@ -489,6 +532,98 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
c.Assert(m["ghi"].value, Equals, 3)
|
c.Assert(m["ghi"].value, Equals, 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// From http://yaml.org/type/merge.html
|
||||||
|
var mergeTests = `
|
||||||
|
anchors:
|
||||||
|
- &CENTER { "x": 1, "y": 2 }
|
||||||
|
- &LEFT { "x": 0, "y": 2 }
|
||||||
|
- &BIG { "r": 10 }
|
||||||
|
- &SMALL { "r": 1 }
|
||||||
|
|
||||||
|
# All the following maps are equal:
|
||||||
|
|
||||||
|
plain:
|
||||||
|
# Explicit keys
|
||||||
|
"x": 1
|
||||||
|
"y": 2
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeOne:
|
||||||
|
# Merge one map
|
||||||
|
<< : *CENTER
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeMultiple:
|
||||||
|
# Merge multiple maps
|
||||||
|
<< : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
override:
|
||||||
|
# Override
|
||||||
|
<< : [ *BIG, *LEFT, *SMALL ]
|
||||||
|
"x": 1
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
shortTag:
|
||||||
|
# Explicit short merge tag
|
||||||
|
!!merge "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
longTag:
|
||||||
|
# Explicit merge long tag
|
||||||
|
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineMap:
|
||||||
|
# Inlined map
|
||||||
|
<< : {"x": 1, "y": 2, "r": 10}
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineSequenceMap:
|
||||||
|
# Inlined map in sequence
|
||||||
|
<< : [ *CENTER, {"r": 10} ]
|
||||||
|
label: center/big
|
||||||
|
`
|
||||||
|
|
||||||
|
func (s *S) TestMerge(c *C) {
|
||||||
|
var want = map[interface{}]interface{}{
|
||||||
|
"x": 1,
|
||||||
|
"y": 2,
|
||||||
|
"r": 10,
|
||||||
|
"label": "center/big",
|
||||||
|
}
|
||||||
|
|
||||||
|
var m map[string]interface{}
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMergeStruct(c *C) {
|
||||||
|
type Data struct {
|
||||||
|
X, Y, R int
|
||||||
|
Label string
|
||||||
|
}
|
||||||
|
want := Data{1, 2, 10, "center/big"}
|
||||||
|
|
||||||
|
var m map[string]Data
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//var data []byte
|
//var data []byte
|
||||||
//func init() {
|
//func init() {
|
||||||
// var err error
|
// var err error
|
||||||
@@ -502,7 +637,7 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
// var err error
|
// var err error
|
||||||
// for i := 0; i < c.N; i++ {
|
// for i := 0; i < c.N; i++ {
|
||||||
// var v map[string]interface{}
|
// var v map[string]interface{}
|
||||||
// err = goyaml.Unmarshal(data, &v)
|
// err = yaml.Unmarshal(data, &v)
|
||||||
// }
|
// }
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// panic(err)
|
// panic(err)
|
||||||
@@ -511,9 +646,9 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
//
|
//
|
||||||
//func (s *S) BenchmarkMarshal(c *C) {
|
//func (s *S) BenchmarkMarshal(c *C) {
|
||||||
// var v map[string]interface{}
|
// var v map[string]interface{}
|
||||||
// goyaml.Unmarshal(data, &v)
|
// yaml.Unmarshal(data, &v)
|
||||||
// c.ResetTimer()
|
// c.ResetTimer()
|
||||||
// for i := 0; i < c.N; i++ {
|
// for i := 0; i < c.N; i++ {
|
||||||
// goyaml.Marshal(&v)
|
// yaml.Marshal(&v)
|
||||||
// }
|
// }
|
||||||
//}
|
//}
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
@@ -1,9 +1,10 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type encoder struct {
|
type encoder struct {
|
||||||
@@ -85,7 +86,11 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
|
|||||||
case reflect.String:
|
case reflect.String:
|
||||||
e.stringv(tag, in)
|
e.stringv(tag, in)
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
e.intv(tag, in)
|
if in.Type() == durationType {
|
||||||
|
e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
|
||||||
|
} else {
|
||||||
|
e.intv(tag, in)
|
||||||
|
}
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
e.uintv(tag, in)
|
e.uintv(tag, in)
|
||||||
case reflect.Float32, reflect.Float64:
|
case reflect.Float32, reflect.Float64:
|
@@ -1,12 +1,13 @@
|
|||||||
package goyaml_test
|
package yaml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
. "launchpad.net/gocheck"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
. "gopkg.in/check.v1"
|
||||||
"math"
|
"math"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var marshalIntTest = 123
|
var marshalIntTest = 123
|
||||||
@@ -212,11 +213,23 @@ var marshalTests = []struct {
|
|||||||
}{1, inlineB{2, inlineC{3}}},
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
"a: 1\nb: 2\nc: 3\n",
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
"a: 3s\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
"a: <foo>\n",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) TestMarshal(c *C) {
|
func (s *S) TestMarshal(c *C) {
|
||||||
for _, item := range marshalTests {
|
for _, item := range marshalTests {
|
||||||
data, err := goyaml.Marshal(item.value)
|
data, err := yaml.Marshal(item.value)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, item.data)
|
c.Assert(string(data), Equals, item.data)
|
||||||
}
|
}
|
||||||
@@ -237,7 +250,7 @@ var marshalErrorTests = []struct {
|
|||||||
|
|
||||||
func (s *S) TestMarshalErrors(c *C) {
|
func (s *S) TestMarshalErrors(c *C) {
|
||||||
for _, item := range marshalErrorTests {
|
for _, item := range marshalErrorTests {
|
||||||
_, err := goyaml.Marshal(item.value)
|
_, err := yaml.Marshal(item.value)
|
||||||
c.Assert(err, ErrorMatches, item.error)
|
c.Assert(err, ErrorMatches, item.error)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -269,12 +282,12 @@ func (s *S) TestMarshalTypeCache(c *C) {
|
|||||||
var err error
|
var err error
|
||||||
func() {
|
func() {
|
||||||
type T struct{ A int }
|
type T struct{ A int }
|
||||||
data, err = goyaml.Marshal(&T{})
|
data, err = yaml.Marshal(&T{})
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
}()
|
}()
|
||||||
func() {
|
func() {
|
||||||
type T struct{ B int }
|
type T struct{ B int }
|
||||||
data, err = goyaml.Marshal(&T{})
|
data, err = yaml.Marshal(&T{})
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
}()
|
}()
|
||||||
c.Assert(string(data), Equals, "b: 0\n")
|
c.Assert(string(data), Equals, "b: 0\n")
|
||||||
@@ -298,7 +311,7 @@ func (s *S) TestMashalWithGetter(c *C) {
|
|||||||
obj := &typeWithGetterField{}
|
obj := &typeWithGetterField{}
|
||||||
obj.Field.tag = item.tag
|
obj.Field.tag = item.tag
|
||||||
obj.Field.value = item.value
|
obj.Field.value = item.value
|
||||||
data, err := goyaml.Marshal(obj)
|
data, err := yaml.Marshal(obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, string(item.data))
|
c.Assert(string(data), Equals, string(item.data))
|
||||||
}
|
}
|
||||||
@@ -308,7 +321,7 @@ func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
|
|||||||
obj := &typeWithGetter{}
|
obj := &typeWithGetter{}
|
||||||
obj.tag = ""
|
obj.tag = ""
|
||||||
obj.value = map[string]string{"hello": "world!"}
|
obj.value = map[string]string{"hello": "world!"}
|
||||||
data, err := goyaml.Marshal(obj)
|
data, err := yaml.Marshal(obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, "hello: world!\n")
|
c.Assert(string(data), Equals, "hello: world!\n")
|
||||||
}
|
}
|
||||||
@@ -356,7 +369,7 @@ func (s *S) TestSortedOutput(c *C) {
|
|||||||
for _, k := range order {
|
for _, k := range order {
|
||||||
m[k] = 1
|
m[k] = 1
|
||||||
}
|
}
|
||||||
data, err := goyaml.Marshal(m)
|
data, err := yaml.Marshal(m)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
out := "\n" + string(data)
|
out := "\n" + string(data)
|
||||||
last := 0
|
last := 0
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"math"
|
||||||
@@ -27,7 +27,6 @@ func init() {
|
|||||||
t[int(c)] = 'M' // In map
|
t[int(c)] = 'M' // In map
|
||||||
}
|
}
|
||||||
t[int('.')] = '.' // Float (potentially in map)
|
t[int('.')] = '.' // Float (potentially in map)
|
||||||
t[int('<')] = '<' // Merge
|
|
||||||
|
|
||||||
var resolveMapList = []struct {
|
var resolveMapList = []struct {
|
||||||
v interface{}
|
v interface{}
|
||||||
@@ -45,6 +44,7 @@ func init() {
|
|||||||
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
|
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
|
||||||
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
|
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
|
||||||
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
|
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
|
||||||
|
{"<<", "!!merge", []string{"<<"}},
|
||||||
}
|
}
|
||||||
|
|
||||||
m := resolveMap
|
m := resolveMap
|
||||||
@@ -113,13 +113,8 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 
 	case 'D', 'S':
 		// Int, float, or timestamp.
-		for i := 0; i != len(in); i++ {
-			if in[i] == '_' {
-				in = strings.Replace(in, "_", "", -1)
-				break
-			}
-		}
-		intv, err := strconv.ParseInt(in, 0, 64)
+		plain := strings.Replace(in, "_", "", -1)
+		intv, err := strconv.ParseInt(plain, 0, 64)
 		if err == nil {
 			if intv == int64(int(intv)) {
 				return "!!int", int(intv)
@@ -127,26 +122,23 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 				return "!!int", intv
 			}
 		}
-		floatv, err := strconv.ParseFloat(in, 64)
+		floatv, err := strconv.ParseFloat(plain, 64)
 		if err == nil {
 			return "!!float", floatv
 		}
-		if strings.HasPrefix(in, "0b") {
-			intv, err := strconv.ParseInt(in[2:], 2, 64)
+		if strings.HasPrefix(plain, "0b") {
+			intv, err := strconv.ParseInt(plain[2:], 2, 64)
 			if err == nil {
 				return "!!int", int(intv)
 			}
-		} else if strings.HasPrefix(in, "-0b") {
-			intv, err := strconv.ParseInt(in[3:], 2, 64)
+		} else if strings.HasPrefix(plain, "-0b") {
+			intv, err := strconv.ParseInt(plain[3:], 2, 64)
 			if err == nil {
 				return "!!int", -int(intv)
 			}
 		}
 		// XXX Handle timestamps here.
 
-	case '<':
-		// XXX Handle merge (<<) here.
-
 	default:
 		panic("resolveTable item not yet handled: " +
 			string([]byte{c}) + " (with " + in + ")")
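The resolver change above replaces the per-character underscore scan with a single strings.Replace pass before the int and float conversions, so YAML number literals written with "_" separators still resolve. A rough standalone sketch of that idea, not the package's actual resolve function (resolveScalar and its fallback to !!str are illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// resolveScalar mimics the numeric branch above: strip "_" separators once,
// then try int and float conversions in order, falling back to a string tag.
func resolveScalar(in string) (tag string, out interface{}) {
	plain := strings.Replace(in, "_", "", -1)
	if intv, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return "!!int", intv
	}
	if floatv, err := strconv.ParseFloat(plain, 64); err == nil {
		return "!!float", floatv
	}
	return "!!str", in
}

func main() {
	fmt.Println(resolveScalar("1_000_000")) // !!int 1000000
	fmt.Println(resolveScalar("6.8_5"))     // !!float 6.85
}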
@@ -1,4 +1,4 @@
-package goyaml
+package yaml
 
 import (
 	"bytes"
@@ -1,4 +1,4 @@
-package goyaml
+package yaml
 
 import (
 	"reflect"
@@ -1,7 +1,7 @@
-package goyaml_test
+package yaml_test
 
 import (
-	. "launchpad.net/gocheck"
+	. "gopkg.in/check.v1"
 	"testing"
 )
 
@@ -1,4 +1,4 @@
-package goyaml
+package yaml
 
 // Set the writer error and return false.
 func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
@@ -1,5 +1,10 @@
-// Package goyaml implements YAML support for the Go language.
-package goyaml
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
 
 import (
 	"errors"
@@ -28,32 +33,31 @@ func handleErr(err *error) {
 	}
 }
 
-// Objects implementing the goyaml.Setter interface will receive the YAML
-// tag and value via the SetYAML method during unmarshaling, rather than
-// being implicitly assigned by the goyaml machinery. If setting the value
-// works, the method should return true. If it returns false, the given
-// value will be omitted from maps and slices.
+// The Setter interface may be implemented by types to do their own custom
+// unmarshalling of YAML values, rather than being implicitly assigned by
+// the yaml package machinery. If setting the value works, the method should
+// return true. If it returns false, the value is considered unsupported
+// and is omitted from maps and slices.
 type Setter interface {
 	SetYAML(tag string, value interface{}) bool
 }
 
-// Objects implementing the goyaml.Getter interface will get the GetYAML()
-// method called when goyaml is requested to marshal the given value, and
-// the result of this method will be marshaled in place of the actual object.
+// The Getter interface is implemented by types to do their own custom
+// marshalling into a YAML tag and value.
 type Getter interface {
 	GetYAML() (tag string, value interface{})
 }
 
 // Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the object pointed by out.
+// and assigns decoded values into the out value.
 //
-// Maps, pointers to structs and ints, etc, may all be used as out values.
-// If an internal pointer within a struct is not initialized, goyaml
-// will initialize it if necessary for unmarshalling the provided data,
-// but the struct provided as out must not be a nil pointer.
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
 //
 // The type of the decoded values and the type of out will be considered,
-// and Unmarshal() will do the best possible job to unmarshal values
+// and Unmarshal will do the best possible job to unmarshal values
 // appropriately. It is NOT considered an error, though, to skip values
 // because they are not available in the decoded YAML, or if they are not
 // compatible with the out value. To ensure something was properly
@@ -61,11 +65,11 @@ type Getter interface {
 // field (usually the zero value).
 //
 // Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and will be unmarshalled using the field
-// name lowercased by default. When custom field names are desired, the
-// tag value may be used to tweak the name. Everything before the first
-// comma in the field tag will be used as the name. The values following
-// the comma are used to tweak the marshalling process (see Marshal).
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
 // Conflicting names result in a runtime error.
 //
 // For example:
@@ -75,7 +79,7 @@ type Getter interface {
 //     B int
 // }
 // var T t
-// goyaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
 //
 // See the documentation of Marshal for the format of tags and a list of
 // supported tag options.
@@ -94,14 +98,16 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 
 // Marshal serializes the value provided into a YAML document. The structure
 // of the generated document will reflect the structure of the value itself.
-// Maps, pointers to structs and ints, etc, may all be used as the in value.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
 //
-// In the case of struct values, only exported fields will be serialized.
-// The lowercased field name is used as the key for each exported field,
-// but this behavior may be changed using the respective field tag.
-// The tag may also contain flags to tweak the marshalling behavior for
-// the field. Conflicting names result in a runtime error. The tag format
-// accepted is:
+// Struct fields are only unmarshalled if they are exported (have an upper case
+// first letter), and are unmarshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
 //
 //     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
 //
@@ -126,8 +132,8 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 //     F int "a,omitempty"
 //     B int
 // }
-// goyaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// goyaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
 //
 func Marshal(in interface{}) (out []byte, err error) {
 	defer handleErr(&err)
@@ -142,7 +148,7 @@ func Marshal(in interface{}) (out []byte, err error) {
 // --------------------------------------------------------------------------
 // Maintain a mapping of keys to structure field indexes
 
-// The code in this section was copied from gobson.
+// The code in this section was copied from mgo/bson.
 
 // structInfo holds details for the serialization of fields of
 // a given struct.
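The revised comments above describe how struct fields map to YAML keys: exported fields only, lowercased names by default, and a "yaml" tag whose first comma-separated piece is the key and whose remaining pieces are flags such as omitempty. A small usage sketch of that behaviour; the gopkg.in/yaml.v1 import path is an assumption here, use whatever path the renamed package is vendored under:

package main

import (
	"fmt"

	"gopkg.in/yaml.v1" // assumed import path for the renamed yaml package
)

type T struct {
	A int    `yaml:"a"`           // custom key via the "yaml" tag
	B string `yaml:"b,omitempty"` // omitted from the output when empty
	C int    // defaults to the lowercased field name: "c"
}

func main() {
	out, _ := yaml.Marshal(&T{A: 1, C: 3})
	fmt.Printf("%s", out) // "a: 1\nc: 3\n" (b is omitted)

	var t T
	_ = yaml.Unmarshal([]byte("a: 1\nb: two\nc: 3"), &t)
	fmt.Printf("%+v\n", t)
}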
@@ -1,4 +1,4 @@
-package goyaml
+package yaml
 
 import (
 	"io"
@@ -1,4 +1,4 @@
-package goyaml
+package yaml
 
 const (
 	// The size of the input raw buffer.
MAINTAINERS (new file, 3 lines)
@@ -0,0 +1,3 @@
+Alex Crawford <alex.crawford@coreos.com> (@crawford)
+Jonathan Boulle <jonathan.boulle@coreos.com> (@jonboulle)
+Brian Waldon <brian.waldon@coreos.com> (@bcwaldon)
@@ -1,188 +1,377 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/coreos-cloudinit/datasource"
|
"github.com/coreos/coreos-cloudinit/datasource"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/file"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/url"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/waagent"
|
||||||
"github.com/coreos/coreos-cloudinit/initialize"
|
"github.com/coreos/coreos-cloudinit/initialize"
|
||||||
"github.com/coreos/coreos-cloudinit/network"
|
"github.com/coreos/coreos-cloudinit/pkg"
|
||||||
"github.com/coreos/coreos-cloudinit/system"
|
"github.com/coreos/coreos-cloudinit/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
const version = "0.7.7"
|
const (
|
||||||
|
version = "0.10.7"
|
||||||
|
datasourceInterval = 100 * time.Millisecond
|
||||||
|
datasourceMaxInterval = 30 * time.Second
|
||||||
|
datasourceTimeout = 5 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
flags = struct {
|
||||||
|
printVersion bool
|
||||||
|
ignoreFailure bool
|
||||||
|
sources struct {
|
||||||
|
file string
|
||||||
|
configDrive string
|
||||||
|
waagent string
|
||||||
|
metadataService bool
|
||||||
|
ec2MetadataService string
|
||||||
|
cloudSigmaMetadataService bool
|
||||||
|
digitalOceanMetadataService string
|
||||||
|
url string
|
||||||
|
procCmdLine bool
|
||||||
|
}
|
||||||
|
convertNetconf string
|
||||||
|
workspace string
|
||||||
|
sshKeyName string
|
||||||
|
oem string
|
||||||
|
}{}
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.BoolVar(&flags.printVersion, "version", false, "Print the version and exit")
|
||||||
|
flag.BoolVar(&flags.ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
|
||||||
|
flag.StringVar(&flags.sources.file, "from-file", "", "Read user-data from provided file")
|
||||||
|
flag.StringVar(&flags.sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory")
|
||||||
|
flag.StringVar(&flags.sources.waagent, "from-waagent", "", "Read data from provided waagent directory")
|
||||||
|
flag.BoolVar(&flags.sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service")
|
||||||
|
flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url")
|
||||||
|
flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
|
||||||
|
flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url")
|
||||||
|
flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url")
|
||||||
|
flag.BoolVar(&flags.sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag))
|
||||||
|
flag.StringVar(&flags.oem, "oem", "", "Use the settings specific to the provided OEM")
|
||||||
|
flag.StringVar(&flags.convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files")
|
||||||
|
flag.StringVar(&flags.workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
|
||||||
|
flag.StringVar(&flags.sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
|
||||||
|
}
|
||||||
|
|
||||||
|
type oemConfig map[string]string
|
||||||
|
|
||||||
|
var (
|
||||||
|
oemConfigs = map[string]oemConfig{
|
||||||
|
"digitalocean": oemConfig{
|
||||||
|
"from-digitalocean-metadata": "http://169.254.169.254/",
|
||||||
|
"convert-netconf": "digitalocean",
|
||||||
|
},
|
||||||
|
"ec2-compat": oemConfig{
|
||||||
|
"from-ec2-metadata": "http://169.254.169.254/",
|
||||||
|
"from-configdrive": "/media/configdrive",
|
||||||
|
},
|
||||||
|
"rackspace-onmetal": oemConfig{
|
||||||
|
"from-configdrive": "/media/configdrive",
|
||||||
|
"convert-netconf": "debian",
|
||||||
|
},
|
||||||
|
"azure": oemConfig{
|
||||||
|
"from-waagent": "/var/lib/waagent",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var printVersion bool
|
failure := false
|
||||||
flag.BoolVar(&printVersion, "version", false, "Print the version and exit")
|
|
||||||
|
|
||||||
var ignoreFailure bool
|
|
||||||
flag.BoolVar(&ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
|
|
||||||
|
|
||||||
var file string
|
|
||||||
flag.StringVar(&file, "from-file", "", "Read user-data from provided file")
|
|
||||||
|
|
||||||
var configdrive string
|
|
||||||
flag.StringVar(&configdrive, "from-configdrive", "", "Read user-data from provided cloud-drive directory")
|
|
||||||
|
|
||||||
var url string
|
|
||||||
flag.StringVar(&url, "from-url", "", "Download user-data from provided url")
|
|
||||||
|
|
||||||
var useProcCmdline bool
|
|
||||||
flag.BoolVar(&useProcCmdline, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", datasource.ProcCmdlineLocation, datasource.ProcCmdlineCloudConfigFlag))
|
|
||||||
|
|
||||||
var convertNetconf string
|
|
||||||
flag.StringVar(&convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files (requires the -from-configdrive flag)")
|
|
||||||
|
|
||||||
var workspace string
|
|
||||||
flag.StringVar(&workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
|
|
||||||
|
|
||||||
var sshKeyName string
|
|
||||||
flag.StringVar(&sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
|
|
||||||
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if printVersion == true {
|
if c, ok := oemConfigs[flags.oem]; ok {
|
||||||
|
for k, v := range c {
|
||||||
|
flag.Set(k, v)
|
||||||
|
}
|
||||||
|
} else if flags.oem != "" {
|
||||||
|
oems := make([]string, 0, len(oemConfigs))
|
||||||
|
for k := range oemConfigs {
|
||||||
|
oems = append(oems, k)
|
||||||
|
}
|
||||||
|
fmt.Printf("Invalid option to --oem: %q. Supported options: %q\n", flags.oem, oems)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
if flags.printVersion == true {
|
||||||
fmt.Printf("coreos-cloudinit version %s\n", version)
|
fmt.Printf("coreos-cloudinit version %s\n", version)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
var ds datasource.Datasource
|
switch flags.convertNetconf {
|
||||||
if file != "" {
|
|
||||||
ds = datasource.NewLocalFile(file)
|
|
||||||
} else if url != "" {
|
|
||||||
ds = datasource.NewMetadataService(url)
|
|
||||||
} else if configdrive != "" {
|
|
||||||
ds = datasource.NewConfigDrive(configdrive)
|
|
||||||
} else if useProcCmdline {
|
|
||||||
ds = datasource.NewProcCmdline()
|
|
||||||
} else {
|
|
||||||
fmt.Println("Provide one of --from-file, --from-configdrive, --from-url or --from-proc-cmdline")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if convertNetconf != "" && configdrive == "" {
|
|
||||||
fmt.Println("-convert-netconf flag requires -from-configdrive")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch convertNetconf {
|
|
||||||
case "":
|
case "":
|
||||||
case "debian":
|
case "debian":
|
||||||
|
case "digitalocean":
|
||||||
default:
|
default:
|
||||||
fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian'\n", convertNetconf)
|
fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean'\n", flags.convertNetconf)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
dss := getDatasources()
|
||||||
|
if len(dss) == 0 {
|
||||||
|
fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-url or --from-proc-cmdline")
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
ds := selectDatasource(dss)
|
||||||
|
if ds == nil {
|
||||||
|
fmt.Println("No datasources available in time")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Fetching user-data from datasource of type %q\n", ds.Type())
|
fmt.Printf("Fetching user-data from datasource of type %q\n", ds.Type())
|
||||||
userdataBytes, err := ds.Fetch()
|
userdataBytes, err := ds.FetchUserdata()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("Failed fetching user-data from datasource: %v\n", err)
|
fmt.Printf("Failed fetching user-data from datasource: %v\nContinuing...\n", err)
|
||||||
if ignoreFailure {
|
failure = true
|
||||||
os.Exit(0)
|
}
|
||||||
} else {
|
|
||||||
|
fmt.Printf("Fetching meta-data from datasource of type %q\n", ds.Type())
|
||||||
|
metadataBytes, err := ds.FetchMetadata()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Failed fetching meta-data from datasource: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract IPv4 addresses from metadata if possible
|
||||||
|
var subs map[string]string
|
||||||
|
if len(metadataBytes) > 0 {
|
||||||
|
subs, err = initialize.ExtractIPsFromMetadata(metadataBytes)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Failed extracting IPs from meta-data: %v\n", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
env := initialize.NewEnvironment("/", workspace)
|
// Apply environment to user-data
|
||||||
if len(userdataBytes) > 0 {
|
env := initialize.NewEnvironment("/", ds.ConfigRoot(), flags.workspace, flags.convertNetconf, flags.sshKeyName, subs)
|
||||||
if err := processUserdata(string(userdataBytes), env); err != nil {
|
userdata := env.Apply(string(userdataBytes))
|
||||||
fmt.Printf("Failed resolving user-data: %v\n", err)
|
|
||||||
if !ignoreFailure {
|
var ccm, ccu *initialize.CloudConfig
|
||||||
os.Exit(1)
|
var script *system.Script
|
||||||
}
|
if ccm, err = initialize.ParseMetaData(string(metadataBytes)); err != nil {
|
||||||
}
|
fmt.Printf("Failed to parse meta-data: %v\n", err)
|
||||||
} else {
|
os.Exit(1)
|
||||||
fmt.Println("No user data to handle.")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if convertNetconf != "" {
|
if ccm != nil && flags.convertNetconf != "" {
|
||||||
if err := processNetconf(convertNetconf, configdrive); err != nil {
|
fmt.Printf("Fetching network config from datasource of type %q\n", ds.Type())
|
||||||
fmt.Printf("Failed to process network config: %v\n", err)
|
netconfBytes, err := ds.FetchNetworkConfig(ccm.NetworkConfigPath)
|
||||||
if !ignoreFailure {
|
if err != nil {
|
||||||
os.Exit(1)
|
fmt.Printf("Failed fetching network config from datasource: %v\n", err)
|
||||||
}
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
ccm.NetworkConfig = string(netconfBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ud, err := initialize.ParseUserData(userdata); err != nil {
|
||||||
|
fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err)
|
||||||
|
failure = true
|
||||||
|
} else {
|
||||||
|
switch t := ud.(type) {
|
||||||
|
case *initialize.CloudConfig:
|
||||||
|
ccu = t
|
||||||
|
case system.Script:
|
||||||
|
script = &t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var cc *initialize.CloudConfig
|
||||||
|
if ccm != nil && ccu != nil {
|
||||||
|
fmt.Println("Merging cloud-config from meta-data and user-data")
|
||||||
|
merged := mergeCloudConfig(*ccm, *ccu)
|
||||||
|
cc = &merged
|
||||||
|
} else if ccm != nil && ccu == nil {
|
||||||
|
fmt.Println("Processing cloud-config from meta-data")
|
||||||
|
cc = ccm
|
||||||
|
} else if ccm == nil && ccu != nil {
|
||||||
|
fmt.Println("Processing cloud-config from user-data")
|
||||||
|
cc = ccu
|
||||||
|
} else {
|
||||||
|
fmt.Println("No cloud-config data to handle.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if cc != nil {
|
||||||
|
if err = initialize.Apply(*cc, env); err != nil {
|
||||||
|
fmt.Printf("Failed to apply cloud-config: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if script != nil {
|
||||||
|
if err = runScript(*script, env); err != nil {
|
||||||
|
fmt.Printf("Failed to run script: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if failure && !flags.ignoreFailure {
|
||||||
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func processUserdata(userdata string, env *initialize.Environment) error {
|
// mergeCloudConfig merges certain options from mdcc (a CloudConfig derived from
|
||||||
userdata = env.Apply(userdata)
|
// meta-data) onto udcc (a CloudConfig derived from user-data), if they are
|
||||||
|
// not already set on udcc (i.e. user-data always takes precedence)
|
||||||
|
// NB: This needs to be kept in sync with ParseMetadata so that it tracks all
|
||||||
|
// elements of a CloudConfig which that function can populate.
|
||||||
|
func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudConfig) {
|
||||||
|
if mdcc.Hostname != "" {
|
||||||
|
if udcc.Hostname != "" {
|
||||||
|
fmt.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", udcc.Hostname, mdcc.Hostname)
|
||||||
|
} else {
|
||||||
|
udcc.Hostname = mdcc.Hostname
|
||||||
|
}
|
||||||
|
|
||||||
parsed, err := initialize.ParseUserData(userdata)
|
}
|
||||||
if err != nil {
|
for _, key := range mdcc.SSHAuthorizedKeys {
|
||||||
fmt.Printf("Failed parsing user-data: %v\n", err)
|
udcc.SSHAuthorizedKeys = append(udcc.SSHAuthorizedKeys, key)
|
||||||
return err
|
}
|
||||||
|
if mdcc.NetworkConfigPath != "" {
|
||||||
|
if udcc.NetworkConfigPath != "" {
|
||||||
|
fmt.Printf("Warning: user-data NetworkConfigPath %s overrides metadata NetworkConfigPath %s\n", udcc.NetworkConfigPath, mdcc.NetworkConfigPath)
|
||||||
|
} else {
|
||||||
|
udcc.NetworkConfigPath = mdcc.NetworkConfigPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if mdcc.NetworkConfig != "" {
|
||||||
|
if udcc.NetworkConfig != "" {
|
||||||
|
fmt.Printf("Warning: user-data NetworkConfig %s overrides metadata NetworkConfig %s\n", udcc.NetworkConfig, mdcc.NetworkConfig)
|
||||||
|
} else {
|
||||||
|
udcc.NetworkConfig = mdcc.NetworkConfig
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return udcc
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDatasources creates a slice of possible Datasources for cloudinit based
|
||||||
|
// on the different source command-line flags.
|
||||||
|
func getDatasources() []datasource.Datasource {
|
||||||
|
dss := make([]datasource.Datasource, 0, 5)
|
||||||
|
if flags.sources.file != "" {
|
||||||
|
dss = append(dss, file.NewDatasource(flags.sources.file))
|
||||||
|
}
|
||||||
|
if flags.sources.url != "" {
|
||||||
|
dss = append(dss, url.NewDatasource(flags.sources.url))
|
||||||
|
}
|
||||||
|
if flags.sources.configDrive != "" {
|
||||||
|
dss = append(dss, configdrive.NewDatasource(flags.sources.configDrive))
|
||||||
|
}
|
||||||
|
if flags.sources.metadataService {
|
||||||
|
dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
|
||||||
|
}
|
||||||
|
if flags.sources.ec2MetadataService != "" {
|
||||||
|
dss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService))
|
||||||
|
}
|
||||||
|
if flags.sources.cloudSigmaMetadataService {
|
||||||
|
dss = append(dss, cloudsigma.NewServerContextService())
|
||||||
|
}
|
||||||
|
if flags.sources.digitalOceanMetadataService != "" {
|
||||||
|
dss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService))
|
||||||
|
}
|
||||||
|
if flags.sources.waagent != "" {
|
||||||
|
dss = append(dss, waagent.NewDatasource(flags.sources.waagent))
|
||||||
|
}
|
||||||
|
if flags.sources.procCmdLine {
|
||||||
|
dss = append(dss, proc_cmdline.NewDatasource())
|
||||||
|
}
|
||||||
|
return dss
|
||||||
|
}
|
||||||
|
|
||||||
|
// selectDatasource attempts to choose a valid Datasource to use based on its
|
||||||
|
// current availability. The first Datasource to report to be available is
|
||||||
|
// returned. Datasources will be retried if possible if they are not
|
||||||
|
// immediately available. If all Datasources are permanently unavailable or
|
||||||
|
// datasourceTimeout is reached before one becomes available, nil is returned.
|
||||||
|
func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
|
||||||
|
ds := make(chan datasource.Datasource)
|
||||||
|
stop := make(chan struct{})
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for _, s := range sources {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(s datasource.Datasource) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
duration := datasourceInterval
|
||||||
|
for {
|
||||||
|
fmt.Printf("Checking availability of %q\n", s.Type())
|
||||||
|
if s.IsAvailable() {
|
||||||
|
ds <- s
|
||||||
|
return
|
||||||
|
} else if !s.AvailabilityChanges() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-stop:
|
||||||
|
return
|
||||||
|
case <-time.After(duration):
|
||||||
|
duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = initialize.PrepWorkspace(env.Workspace())
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var s datasource.Datasource
|
||||||
|
select {
|
||||||
|
case s = <-ds:
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(datasourceTimeout):
|
||||||
|
}
|
||||||
|
|
||||||
|
close(stop)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(jonboulle): this should probably be refactored and moved into a different module
|
||||||
|
func runScript(script system.Script, env *initialize.Environment) error {
|
||||||
|
err := initialize.PrepWorkspace(env.Workspace())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("Failed preparing workspace: %v\n", err)
|
fmt.Printf("Failed preparing workspace: %v\n", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
path, err := initialize.PersistScriptInWorkspace(script, env.Workspace())
|
||||||
switch t := parsed.(type) {
|
if err == nil {
|
||||||
case initialize.CloudConfig:
|
var name string
|
||||||
err = initialize.Apply(t, env)
|
name, err = system.ExecuteScript(path)
|
||||||
case system.Script:
|
initialize.PersistUnitNameInWorkspace(name, env.Workspace())
|
||||||
var path string
|
|
||||||
path, err = initialize.PersistScriptInWorkspace(t, env.Workspace())
|
|
||||||
if err == nil {
|
|
||||||
var name string
|
|
||||||
name, err = system.ExecuteScript(path)
|
|
||||||
initialize.PersistUnitNameInWorkspace(name, env.Workspace())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func processNetconf(convertNetconf, configdrive string) error {
|
|
||||||
openstackRoot := path.Join(configdrive, "openstack")
|
|
||||||
metadataFilename := path.Join(openstackRoot, "latest", "meta_data.json")
|
|
||||||
metadataBytes, err := ioutil.ReadFile(metadataFilename)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var metadata struct {
|
|
||||||
NetworkConfig struct {
|
|
||||||
ContentPath string `json:"content_path"`
|
|
||||||
} `json:"network_config"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
configPath := metadata.NetworkConfig.ContentPath
|
|
||||||
if configPath == "" {
|
|
||||||
fmt.Printf("No network config specified in %q.\n", metadataFilename)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
netconfBytes, err := ioutil.ReadFile(path.Join(openstackRoot, configPath))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var interfaces []network.InterfaceGenerator
|
|
||||||
switch convertNetconf {
|
|
||||||
case "debian":
|
|
||||||
interfaces, err = network.ProcessDebianNetconf(string(netconfBytes))
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Unsupported network config format %q", convertNetconf)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := system.WriteNetworkdConfigs(interfaces); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return system.RestartNetwork(interfaces)
|
|
||||||
}
|
|
||||||
|
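The new main flow above merges cloud-config from meta-data and user-data, with user-data taking precedence for scalar fields and SSH keys concatenated from both sources. A simplified standalone sketch of that precedence rule; the cloudConfig type below is a stand-in for illustration, not the project's initialize.CloudConfig:

package main

import "fmt"

// cloudConfig is a pared-down stand-in carrying only the fields the merge uses.
type cloudConfig struct {
	Hostname          string
	SSHAuthorizedKeys []string
}

// merge applies meta-data values only where user-data left them unset;
// SSH keys from both sources are kept, user-data keys first.
func merge(md, ud cloudConfig) cloudConfig {
	if ud.Hostname == "" {
		ud.Hostname = md.Hostname
	}
	ud.SSHAuthorizedKeys = append(ud.SSHAuthorizedKeys, md.SSHAuthorizedKeys...)
	return ud
}

func main() {
	md := cloudConfig{Hostname: "meta-hostname", SSHAuthorizedKeys: []string{"key-a"}}
	ud := cloudConfig{Hostname: "user-hostname", SSHAuthorizedKeys: []string{"key-b"}}
	fmt.Printf("%+v\n", merge(md, ud)) // {Hostname:user-hostname SSHAuthorizedKeys:[key-b key-a]}
}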
coreos-cloudinit_test.go (new file, 136 lines)
@@ -0,0 +1,136 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/coreos/coreos-cloudinit/initialize"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMergeCloudConfig(t *testing.T) {
|
||||||
|
simplecc := initialize.CloudConfig{
|
||||||
|
SSHAuthorizedKeys: []string{"abc", "def"},
|
||||||
|
Hostname: "foobar",
|
||||||
|
NetworkConfigPath: "/path/somewhere",
|
||||||
|
NetworkConfig: `{}`,
|
||||||
|
}
|
||||||
|
for i, tt := range []struct {
|
||||||
|
udcc initialize.CloudConfig
|
||||||
|
mdcc initialize.CloudConfig
|
||||||
|
want initialize.CloudConfig
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
// If mdcc is empty, udcc should be returned unchanged
|
||||||
|
simplecc,
|
||||||
|
initialize.CloudConfig{},
|
||||||
|
simplecc,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// If udcc is empty, mdcc should be returned unchanged(overridden)
|
||||||
|
initialize.CloudConfig{},
|
||||||
|
simplecc,
|
||||||
|
simplecc,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// user-data should override completely in the case of conflicts
|
||||||
|
simplecc,
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "meta-hostname",
|
||||||
|
NetworkConfigPath: "/path/meta",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
simplecc,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Mixed merge should succeed
|
||||||
|
initialize.CloudConfig{
|
||||||
|
SSHAuthorizedKeys: []string{"abc", "def"},
|
||||||
|
Hostname: "user-hostname",
|
||||||
|
NetworkConfigPath: "/path/somewhere",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
SSHAuthorizedKeys: []string{"woof", "qux"},
|
||||||
|
Hostname: "meta-hostname",
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
SSHAuthorizedKeys: []string{"abc", "def", "woof", "qux"},
|
||||||
|
Hostname: "user-hostname",
|
||||||
|
NetworkConfigPath: "/path/somewhere",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Completely non-conflicting merge should be fine
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "supercool",
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
SSHAuthorizedKeys: []string{"zaphod", "beeblebrox"},
|
||||||
|
NetworkConfigPath: "/dev/fun",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "supercool",
|
||||||
|
SSHAuthorizedKeys: []string{"zaphod", "beeblebrox"},
|
||||||
|
NetworkConfigPath: "/dev/fun",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Non-mergeable settings in user-data should not be affected
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "mememe",
|
||||||
|
ManageEtcHosts: initialize.EtcHosts("lolz"),
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "youyouyou",
|
||||||
|
NetworkConfigPath: "meta-meta-yo",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "mememe",
|
||||||
|
ManageEtcHosts: initialize.EtcHosts("lolz"),
|
||||||
|
NetworkConfigPath: "meta-meta-yo",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Non-mergeable (unexpected) settings in meta-data are ignored
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "mememe",
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
ManageEtcHosts: initialize.EtcHosts("lolz"),
|
||||||
|
NetworkConfigPath: "meta-meta-yo",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
initialize.CloudConfig{
|
||||||
|
Hostname: "mememe",
|
||||||
|
NetworkConfigPath: "meta-meta-yo",
|
||||||
|
NetworkConfig: `{"hostname":"test"}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
got := mergeCloudConfig(tt.mdcc, tt.udcc)
|
||||||
|
if !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("case #%d: mergeCloudConfig mutated CloudConfig unexpectedly:\ngot:\n%s\nwant:\n%s", i, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -1,27 +0,0 @@
-package datasource
-
-import (
-	"io/ioutil"
-	"os"
-	"path"
-)
-
-type configDrive struct {
-	path string
-}
-
-func NewConfigDrive(path string) *configDrive {
-	return &configDrive{path}
-}
-
-func (self *configDrive) Fetch() ([]byte, error) {
-	data, err := ioutil.ReadFile(path.Join(self.path, "openstack", "latest", "user_data"))
-	if os.IsNotExist(err) {
-		err = nil
-	}
-	return data, err
-}
-
-func (self *configDrive) Type() string {
-	return "cloud-drive"
-}
datasource/configdrive/configdrive.go (new file, 86 lines)
@@ -0,0 +1,86 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package configdrive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
openstackApiVersion = "latest"
|
||||||
|
)
|
||||||
|
|
||||||
|
type configDrive struct {
|
||||||
|
root string
|
||||||
|
readFile func(filename string) ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDatasource(root string) *configDrive {
|
||||||
|
return &configDrive{root, ioutil.ReadFile}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) IsAvailable() bool {
|
||||||
|
_, err := os.Stat(cd.root)
|
||||||
|
return !os.IsNotExist(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) AvailabilityChanges() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) ConfigRoot() string {
|
||||||
|
return cd.openstackRoot()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) FetchMetadata() ([]byte, error) {
|
||||||
|
return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) FetchUserdata() ([]byte, error) {
|
||||||
|
return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "user_data"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) FetchNetworkConfig(filename string) ([]byte, error) {
|
||||||
|
if filename == "" {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
return cd.tryReadFile(path.Join(cd.openstackRoot(), filename))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) Type() string {
|
||||||
|
return "cloud-drive"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) openstackRoot() string {
|
||||||
|
return path.Join(cd.root, "openstack")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) openstackVersionRoot() string {
|
||||||
|
return path.Join(cd.openstackRoot(), openstackApiVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cd *configDrive) tryReadFile(filename string) ([]byte, error) {
|
||||||
|
fmt.Printf("Attempting to read from %q\n", filename)
|
||||||
|
data, err := cd.readFile(filename)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return data, err
|
||||||
|
}
|
datasource/configdrive/configdrive_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package configdrive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockFilesystem []string
|
||||||
|
|
||||||
|
func (m mockFilesystem) readFile(filename string) ([]byte, error) {
|
||||||
|
for _, file := range m {
|
||||||
|
if file == filename {
|
||||||
|
return []byte(filename), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetchMetadata(t *testing.T) {
|
||||||
|
for _, tt := range []struct {
|
||||||
|
root string
|
||||||
|
filename string
|
||||||
|
files mockFilesystem
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"/",
|
||||||
|
"",
|
||||||
|
mockFilesystem{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/",
|
||||||
|
"/openstack/latest/meta_data.json",
|
||||||
|
mockFilesystem([]string{"/openstack/latest/meta_data.json"}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/media/configdrive",
|
||||||
|
"/media/configdrive/openstack/latest/meta_data.json",
|
||||||
|
mockFilesystem([]string{"/media/configdrive/openstack/latest/meta_data.json"}),
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
cd := configDrive{tt.root, tt.files.readFile}
|
||||||
|
filename, err := cd.FetchMetadata()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
|
||||||
|
}
|
||||||
|
if string(filename) != tt.filename {
|
||||||
|
t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetchUserdata(t *testing.T) {
|
||||||
|
for _, tt := range []struct {
|
||||||
|
root string
|
||||||
|
filename string
|
||||||
|
files mockFilesystem
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"/",
|
||||||
|
"",
|
||||||
|
mockFilesystem{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/",
|
||||||
|
"/openstack/latest/user_data",
|
||||||
|
mockFilesystem([]string{"/openstack/latest/user_data"}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/media/configdrive",
|
||||||
|
"/media/configdrive/openstack/latest/user_data",
|
||||||
|
mockFilesystem([]string{"/media/configdrive/openstack/latest/user_data"}),
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
cd := configDrive{tt.root, tt.files.readFile}
|
||||||
|
filename, err := cd.FetchUserdata()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
|
||||||
|
}
|
||||||
|
if string(filename) != tt.filename {
|
||||||
|
t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigRoot(t *testing.T) {
|
||||||
|
for _, tt := range []struct {
|
||||||
|
root string
|
||||||
|
configRoot string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"/",
|
||||||
|
"/openstack",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/media/configdrive",
|
||||||
|
"/media/configdrive/openstack",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
cd := configDrive{tt.root, nil}
|
||||||
|
if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot {
|
||||||
|
t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewDatasource(t *testing.T) {
|
||||||
|
for _, tt := range []struct {
|
||||||
|
root string
|
||||||
|
expectRoot string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
root: "",
|
||||||
|
expectRoot: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "/media/configdrive",
|
||||||
|
expectRoot: "/media/configdrive",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
service := NewDatasource(tt.root)
|
||||||
|
if service.root != tt.expectRoot {
|
||||||
|
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -1,6 +1,27 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package datasource
 
 type Datasource interface {
-	Fetch() ([]byte, error)
+	IsAvailable() bool
+	AvailabilityChanges() bool
+	ConfigRoot() string
+	FetchMetadata() ([]byte, error)
+	FetchUserdata() ([]byte, error)
+	FetchNetworkConfig(string) ([]byte, error)
 	Type() string
 }
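The expanded Datasource interface above separates availability probing from the user-data, meta-data, and network-config fetches. A minimal toy implementation, shown only to illustrate the shape of the interface; staticSource is hypothetical and not part of the tree:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource"
)

// staticSource is a toy datasource serving fixed bytes; it is always available.
type staticSource struct {
	userdata []byte
}

func (s staticSource) IsAvailable() bool                         { return true }
func (s staticSource) AvailabilityChanges() bool                 { return false }
func (s staticSource) ConfigRoot() string                        { return "" }
func (s staticSource) FetchMetadata() ([]byte, error)            { return []byte{}, nil }
func (s staticSource) FetchUserdata() ([]byte, error)            { return s.userdata, nil }
func (s staticSource) FetchNetworkConfig(string) ([]byte, error) { return nil, nil }
func (s staticSource) Type() string                              { return "static" }

func main() {
	var ds datasource.Datasource = staticSource{userdata: []byte("#cloud-config\nhostname: example")}
	fmt.Println(ds.Type())
}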
@@ -1,21 +0,0 @@
-package datasource
-
-import (
-	"io/ioutil"
-)
-
-type localFile struct {
-	path string
-}
-
-func NewLocalFile(path string) *localFile {
-	return &localFile{path}
-}
-
-func (self *localFile) Fetch() ([]byte, error) {
-	return ioutil.ReadFile(self.path)
-}
-
-func (self *localFile) Type() string {
-	return "local-file"
-}
datasource/file/file.go (new file, 59 lines)
@@ -0,0 +1,59 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package file
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
type localFile struct {
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDatasource(path string) *localFile {
|
||||||
|
return &localFile{path}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) IsAvailable() bool {
|
||||||
|
_, err := os.Stat(f.path)
|
||||||
|
return !os.IsNotExist(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) AvailabilityChanges() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) ConfigRoot() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) FetchMetadata() ([]byte, error) {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) FetchUserdata() ([]byte, error) {
|
||||||
|
return ioutil.ReadFile(f.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) FetchNetworkConfig(filename string) ([]byte, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *localFile) Type() string {
|
||||||
|
return "local-file"
|
||||||
|
}
|
datasource/metadata/cloudsigma/server_context.go (new file, 161 lines)
@@ -0,0 +1,161 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cloudsigma
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/cloudsigma/cepgo"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
userDataFieldName = "cloudinit-user-data"
|
||||||
|
)
|
||||||
|
|
||||||
|
type serverContextService struct {
|
||||||
|
client interface {
|
||||||
|
All() (interface{}, error)
|
||||||
|
Key(string) (interface{}, error)
|
||||||
|
Meta() (map[string]string, error)
|
||||||
|
FetchRaw(string) ([]byte, error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewServerContextService() *serverContextService {
|
||||||
|
return &serverContextService{
|
||||||
|
client: cepgo.NewCepgo(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *serverContextService) IsAvailable() bool {
|
||||||
|
productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
productName := make([]byte, 10)
|
||||||
|
_, err = productNameFile.Read(productName)
|
||||||
|
return err == nil && string(productName) == "CloudSigma"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *serverContextService) AvailabilityChanges() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *serverContextService) ConfigRoot() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *serverContextService) Type() string {
|
||||||
|
return "server-context"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (scs *serverContextService) FetchMetadata() ([]byte, error) {
|
||||||
|
var (
|
||||||
|
inputMetadata struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
UUID string `json:"uuid"`
|
||||||
|
Meta map[string]string `json:"meta"`
|
||||||
|
Nics []struct {
|
||||||
|
Runtime struct {
|
||||||
|
InterfaceType string `json:"interface_type"`
|
||||||
|
IPv4 struct {
|
||||||
|
IP string `json:"uuid"`
|
||||||
|
} `json:"ip_v4"`
|
||||||
|
} `json:"runtime"`
|
||||||
|
} `json:"nics"`
|
||||||
|
}
|
||||||
|
outputMetadata struct {
|
||||||
|
Hostname string `json:"name"`
|
||||||
|
PublicKeys map[string]string `json:"public_keys"`
|
||||||
|
LocalIPv4 string `json:"local-ipv4"`
|
||||||
|
PublicIPv4 string `json:"public-ipv4"`
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
rawMetadata, err := scs.client.FetchRaw("")
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(rawMetadata, &inputMetadata)
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if inputMetadata.Name != "" {
|
||||||
|
outputMetadata.Hostname = inputMetadata.Name
|
||||||
|
} else {
|
||||||
|
outputMetadata.Hostname = inputMetadata.UUID
|
||||||
|
}
|
||||||
|
|
||||||
|
if key, ok := inputMetadata.Meta["ssh_public_key"]; ok {
|
||||||
|
splitted := strings.Split(key, " ")
|
||||||
|
outputMetadata.PublicKeys = make(map[string]string)
|
||||||
|
outputMetadata.PublicKeys[splitted[len(splitted)-1]] = key
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, nic := range inputMetadata.Nics {
|
||||||
|
if nic.Runtime.IPv4.IP != "" {
|
||||||
|
if nic.Runtime.InterfaceType == "public" {
|
||||||
|
outputMetadata.PublicIPv4 = nic.Runtime.IPv4.IP
|
||||||
|
} else {
|
||||||
|
outputMetadata.LocalIPv4 = nic.Runtime.IPv4.IP
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(outputMetadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (scs *serverContextService) FetchUserdata() ([]byte, error) {
|
||||||
|
metadata, err := scs.client.Meta()
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
userData, ok := metadata[userDataFieldName]
|
||||||
|
if ok && isBase64Encoded(userDataFieldName, metadata) {
|
||||||
|
if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
|
||||||
|
return decodedUserData, nil
|
||||||
|
} else {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return []byte(userData), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (scs *serverContextService) FetchNetworkConfig(a string) ([]byte, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isBase64Encoded(field string, userdata map[string]string) bool {
|
||||||
|
base64Fields, ok := userdata["base64_fields"]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, base64Field := range strings.Split(base64Fields, ",") {
|
||||||
|
if field == base64Field {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
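In the CloudSigma server-context source above, user-data is base64-decoded only when the server's "base64_fields" meta key lists the cloudinit-user-data field. A compact standalone sketch of that check, not the package's code (decodeUserdata is illustrative only):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeUserdata returns the user-data value, base64-decoding it only when
// the meta map's "base64_fields" entry names the user-data field.
func decodeUserdata(meta map[string]string) []byte {
	const field = "cloudinit-user-data"
	raw, ok := meta[field]
	if !ok {
		return []byte{}
	}
	for _, f := range strings.Split(meta["base64_fields"], ",") {
		if f == field {
			if decoded, err := base64.StdEncoding.DecodeString(raw); err == nil {
				return decoded
			}
			return []byte{}
		}
	}
	return []byte(raw)
}

func main() {
	meta := map[string]string{
		"base64_fields":       "cloudinit-user-data",
		"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0", // "hostname: coreos_test"
	}
	fmt.Printf("%s\n", decodeUserdata(meta))
}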
datasource/metadata/cloudsigma/server_context_test.go (new file, 168 lines)
@@ -0,0 +1,168 @@
/*
   Copyright 2014 CoreOS, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cloudsigma

import (
	"encoding/json"
	"reflect"
	"testing"
)

type fakeCepgoClient struct {
	raw  []byte
	meta map[string]string
	keys map[string]interface{}
	err  error
}

func (f *fakeCepgoClient) All() (interface{}, error) {
	return f.keys, f.err
}

func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
	return f.keys[key], f.err
}

func (f *fakeCepgoClient) Meta() (map[string]string, error) {
	return f.meta, f.err
}

func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
	return f.raw, f.err
}

func TestServerContextFetchMetadata(t *testing.T) {
	var metadata struct {
		Hostname   string            `json:"name"`
		PublicKeys map[string]string `json:"public_keys"`
		LocalIPv4  string            `json:"local-ipv4"`
		PublicIPv4 string            `json:"public-ipv4"`
	}
	client := new(fakeCepgoClient)
	scs := NewServerContextService()
	scs.client = client
	client.raw = []byte(`{
		"context": true,
		"cpu": 4000,
		"cpu_model": null,
		"cpus_instead_of_cores": false,
		"enable_numa": false,
		"grantees": [],
		"hv_relaxed": false,
		"hv_tsc": false,
		"jobs": [],
		"mem": 4294967296,
		"meta": {
			"base64_fields": "cloudinit-user-data",
			"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
			"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
		},
		"name": "coreos",
		"nics": [
			{
				"runtime": {
					"interface_type": "public",
					"ip_v4": {
						"uuid": "31.171.251.74"
					},
					"ip_v6": null
				},
				"vlan": null
			}
		],
		"smp": 2,
		"status": "running",
		"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
	}`)

	metadataBytes, err := scs.FetchMetadata()
	if err != nil {
		t.Error(err.Error())
	}

	if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
		t.Error(err.Error())
	}

	if metadata.Hostname != "coreos" {
		t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
	}

	if metadata.PublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
		t.Error("Public SSH Keys are not being read properly")
	}

	if metadata.LocalIPv4 != "" {
		t.Errorf("Local IP is not empty but %s instead", metadata.LocalIPv4)
	}

	if metadata.PublicIPv4 != "31.171.251.74" {
t.Errorf("Local IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
	}
}

func TestServerContextFetchUserdata(t *testing.T) {
	client := new(fakeCepgoClient)
	scs := NewServerContextService()
	scs.client = client
	userdataSets := []struct {
		in  map[string]string
		err bool
		out []byte
	}{
		{map[string]string{
			"base64_fields":       "cloudinit-user-data",
			"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
		}, false, []byte("hostname: coreos_test")},
		{map[string]string{
			"cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
		}, false, []byte("#cloud-config\\nhostname: coreos1")},
		{map[string]string{}, false, []byte{}},
	}

	for i, set := range userdataSets {
		client.meta = set.in
		got, err := scs.FetchUserdata()
		if (err != nil) != set.err {
			t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
		}

		if !reflect.DeepEqual(got, set.out) {
			t.Errorf("case %d: got %s, want %s", i, got, set.out)
		}
	}
}

func TestServerContextDecodingBase64UserData(t *testing.T) {
	base64Sets := []struct {
		in  string
		out bool
	}{
		{"cloudinit-user-data,foo,bar", true},
		{"bar,cloudinit-user-data,foo,bar", true},
		{"cloudinit-user-data", true},
		{"", false},
		{"foo", false},
	}

	for _, set := range base64Sets {
		userdata := map[string]string{"base64_fields": set.in}
		if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
			t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
		}
	}
}
datasource/metadata/digitalocean/metadata.go (new file, 123 lines)
@@ -0,0 +1,123 @@
/*
   Copyright 2014 CoreOS, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package digitalocean

import (
	"encoding/json"
	"strconv"

	"github.com/coreos/coreos-cloudinit/datasource/metadata"
)

const (
	DefaultAddress = "http://169.254.169.254/"
	apiVersion     = "metadata/v1"
	userdataUrl    = apiVersion + "/user-data"
	metadataPath   = apiVersion + ".json"
)

type Address struct {
	IPAddress string `json:"ip_address"`
	Netmask   string `json:"netmask"`
	Cidr      int    `json:"cidr"`
	Gateway   string `json:"gateway"`
}

type Interface struct {
	IPv4 *Address `json:"ipv4"`
	IPv6 *Address `json:"ipv6"`
	MAC  string   `json:"mac"`
	Type string   `json:"type"`
}

type Interfaces struct {
	Public  []Interface `json:"public"`
	Private []Interface `json:"private"`
}

type DNS struct {
	Nameservers []string `json:"nameservers"`
}

type Metadata struct {
	Hostname   string     `json:"hostname"`
	Interfaces Interfaces `json:"interfaces"`
	PublicKeys []string   `json:"public_keys"`
	DNS        DNS        `json:"dns"`
}

type metadataService struct {
	interfaces Interfaces
	dns        DNS
	metadata.MetadataService
}

func NewDatasource(root string) *metadataService {
	return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)}
}

func (ms *metadataService) FetchMetadata() ([]byte, error) {
	data, err := ms.FetchData(ms.MetadataUrl())
	if err != nil || len(data) == 0 {
		return []byte{}, err
	}

	var metadata Metadata
	if err := json.Unmarshal(data, &metadata); err != nil {
		return []byte{}, err
	}

	ms.interfaces = metadata.Interfaces
	ms.dns = metadata.DNS

	attrs := make(map[string]interface{})
	if len(metadata.Interfaces.Public) > 0 {
		if metadata.Interfaces.Public[0].IPv4 != nil {
			attrs["public-ipv4"] = metadata.Interfaces.Public[0].IPv4.IPAddress
		}
		if metadata.Interfaces.Public[0].IPv6 != nil {
			attrs["public-ipv6"] = metadata.Interfaces.Public[0].IPv6.IPAddress
		}
	}
	if len(metadata.Interfaces.Private) > 0 {
		if metadata.Interfaces.Private[0].IPv4 != nil {
			attrs["local-ipv4"] = metadata.Interfaces.Private[0].IPv4.IPAddress
		}
		if metadata.Interfaces.Private[0].IPv6 != nil {
			attrs["local-ipv6"] = metadata.Interfaces.Private[0].IPv6.IPAddress
		}
	}
	attrs["hostname"] = metadata.Hostname
	keys := make(map[string]string)
	for i, key := range metadata.PublicKeys {
		keys[strconv.Itoa(i)] = key
	}
	attrs["public_keys"] = keys

	return json.Marshal(attrs)
}

func (ms metadataService) FetchNetworkConfig(filename string) ([]byte, error) {
	return json.Marshal(Metadata{
		Interfaces: ms.interfaces,
		DNS:        ms.dns,
	})
}

func (ms metadataService) Type() string {
	return "digitalocean-metadata-service"
}
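For orientation only (not part of this diff), a minimal caller-side sketch of the new DigitalOcean datasource; it uses only the names defined above (NewDatasource, DefaultAddress, FetchMetadata) and would live inside this package, but the function itself is hypothetical:

func exampleUsage() ([]byte, error) {
	// Construct the datasource against the DigitalOcean link-local metadata endpoint.
	ds := NewDatasource(DefaultAddress)

	// The result is flattened JSON with keys such as "hostname",
	// "public-ipv4", "local-ipv4" and "public_keys" (indexed "0", "1", ...),
	// exactly as assembled by FetchMetadata above.
	return ds.FetchMetadata()
}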
Some files were not shown because too many files have changed in this diff.