Compare commits
129 Commits
v0.9.1
...
0.10-relea
Author | SHA1 | Date | |
---|---|---|---|
|
ec4bfbc8fc | ||
|
de35d27bfc | ||
|
f0dea2475d | ||
|
62918a28ce | ||
|
8c936f10c5 | ||
|
92eb5eb48b | ||
|
ea1e4c38fa | ||
|
46cb51cf91 | ||
|
1a6cee5305 | ||
|
e9bda98b54 | ||
|
badc874b74 | ||
|
c9e8c887b8 | ||
|
8be307de49 | ||
|
562c474275 | ||
|
5c5834863b | ||
|
44f0a949c5 | ||
|
106c4e7a2c | ||
|
6c1ba590aa | ||
|
45da664c59 | ||
|
2a71551ef2 | ||
|
84e1cb3242 | ||
|
5214ead926 | ||
|
e2c24c4cef | ||
|
75e288c553 | ||
|
0785840fe3 | ||
|
c10bfc2f56 | ||
|
2f954dcdc2 | ||
|
cdfc94f4e9 | ||
|
18e2f98414 | ||
|
4b472795c4 | ||
|
85b8d804c8 | ||
|
1fbbaaec19 | ||
|
667dbd8fb7 | ||
|
6730cb7227 | ||
|
9454522033 | ||
|
c255739a93 | ||
|
2051cd3e1c | ||
|
b52cb3fea3 | ||
|
da5f85b3fb | ||
|
9999178538 | ||
|
8f766e4666 | ||
|
2d28d16c92 | ||
|
e9cd09dd7b | ||
|
8370b30aa2 | ||
|
3e015cc3a1 | ||
|
a0fe6d0884 | ||
|
585ce5fcd9 | ||
|
72445796ca | ||
|
7342d91a85 | ||
|
db1bc51c98 | ||
|
c1f373e648 | ||
|
db49a16002 | ||
|
a4a6c281d9 | ||
|
17f8733121 | ||
|
7dec922618 | ||
|
54d3ae27af | ||
|
ee2416af64 | ||
|
cda037f9a5 | ||
|
549806cf64 | ||
|
56815a6756 | ||
|
24a6f7c49c | ||
|
98484be434 | ||
|
9024659296 | ||
|
fc6940f7ba | ||
|
f2fd95699b | ||
|
65db96cc7c | ||
|
c17b93b5c0 | ||
|
d352f8ce6a | ||
|
78aa2c56ec | ||
|
c5b3788282 | ||
|
5e98970bb5 | ||
|
cbdd446c55 | ||
|
316cadcf44 | ||
|
5a939be21b | ||
|
8d76c64386 | ||
|
1b854eb51e | ||
|
9fcf338bf3 | ||
|
fda72bdb5c | ||
|
685a38c6c8 | ||
|
9d15f2cfaf | ||
|
2134fce791 | ||
|
3abd6b2225 | ||
|
2a8e6c9566 | ||
|
abe43537da | ||
|
3a550af651 | ||
|
61c3a0eb2d | ||
|
480176bc11 | ||
|
01b18eb551 | ||
|
970ef435b6 | ||
|
e8d0021140 | ||
|
e9ec78ac6f | ||
|
4a2e417781 | ||
|
604ef7ecb4 | ||
|
c39dd5cc67 | ||
|
a923161f4a | ||
|
e59e2f6cd5 | ||
|
e90fe3eba8 | ||
|
fb0187b197 | ||
|
6babe74716 | ||
|
b1e88284ca | ||
|
18a65f7dac | ||
|
0c212c72c9 | ||
|
6a800d8cc0 | ||
|
5e112147bb | ||
|
7e78b1563f | ||
|
ecbe81f103 | ||
|
45c20c1dd3 | ||
|
8ce925a060 | ||
|
eadb6ef42c | ||
|
7518f0ec93 | ||
|
f0b9eaf2fe | ||
|
7320a2cbf2 | ||
|
57950b3ed9 | ||
|
85c6a2a16a | ||
|
24b44e86a6 | ||
|
2f52ad4ef8 | ||
|
735d6c6161 | ||
|
1cf275bad6 | ||
|
f1c97cb4d5 | ||
|
d143904aa9 | ||
|
c428ce2cc5 | ||
|
dfb5b4fc3a | ||
|
97d5538533 | ||
|
6b8f82b5d3 | ||
|
facde6609f | ||
|
d68ae84b37 | ||
|
54aa39543b | ||
|
8566a2c118 | ||
|
49ac083af5 |
@@ -1,8 +1,11 @@
|
|||||||
language: go
|
language: go
|
||||||
go: 1.2
|
go:
|
||||||
|
- 1.3
|
||||||
|
- 1.2
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- go get code.google.com/p/go.tools/cmd/cover
|
- go get code.google.com/p/go.tools/cmd/cover
|
||||||
|
- go get code.google.com/p/go.tools/cmd/vet
|
||||||
|
|
||||||
script:
|
script:
|
||||||
- ./test
|
- ./test
|
@@ -39,22 +39,25 @@ Thanks for your contributions!
|
|||||||
|
|
||||||
### Format of the Commit Message
|
### Format of the Commit Message
|
||||||
|
|
||||||
We follow a rough convention for commit messages borrowed from AngularJS. This
|
We follow a rough convention for commit messages that is designed to answer two
|
||||||
is an example of a commit:
|
questions: what changed and why. The subject line should feature the what and
|
||||||
|
the body of the commit should describe the why.
|
||||||
|
|
||||||
```
|
```
|
||||||
feat(scripts/test-cluster): add a cluster test command
|
environment: write new keys in consistent order
|
||||||
|
|
||||||
this uses tmux to setup a test cluster that you can easily kill and
|
Go 1.3 randomizes the ordering of keys when iterating over a map.
|
||||||
start for debugging.
|
Sort the keys to make this ordering consistent.
|
||||||
|
|
||||||
|
Fixes #38
|
||||||
```
|
```
|
||||||
|
|
||||||
The format can be described more formally as follows:
|
The format can be described more formally as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
<type>(<scope>): <subject>
|
<subsystem>: <what changed>
|
||||||
<BLANK LINE>
|
<BLANK LINE>
|
||||||
<body>
|
<why this change was made>
|
||||||
<BLANK LINE>
|
<BLANK LINE>
|
||||||
<footer>
|
<footer>
|
||||||
```
|
```
|
||||||
@@ -63,25 +66,3 @@ The first line is the subject and should be no longer than 70 characters, the
|
|||||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||||
This allows the message to be easier to read on GitHub as well as in various
|
This allows the message to be easier to read on GitHub as well as in various
|
||||||
git tools.
|
git tools.
|
||||||
|
|
||||||
#### Subject Line
|
|
||||||
|
|
||||||
The subject line contains a succinct description of the change.
|
|
||||||
|
|
||||||
#### Allowed `<type>`s
|
|
||||||
- *feat* (feature)
|
|
||||||
- *fix* (bug fix)
|
|
||||||
- *docs* (documentation)
|
|
||||||
- *style* (formatting, missing semi colons, …)
|
|
||||||
- *refactor*
|
|
||||||
- *test* (when adding missing tests)
|
|
||||||
- *chore* (maintain)
|
|
||||||
|
|
||||||
#### Allowed `<scope>`s
|
|
||||||
|
|
||||||
Scopes can anything specifying the place of the commit change in the code base -
|
|
||||||
for example, "api", "store", etc.
|
|
||||||
|
|
||||||
|
|
||||||
For more details on the commit format, see the [AngularJS commit style
|
|
||||||
guide](https://docs.google.com/a/coreos.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit#).
|
|
||||||
|
@@ -4,7 +4,7 @@ CoreOS allows you to declaratively customize various OS-level items, such as net
|
|||||||
|
|
||||||
## Configuration File
|
## Configuration File
|
||||||
|
|
||||||
The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file. which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
|
The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file, which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
|
||||||
|
|
||||||
We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
|
We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
|
|||||||
For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
|
For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
|
||||||
Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
|
Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
|
||||||
|
|
||||||
_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, and Vagrant._
|
_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
|
||||||
|
|
||||||
[etcd-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
|
[etcd-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
|
||||||
|
|
||||||
@@ -95,7 +95,7 @@ Environment="FLEET_METADATA=region=us-west"
|
|||||||
|
|
||||||
For more information on fleet configuration, see the [fleet documentation][fleet-config].
|
For more information on fleet configuration, see the [fleet documentation][fleet-config].
|
||||||
|
|
||||||
[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/configuration.md
|
[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration
|
||||||
|
|
||||||
#### update
|
#### update
|
||||||
|
|
||||||
@@ -130,11 +130,11 @@ The `coreos.units.*` parameters define a list of arbitrary systemd units to star
|
|||||||
Each item is an object with the following fields:
|
Each item is an object with the following fields:
|
||||||
|
|
||||||
- **name**: String representing unit's name. Required.
|
- **name**: String representing unit's name. Required.
|
||||||
- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. Default value is false.
|
- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. The default value is false.
|
||||||
- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. Default value is false.
|
- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. The default value is false.
|
||||||
- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
|
- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
|
||||||
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. Default value is restart.
|
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
|
||||||
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. Default value is false.
|
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
|
||||||
|
|
||||||
**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
|
**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
|
||||||
|
|
||||||
@@ -298,7 +298,8 @@ users:
|
|||||||
|
|
||||||
### write_files
|
### write_files
|
||||||
|
|
||||||
The `write-file` parameter defines a list of files to create on the local filesystem. Each file is represented as an associative array which has the following keys:
|
The `write_files` directive defines a set of files to create on the local filesystem.
|
||||||
|
Each item in the list may have the following keys:
|
||||||
|
|
||||||
- **path**: Absolute location on disk where contents should be written
|
- **path**: Absolute location on disk where contents should be written
|
||||||
- **content**: Data to write at the provided `path`
|
- **content**: Data to write at the provided `path`
|
||||||
@@ -311,11 +312,16 @@ The **content** field must represent exactly what should be written to disk.
|
|||||||
```yaml
|
```yaml
|
||||||
#cloud-config
|
#cloud-config
|
||||||
write_files:
|
write_files:
|
||||||
- path: /etc/fleet/fleet.conf
|
- path: /etc/resolv.conf
|
||||||
permissions: 0644
|
permissions: 0644
|
||||||
|
owner: root
|
||||||
content: |
|
content: |
|
||||||
verbosity=1
|
nameserver 8.8.8.8
|
||||||
metadata="region=us-west,type=ssd"
|
- path: /etc/motd
|
||||||
|
permissions: 0644
|
||||||
|
owner: root
|
||||||
|
content: |
|
||||||
|
Good news, everyone!
|
||||||
```
|
```
|
||||||
|
|
||||||
### manage_etc_hosts
|
### manage_etc_hosts
|
||||||
|
34
Godeps/Godeps.json
generated
Normal file
34
Godeps/Godeps.json
generated
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/coreos-cloudinit",
|
||||||
|
"GoVersion": "go1.3.1",
|
||||||
|
"Packages": [
|
||||||
|
"./..."
|
||||||
|
],
|
||||||
|
"Deps": [
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/cloudsigma/cepgo",
|
||||||
|
"Rev": "1bfc4895bf5c4d3b599f3f6ee142299488c8739b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/go-systemd/dbus",
|
||||||
|
"Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/dotcloud/docker/pkg/netlink",
|
||||||
|
"Comment": "v0.11.1-359-g55d41c3e21e1",
|
||||||
|
"Rev": "55d41c3e21e1593b944c06196ffb2ac57ab7f653"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/guelfey/go.dbus",
|
||||||
|
"Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/tarm/goserial",
|
||||||
|
"Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "gopkg.in/yaml.v1",
|
||||||
|
"Rev": "feb4ca79644e8e7e39c06095246ee54b1282c118"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
5
Godeps/Readme
generated
Normal file
5
Godeps/Readme
generated
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
This directory tree is generated automatically by godep.
|
||||||
|
|
||||||
|
Please do not edit.
|
||||||
|
|
||||||
|
See https://github.com/tools/godep for more information.
|
2
Godeps/_workspace/.gitignore
generated
vendored
Normal file
2
Godeps/_workspace/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
/pkg
|
||||||
|
/bin
|
23
Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore
generated
vendored
Normal file
23
Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
202
Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE
generated
vendored
Normal file
202
Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
43
Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md
generated
vendored
Normal file
43
Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
cepgo
|
||||||
|
=====
|
||||||
|
|
||||||
|
Cepko implements easy-to-use communication with CloudSigma's VMs through a
|
||||||
|
virtual serial port without bothering with formatting the messages properly nor
|
||||||
|
parsing the output with the specific and sometimes confusing shell tools for
|
||||||
|
that purpose.
|
||||||
|
|
||||||
|
Having the server definition accessible by the VM can be useful in various
|
||||||
|
ways. For example it is possible to easily determine from within the VM, which
|
||||||
|
network interfaces are connected to public and which to private network.
|
||||||
|
Another use is to pass some data to initial VM setup scripts, like setting the
|
||||||
|
hostname to the VM name or passing ssh public keys through server meta.
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/cloudsigma/cepgo"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
c := cepgo.NewCepgo()
|
||||||
|
result, err := c.Meta()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("%#v", result)
|
||||||
|
}
|
||||||
|
|
||||||
|
Output:
|
||||||
|
|
||||||
|
map[string]interface {}{
|
||||||
|
"optimize_for":"custom",
|
||||||
|
"ssh_public_key":"ssh-rsa AAA...",
|
||||||
|
"description":"[...]",
|
||||||
|
}
|
||||||
|
|
||||||
|
For more information take a look at the Server Context section of CloudSigma
|
||||||
|
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
|
186
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go
generated
vendored
Normal file
186
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
// Cepko implements easy-to-use communication with CloudSigma's VMs through a
|
||||||
|
// virtual serial port without bothering with formatting the messages properly
|
||||||
|
// nor parsing the output with the specific and sometimes confusing shell tools
|
||||||
|
// for that purpose.
|
||||||
|
//
|
||||||
|
// Having the server definition accessible by the VM can be useful in various
|
||||||
|
// ways. For example it is possible to easily determine from within the VM,
|
||||||
|
// which network interfaces are connected to public and which to private
|
||||||
|
// network. Another use is to pass some data to initial VM setup scripts, like
|
||||||
|
// setting the hostname to the VM name or passing ssh public keys through
|
||||||
|
// server meta.
|
||||||
|
//
|
||||||
|
// Example usage:
|
||||||
|
//
|
||||||
|
// package main
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "fmt"
|
||||||
|
//
|
||||||
|
// "github.com/cloudsigma/cepgo"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// func main() {
|
||||||
|
// c := cepgo.NewCepgo()
|
||||||
|
// result, err := c.Meta()
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// fmt.Printf("%#v", result)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Output:
|
||||||
|
//
|
||||||
|
// map[string]string{
|
||||||
|
// "optimize_for":"custom",
|
||||||
|
// "ssh_public_key":"ssh-rsa AAA...",
|
||||||
|
// "description":"[...]",
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// For more information take a look at the Server Context section API Docs:
|
||||||
|
// http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
|
||||||
|
package cepgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/tarm/goserial"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
requestPattern = "<\n%s\n>"
|
||||||
|
EOT = '\x04' // End Of Transmission
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
SerialPort string = "/dev/ttyS1"
|
||||||
|
Baud int = 115200
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sets the serial port. If the operating system is windows CloudSigma's server
|
||||||
|
// context is at COM2 port, otherwise (linux, freebsd, darwin) the port is
|
||||||
|
// being left to the default /dev/ttyS1.
|
||||||
|
func init() {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
SerialPort = "COM2"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default fetcher makes the connection to the serial port,
|
||||||
|
// writes given query and reads until the EOT symbol.
|
||||||
|
func fetchViaSerialPort(key string) ([]byte, error) {
|
||||||
|
config := &serial.Config{Name: SerialPort, Baud: Baud}
|
||||||
|
connection, err := serial.OpenPort(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
query := fmt.Sprintf(requestPattern, key)
|
||||||
|
if _, err := connection.Write([]byte(query)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := bufio.NewReader(connection)
|
||||||
|
answer, err := reader.ReadBytes(EOT)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return answer[0 : len(answer)-1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Queries to the serial port can be executed only from instance of this type.
|
||||||
|
// The result from each of them can be either interface{}, map[string]string or
|
||||||
|
// a single in case of single value is returned. There is also a public metod
|
||||||
|
// who directly calls the fetcher and returns raw []byte from the serial port.
|
||||||
|
type Cepgo struct {
|
||||||
|
fetcher func(string) ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a Cepgo instance with the default serial port fetcher.
|
||||||
|
func NewCepgo() *Cepgo {
|
||||||
|
cepgo := new(Cepgo)
|
||||||
|
cepgo.fetcher = fetchViaSerialPort
|
||||||
|
return cepgo
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a Cepgo instance with custom fetcher.
|
||||||
|
func NewCepgoFetcher(fetcher func(string) ([]byte, error)) *Cepgo {
|
||||||
|
cepgo := new(Cepgo)
|
||||||
|
cepgo.fetcher = fetcher
|
||||||
|
return cepgo
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetches raw []byte from the serial port using directly the fetcher member.
|
||||||
|
func (c *Cepgo) FetchRaw(key string) ([]byte, error) {
|
||||||
|
return c.fetcher(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetches a single key and tries to unmarshal the result to json and returns
|
||||||
|
// it. If the unmarshalling fails it's safe to assume the result it's just a
|
||||||
|
// string and returns it.
|
||||||
|
func (c *Cepgo) Key(key string) (interface{}, error) {
|
||||||
|
var result interface{}
|
||||||
|
|
||||||
|
fetched, err := c.FetchRaw(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(fetched, &result)
|
||||||
|
if err != nil {
|
||||||
|
return string(fetched), nil
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetches all the server context. Equivalent of c.Key("")
|
||||||
|
func (c *Cepgo) All() (interface{}, error) {
|
||||||
|
return c.Key("")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetches only the object meta field and makes sure to return a proper
|
||||||
|
// map[string]string
|
||||||
|
func (c *Cepgo) Meta() (map[string]string, error) {
|
||||||
|
rawMeta, err := c.Key("/meta/")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return typeAssertToMapOfStrings(rawMeta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetches only the global context and makes sure to return a proper
|
||||||
|
// map[string]string
|
||||||
|
func (c *Cepgo) GlobalContext() (map[string]string, error) {
|
||||||
|
rawContext, err := c.Key("/global_context/")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return typeAssertToMapOfStrings(rawContext)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Just a little helper function that uses type assertions in order to convert
|
||||||
|
// a interface{} to map[string]string if this is possible.
|
||||||
|
func typeAssertToMapOfStrings(raw interface{}) (map[string]string, error) {
|
||||||
|
result := make(map[string]string)
|
||||||
|
|
||||||
|
dictionary, ok := raw.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("Received bytes are formatted badly")
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, rawValue := range dictionary {
|
||||||
|
if value, ok := rawValue.(string); ok {
|
||||||
|
result[key] = value
|
||||||
|
} else {
|
||||||
|
return nil, errors.New("Server context metadata is formatted badly")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
122
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go
generated
vendored
Normal file
122
Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package cepgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func fetchMock(key string) ([]byte, error) {
|
||||||
|
context := []byte(`{
|
||||||
|
"context": true,
|
||||||
|
"cpu": 4000,
|
||||||
|
"cpu_model": null,
|
||||||
|
"cpus_instead_of_cores": false,
|
||||||
|
"enable_numa": false,
|
||||||
|
"global_context": {
|
||||||
|
"some_global_key": "some_global_val"
|
||||||
|
},
|
||||||
|
"grantees": [],
|
||||||
|
"hv_relaxed": false,
|
||||||
|
"hv_tsc": false,
|
||||||
|
"jobs": [],
|
||||||
|
"mem": 4294967296,
|
||||||
|
"meta": {
|
||||||
|
"base64_fields": "cloudinit-user-data",
|
||||||
|
"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
|
||||||
|
"ssh_public_key": "ssh-rsa AAAAB2NzaC1yc2E.../hQ5D5 john@doe"
|
||||||
|
},
|
||||||
|
"name": "coreos",
|
||||||
|
"nics": [
|
||||||
|
{
|
||||||
|
"runtime": {
|
||||||
|
"interface_type": "public",
|
||||||
|
"ip_v4": {
|
||||||
|
"uuid": "31.171.251.74"
|
||||||
|
},
|
||||||
|
"ip_v6": null
|
||||||
|
},
|
||||||
|
"vlan": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"smp": 2,
|
||||||
|
"status": "running",
|
||||||
|
"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
|
||||||
|
}`)
|
||||||
|
|
||||||
|
if key == "" {
|
||||||
|
return context, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var marshalledContext map[string]interface{}
|
||||||
|
|
||||||
|
err := json.Unmarshal(context, &marshalledContext)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if key[0] == '/' {
|
||||||
|
key = key[1:]
|
||||||
|
}
|
||||||
|
if key[len(key)-1] == '/' {
|
||||||
|
key = key[:len(key)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(marshalledContext[key])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAll(t *testing.T) {
|
||||||
|
cepgo := NewCepgoFetcher(fetchMock)
|
||||||
|
|
||||||
|
result, err := cepgo.All()
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range []string{"meta", "name", "uuid", "global_context"} {
|
||||||
|
if _, ok := result.(map[string]interface{})[key]; !ok {
|
||||||
|
t.Errorf("%s not in all keys", key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKey(t *testing.T) {
|
||||||
|
cepgo := NewCepgoFetcher(fetchMock)
|
||||||
|
|
||||||
|
result, err := cepgo.Key("uuid")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := result.(string); !ok {
|
||||||
|
t.Errorf("%#v\n", result)
|
||||||
|
|
||||||
|
t.Error("Fetching the uuid did not return a string")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMeta(t *testing.T) {
|
||||||
|
cepgo := NewCepgoFetcher(fetchMock)
|
||||||
|
|
||||||
|
meta, err := cepgo.Meta()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%#v\n", meta)
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := meta["ssh_public_key"]; !ok {
|
||||||
|
t.Error("ssh_public_key is not in the meta")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGlobalContext(t *testing.T) {
|
||||||
|
cepgo := NewCepgoFetcher(fetchMock)
|
||||||
|
|
||||||
|
result, err := cepgo.GlobalContext()
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := result["some_global_key"]; !ok {
|
||||||
|
t.Error("some_global_key is not in the global context")
|
||||||
|
}
|
||||||
|
}
|
@@ -23,7 +23,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const signalBuffer = 100
|
const signalBuffer = 100
|
@@ -18,7 +18,7 @@ package dbus
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (c *Conn) initJobs() {
|
func (c *Conn) initJobs() {
|
||||||
@@ -208,7 +208,7 @@ func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Proper
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
|
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
|
||||||
return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName)
|
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListUnits returns an array with all currently loaded units. Note that
|
// ListUnits returns an array with all currently loaded units. Note that
|
@@ -18,7 +18,7 @@ package dbus
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
@@ -17,7 +17,7 @@ limitations under the License.
|
|||||||
package dbus
|
package dbus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// From the systemd docs:
|
// From the systemd docs:
|
@@ -20,7 +20,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -101,7 +101,7 @@ func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitSt
|
|||||||
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
|
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
|
||||||
// size of the channels, the comparison function for detecting changes and a filter
|
// size of the channels, the comparison function for detecting changes and a filter
|
||||||
// function for cutting down on the noise that your channel receives.
|
// function for cutting down on the noise that your channel receives.
|
||||||
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
|
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
|
||||||
old := make(map[string]*UnitStatus)
|
old := make(map[string]*UnitStatus)
|
||||||
statusChan := make(chan map[string]*UnitStatus, buffer)
|
statusChan := make(chan map[string]*UnitStatus, buffer)
|
||||||
errChan := make(chan error, buffer)
|
errChan := make(chan error, buffer)
|
@@ -2,7 +2,7 @@ package introspect
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
@@ -2,7 +2,7 @@ package introspect
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
"reflect"
|
"reflect"
|
||||||
)
|
)
|
||||||
|
|
@@ -3,8 +3,8 @@
|
|||||||
package prop
|
package prop
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/github.com/guelfey/go.dbus/introspect"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
27
Godeps/_workspace/src/github.com/tarm/goserial/LICENSE
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/tarm/goserial/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
63
Godeps/_workspace/src/github.com/tarm/goserial/README.md
generated
vendored
Normal file
63
Godeps/_workspace/src/github.com/tarm/goserial/README.md
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
GoSerial
|
||||||
|
========
|
||||||
|
A simple go package to allow you to read and write from the
|
||||||
|
serial port as a stream of bytes.
|
||||||
|
|
||||||
|
Details
|
||||||
|
-------
|
||||||
|
It aims to have the same API on all platforms, including windows. As
|
||||||
|
an added bonus, the windows package does not use cgo, so you can cross
|
||||||
|
compile for windows from another platform. Unfortunately goinstall
|
||||||
|
does not currently let you cross compile so you will have to do it
|
||||||
|
manually:
|
||||||
|
|
||||||
|
GOOS=windows make clean install
|
||||||
|
|
||||||
|
Currently there is very little in the way of configurability. You can
|
||||||
|
set the baud rate. Then you can Read(), Write(), or Close() the
|
||||||
|
connection. Read() will block until at least one byte is returned.
|
||||||
|
Write is the same. There is currently no exposed way to set the
|
||||||
|
timeouts, though patches are welcome.
|
||||||
|
|
||||||
|
Currently all ports are opened with 8 data bits, 1 stop bit, no
|
||||||
|
parity, no hardware flow control, and no software flow control. This
|
||||||
|
works fine for many real devices and many faux serial devices
|
||||||
|
including usb-to-serial converters and bluetooth serial ports.
|
||||||
|
|
||||||
|
You may Read() and Write() simulantiously on the same connection (from
|
||||||
|
different goroutines).
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/tarm/goserial"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
c := &serial.Config{Name: "COM45", Baud: 115200}
|
||||||
|
s, err := serial.OpenPort(c)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := s.Write([]byte("test"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
n, err = s.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Print("%q", buf[:n])
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Possible Future Work
|
||||||
|
--------------------
|
||||||
|
- better tests (loopback etc)
|
61
Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go
generated
vendored
Normal file
61
Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package serial
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConnection(t *testing.T) {
|
||||||
|
c0 := &Config{Name: "/dev/ttyUSB0", Baud: 115200}
|
||||||
|
c1 := &Config{Name: "/dev/ttyUSB1", Baud: 115200}
|
||||||
|
|
||||||
|
s1, err := OpenPort(c0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s2, err := OpenPort(c1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := make(chan int, 1)
|
||||||
|
go func() {
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
var readCount int
|
||||||
|
for {
|
||||||
|
n, err := s2.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
readCount++
|
||||||
|
t.Logf("Read %v %v bytes: % 02x %s", readCount, n, buf[:n], buf[:n])
|
||||||
|
select {
|
||||||
|
case <-ch:
|
||||||
|
ch <- readCount
|
||||||
|
close(ch)
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _, err = s1.Write([]byte("hello")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err = s1.Write([]byte(" ")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
if _, err = s1.Write([]byte("world")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second / 10)
|
||||||
|
|
||||||
|
ch <- 0
|
||||||
|
s1.Write([]byte(" ")) // We could be blocked in the read without this
|
||||||
|
c := <-ch
|
||||||
|
exp := 5
|
||||||
|
if c >= exp {
|
||||||
|
t.Fatalf("Expected less than %v read, got %v", exp, c)
|
||||||
|
}
|
||||||
|
}
|
99
Godeps/_workspace/src/github.com/tarm/goserial/serial.go
generated
vendored
Normal file
99
Godeps/_workspace/src/github.com/tarm/goserial/serial.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
/*
|
||||||
|
Goserial is a simple go package to allow you to read and write from
|
||||||
|
the serial port as a stream of bytes.
|
||||||
|
|
||||||
|
It aims to have the same API on all platforms, including windows. As
|
||||||
|
an added bonus, the windows package does not use cgo, so you can cross
|
||||||
|
compile for windows from another platform. Unfortunately goinstall
|
||||||
|
does not currently let you cross compile so you will have to do it
|
||||||
|
manually:
|
||||||
|
|
||||||
|
GOOS=windows make clean install
|
||||||
|
|
||||||
|
Currently there is very little in the way of configurability. You can
|
||||||
|
set the baud rate. Then you can Read(), Write(), or Close() the
|
||||||
|
connection. Read() will block until at least one byte is returned.
|
||||||
|
Write is the same. There is currently no exposed way to set the
|
||||||
|
timeouts, though patches are welcome.
|
||||||
|
|
||||||
|
Currently all ports are opened with 8 data bits, 1 stop bit, no
|
||||||
|
parity, no hardware flow control, and no software flow control. This
|
||||||
|
works fine for many real devices and many faux serial devices
|
||||||
|
including usb-to-serial converters and bluetooth serial ports.
|
||||||
|
|
||||||
|
You may Read() and Write() simulantiously on the same connection (from
|
||||||
|
different goroutines).
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/tarm/goserial"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
c := &serial.Config{Name: "COM5", Baud: 115200}
|
||||||
|
s, err := serial.OpenPort(c)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := s.Write([]byte("test"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 128)
|
||||||
|
n, err = s.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Print("%q", buf[:n])
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
package serial
|
||||||
|
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
// Config contains the information needed to open a serial port.
|
||||||
|
//
|
||||||
|
// Currently few options are implemented, but more may be added in the
|
||||||
|
// future (patches welcome), so it is recommended that you create a
|
||||||
|
// new config addressing the fields by name rather than by order.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// c0 := &serial.Config{Name: "COM45", Baud: 115200}
|
||||||
|
// or
|
||||||
|
// c1 := new(serial.Config)
|
||||||
|
// c1.Name = "/dev/tty.usbserial"
|
||||||
|
// c1.Baud = 115200
|
||||||
|
//
|
||||||
|
type Config struct {
|
||||||
|
Name string
|
||||||
|
Baud int
|
||||||
|
|
||||||
|
// Size int // 0 get translated to 8
|
||||||
|
// Parity SomeNewTypeToGetCorrectDefaultOf_None
|
||||||
|
// StopBits SomeNewTypeToGetCorrectDefaultOf_1
|
||||||
|
|
||||||
|
// RTSFlowControl bool
|
||||||
|
// DTRFlowControl bool
|
||||||
|
// XONFlowControl bool
|
||||||
|
|
||||||
|
// CRLFTranslate bool
|
||||||
|
// TimeoutStuff int
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenPort opens a serial port with the specified configuration
|
||||||
|
func OpenPort(c *Config) (io.ReadWriteCloser, error) {
|
||||||
|
return openPort(c.Name, c.Baud)
|
||||||
|
}
|
||||||
|
|
||||||
|
// func Flush()
|
||||||
|
|
||||||
|
// func SendBreak()
|
||||||
|
|
||||||
|
// func RegisterBreakHandler(func())
|
90
Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go
generated
vendored
Normal file
90
Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
// +build linux,!cgo

package serial

import (
	"io"
	"os"
	"syscall"
	"unsafe"
)

func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {

	var bauds = map[int]uint32{
		50:      syscall.B50,
		75:      syscall.B75,
		110:     syscall.B110,
		134:     syscall.B134,
		150:     syscall.B150,
		200:     syscall.B200,
		300:     syscall.B300,
		600:     syscall.B600,
		1200:    syscall.B1200,
		1800:    syscall.B1800,
		2400:    syscall.B2400,
		4800:    syscall.B4800,
		9600:    syscall.B9600,
		19200:   syscall.B19200,
		38400:   syscall.B38400,
		57600:   syscall.B57600,
		115200:  syscall.B115200,
		230400:  syscall.B230400,
		460800:  syscall.B460800,
		500000:  syscall.B500000,
		576000:  syscall.B576000,
		921600:  syscall.B921600,
		1000000: syscall.B1000000,
		1152000: syscall.B1152000,
		1500000: syscall.B1500000,
		2000000: syscall.B2000000,
		2500000: syscall.B2500000,
		3000000: syscall.B3000000,
		3500000: syscall.B3500000,
		4000000: syscall.B4000000,
	}

	rate := bauds[baud]

	if rate == 0 {
		return
	}

	f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil && f != nil {
			f.Close()
		}
	}()

	fd := f.Fd()
	t := syscall.Termios{
		Iflag:  syscall.IGNPAR,
		Cflag:  syscall.CS8 | syscall.CREAD | syscall.CLOCAL | rate,
		Cc:     [32]uint8{syscall.VMIN: 1},
		Ispeed: rate,
		Ospeed: rate,
	}

	if _, _, errno := syscall.Syscall6(
		syscall.SYS_IOCTL,
		uintptr(fd),
		uintptr(syscall.TCSETS),
		uintptr(unsafe.Pointer(&t)),
		0,
		0,
		0,
	); errno != 0 {
		return nil, errno
	}

	if err = syscall.SetNonblock(int(fd), false); err != nil {
		return
	}

	return f, nil
}
107  Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go  generated  vendored  Normal file
@@ -0,0 +1,107 @@
// +build !windows,cgo

package serial

// #include <termios.h>
// #include <unistd.h>
import "C"

// TODO: Maybe change to using syscall package + ioctl instead of cgo

import (
	"errors"
	"fmt"
	"io"
	"os"
	"syscall"
	//"unsafe"
)

func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
	f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
	if err != nil {
		return
	}

	fd := C.int(f.Fd())
	if C.isatty(fd) != 1 {
		f.Close()
		return nil, errors.New("File is not a tty")
	}

	var st C.struct_termios
	_, err = C.tcgetattr(fd, &st)
	if err != nil {
		f.Close()
		return nil, err
	}
	var speed C.speed_t
	switch baud {
	case 115200:
		speed = C.B115200
	case 57600:
		speed = C.B57600
	case 38400:
		speed = C.B38400
	case 19200:
		speed = C.B19200
	case 9600:
		speed = C.B9600
	case 4800:
		speed = C.B4800
	case 2400:
		speed = C.B2400
	default:
		f.Close()
		return nil, fmt.Errorf("Unknown baud rate %v", baud)
	}

	_, err = C.cfsetispeed(&st, speed)
	if err != nil {
		f.Close()
		return nil, err
	}
	_, err = C.cfsetospeed(&st, speed)
	if err != nil {
		f.Close()
		return nil, err
	}

	// Select local mode
	st.c_cflag |= (C.CLOCAL | C.CREAD)

	// Select raw mode
	st.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ISIG)
	st.c_oflag &= ^C.tcflag_t(C.OPOST)

	_, err = C.tcsetattr(fd, C.TCSANOW, &st)
	if err != nil {
		f.Close()
		return nil, err
	}

	//fmt.Println("Tweaking", name)
	r1, _, e := syscall.Syscall(syscall.SYS_FCNTL,
		uintptr(f.Fd()),
		uintptr(syscall.F_SETFL),
		uintptr(0))
	if e != 0 || r1 != 0 {
		s := fmt.Sprint("Clearing NONBLOCK syscall error:", e, r1)
		f.Close()
		return nil, errors.New(s)
	}

	/*
		r1, _, e = syscall.Syscall(syscall.SYS_IOCTL,
			uintptr(f.Fd()),
			uintptr(0x80045402), // IOSSIOSPEED
			uintptr(unsafe.Pointer(&baud)));
		if e != 0 || r1 != 0 {
			s := fmt.Sprint("Baudrate syscall error:", e, r1)
			f.Close()
			return nil, os.NewError(s)
		}
	*/

	return f, nil
}
263  Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go  generated  vendored  Normal file
@@ -0,0 +1,263 @@
// +build windows

package serial

import (
	"fmt"
	"io"
	"os"
	"sync"
	"syscall"
	"unsafe"
)

type serialPort struct {
	f  *os.File
	fd syscall.Handle
	rl sync.Mutex
	wl sync.Mutex
	ro *syscall.Overlapped
	wo *syscall.Overlapped
}

type structDCB struct {
	DCBlength, BaudRate                            uint32
	flags                                          [4]byte
	wReserved, XonLim, XoffLim                     uint16
	ByteSize, Parity, StopBits                     byte
	XonChar, XoffChar, ErrorChar, EofChar, EvtChar byte
	wReserved1                                     uint16
}

type structTimeouts struct {
	ReadIntervalTimeout         uint32
	ReadTotalTimeoutMultiplier  uint32
	ReadTotalTimeoutConstant    uint32
	WriteTotalTimeoutMultiplier uint32
	WriteTotalTimeoutConstant   uint32
}

func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
	if len(name) > 0 && name[0] != '\\' {
		name = "\\\\.\\" + name
	}

	h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
		syscall.GENERIC_READ|syscall.GENERIC_WRITE,
		0,
		nil,
		syscall.OPEN_EXISTING,
		syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
		0)
	if err != nil {
		return nil, err
	}
	f := os.NewFile(uintptr(h), name)
	defer func() {
		if err != nil {
			f.Close()
		}
	}()

	if err = setCommState(h, baud); err != nil {
		return
	}
	if err = setupComm(h, 64, 64); err != nil {
		return
	}
	if err = setCommTimeouts(h); err != nil {
		return
	}
	if err = setCommMask(h); err != nil {
		return
	}

	ro, err := newOverlapped()
	if err != nil {
		return
	}
	wo, err := newOverlapped()
	if err != nil {
		return
	}
	port := new(serialPort)
	port.f = f
	port.fd = h
	port.ro = ro
	port.wo = wo

	return port, nil
}

func (p *serialPort) Close() error {
	return p.f.Close()
}

func (p *serialPort) Write(buf []byte) (int, error) {
	p.wl.Lock()
	defer p.wl.Unlock()

	if err := resetEvent(p.wo.HEvent); err != nil {
		return 0, err
	}
	var n uint32
	err := syscall.WriteFile(p.fd, buf, &n, p.wo)
	if err != nil && err != syscall.ERROR_IO_PENDING {
		return int(n), err
	}
	return getOverlappedResult(p.fd, p.wo)
}

func (p *serialPort) Read(buf []byte) (int, error) {
	if p == nil || p.f == nil {
		return 0, fmt.Errorf("Invalid port on read %v %v", p, p.f)
	}

	p.rl.Lock()
	defer p.rl.Unlock()

	if err := resetEvent(p.ro.HEvent); err != nil {
		return 0, err
	}
	var done uint32
	err := syscall.ReadFile(p.fd, buf, &done, p.ro)
	if err != nil && err != syscall.ERROR_IO_PENDING {
		return int(done), err
	}
	return getOverlappedResult(p.fd, p.ro)
}

var (
	nSetCommState,
	nSetCommTimeouts,
	nSetCommMask,
	nSetupComm,
	nGetOverlappedResult,
	nCreateEvent,
	nResetEvent uintptr
)

func init() {
	k32, err := syscall.LoadLibrary("kernel32.dll")
	if err != nil {
		panic("LoadLibrary " + err.Error())
	}
	defer syscall.FreeLibrary(k32)

	nSetCommState = getProcAddr(k32, "SetCommState")
	nSetCommTimeouts = getProcAddr(k32, "SetCommTimeouts")
	nSetCommMask = getProcAddr(k32, "SetCommMask")
	nSetupComm = getProcAddr(k32, "SetupComm")
	nGetOverlappedResult = getProcAddr(k32, "GetOverlappedResult")
	nCreateEvent = getProcAddr(k32, "CreateEventW")
	nResetEvent = getProcAddr(k32, "ResetEvent")
}

func getProcAddr(lib syscall.Handle, name string) uintptr {
	addr, err := syscall.GetProcAddress(lib, name)
	if err != nil {
		panic(name + " " + err.Error())
	}
	return addr
}

func setCommState(h syscall.Handle, baud int) error {
	var params structDCB
	params.DCBlength = uint32(unsafe.Sizeof(params))

	params.flags[0] = 0x01  // fBinary
	params.flags[0] |= 0x10 // Assert DSR

	params.BaudRate = uint32(baud)
	params.ByteSize = 8

	r, _, err := syscall.Syscall(nSetCommState, 2, uintptr(h), uintptr(unsafe.Pointer(&params)), 0)
	if r == 0 {
		return err
	}
	return nil
}

func setCommTimeouts(h syscall.Handle) error {
	var timeouts structTimeouts
	const MAXDWORD = 1<<32 - 1
	timeouts.ReadIntervalTimeout = MAXDWORD
	timeouts.ReadTotalTimeoutMultiplier = MAXDWORD
	timeouts.ReadTotalTimeoutConstant = MAXDWORD - 1

	/* From http://msdn.microsoft.com/en-us/library/aa363190(v=VS.85).aspx

		 For blocking I/O see below:

		 Remarks:

		 If an application sets ReadIntervalTimeout and
		 ReadTotalTimeoutMultiplier to MAXDWORD and sets
		 ReadTotalTimeoutConstant to a value greater than zero and
		 less than MAXDWORD, one of the following occurs when the
		 ReadFile function is called:

		 If there are any bytes in the input buffer, ReadFile returns
		       immediately with the bytes in the buffer.

		 If there are no bytes in the input buffer, ReadFile waits
	               until a byte arrives and then returns immediately.

		 If no bytes arrive within the time specified by
		       ReadTotalTimeoutConstant, ReadFile times out.
	*/

	r, _, err := syscall.Syscall(nSetCommTimeouts, 2, uintptr(h), uintptr(unsafe.Pointer(&timeouts)), 0)
	if r == 0 {
		return err
	}
	return nil
}

func setupComm(h syscall.Handle, in, out int) error {
	r, _, err := syscall.Syscall(nSetupComm, 3, uintptr(h), uintptr(in), uintptr(out))
	if r == 0 {
		return err
	}
	return nil
}

func setCommMask(h syscall.Handle) error {
	const EV_RXCHAR = 0x0001
	r, _, err := syscall.Syscall(nSetCommMask, 2, uintptr(h), EV_RXCHAR, 0)
	if r == 0 {
		return err
	}
	return nil
}

func resetEvent(h syscall.Handle) error {
	r, _, err := syscall.Syscall(nResetEvent, 1, uintptr(h), 0, 0)
	if r == 0 {
		return err
	}
	return nil
}

func newOverlapped() (*syscall.Overlapped, error) {
	var overlapped syscall.Overlapped
	r, _, err := syscall.Syscall6(nCreateEvent, 4, 0, 1, 0, 0, 0, 0)
	if r == 0 {
		return nil, err
	}
	overlapped.HEvent = syscall.Handle(r)
	return &overlapped, nil
}

func getOverlappedResult(h syscall.Handle, overlapped *syscall.Overlapped) (int, error) {
	var n int
	r, _, err := syscall.Syscall6(nGetOverlappedResult, 4,
		uintptr(h),
		uintptr(unsafe.Pointer(overlapped)),
		uintptr(unsafe.Pointer(&n)), 1, 0, 0)
	if r == 0 {
		return n, err
	}

	return n, nil
}
@@ -1,3 +1,15 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:

    apic.go
    emitterc.go
    parserc.go
    readerc.go
    scannerc.go
    writerc.go
    yamlh.go
    yamlprivateh.go

Copyright (c) 2006 Kirill Simonov

Permission is hereby granted, free of charge, to any person obtaining a copy of
128  Godeps/_workspace/src/gopkg.in/yaml.v1/README.md  generated  vendored  Normal file
@@ -0,0 +1,128 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package is almost compatible with YAML 1.1, including support for
anchors, tags, etc. There are still a few missing bits, such as document
merging, base-60 floats (huh?), and multi-document unmarshalling. These
features are not hard to add, and will be introduced as necessary.

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v1*.

To install it, run:

    go get gopkg.in/yaml.v1

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)

API stability
-------------

The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v1"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

type T struct {
	A string
	B struct{C int; D []int ",flow"}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
@@ -1,8 +1,9 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -211,6 +212,16 @@ func newDecoder() *decoder {
|
|||||||
// returned to call SetYAML() with the value of *out once it's defined.
|
// returned to call SetYAML() with the value of *out once it's defined.
|
||||||
//
|
//
|
||||||
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
|
func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
|
||||||
|
if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
|
||||||
|
setter, _ := (*out).Addr().Interface().(Setter)
|
||||||
|
if setter != nil {
|
||||||
|
var arg interface{}
|
||||||
|
*out = reflect.ValueOf(&arg).Elem()
|
||||||
|
return func() {
|
||||||
|
*good = setter.SetYAML(tag, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
again := true
|
again := true
|
||||||
for again {
|
for again {
|
||||||
again = false
|
again = false
|
||||||
@@ -279,16 +290,19 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
|||||||
return good
|
return good
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var durationType = reflect.TypeOf(time.Duration(0))
|
||||||
|
|
||||||
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||||
var tag string
|
var tag string
|
||||||
var resolved interface{}
|
var resolved interface{}
|
||||||
if n.tag == "" && !n.implicit {
|
if n.tag == "" && !n.implicit {
|
||||||
|
tag = "!!str"
|
||||||
resolved = n.value
|
resolved = n.value
|
||||||
} else {
|
} else {
|
||||||
tag, resolved = resolve(n.tag, n.value)
|
tag, resolved = resolve(n.tag, n.value)
|
||||||
if set := d.setter(tag, &out, &good); set != nil {
|
}
|
||||||
defer set()
|
if set := d.setter(tag, &out, &good); set != nil {
|
||||||
}
|
defer set()
|
||||||
}
|
}
|
||||||
switch out.Kind() {
|
switch out.Kind() {
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
@@ -320,6 +334,14 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
|||||||
out.SetInt(int64(resolved))
|
out.SetInt(int64(resolved))
|
||||||
good = true
|
good = true
|
||||||
}
|
}
|
||||||
|
case string:
|
||||||
|
if out.Type() == durationType {
|
||||||
|
d, err := time.ParseDuration(resolved)
|
||||||
|
if err == nil {
|
||||||
|
out.SetInt(int64(d))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
switch resolved := resolved.(type) {
|
switch resolved := resolved.(type) {
|
||||||
@@ -437,6 +459,10 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
|||||||
}
|
}
|
||||||
l := len(n.children)
|
l := len(n.children)
|
||||||
for i := 0; i < l; i += 2 {
|
for i := 0; i < l; i += 2 {
|
||||||
|
if isMerge(n.children[i]) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
k := reflect.New(kt).Elem()
|
k := reflect.New(kt).Elem()
|
||||||
if d.unmarshal(n.children[i], k) {
|
if d.unmarshal(n.children[i], k) {
|
||||||
e := reflect.New(et).Elem()
|
e := reflect.New(et).Elem()
|
||||||
@@ -456,7 +482,12 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
|||||||
name := settableValueOf("")
|
name := settableValueOf("")
|
||||||
l := len(n.children)
|
l := len(n.children)
|
||||||
for i := 0; i < l; i += 2 {
|
for i := 0; i < l; i += 2 {
|
||||||
if !d.unmarshal(n.children[i], name) {
|
ni := n.children[i]
|
||||||
|
if isMerge(ni) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !d.unmarshal(ni, name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||||
@@ -471,3 +502,37 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||||
|
const wantMap = "map merge requires map or sequence of maps as the value"
|
||||||
|
switch n.kind {
|
||||||
|
case mappingNode:
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case aliasNode:
|
||||||
|
an, ok := d.doc.anchors[n.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case sequenceNode:
|
||||||
|
// Step backwards as earlier nodes take precedence.
|
||||||
|
for i := len(n.children)-1; i >= 0; i-- {
|
||||||
|
ni := n.children[i]
|
||||||
|
if ni.kind == aliasNode {
|
||||||
|
an, ok := d.doc.anchors[ni.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
} else if ni.kind != mappingNode {
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
d.unmarshal(ni, out)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(wantMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMerge(n *node) bool {
|
||||||
|
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
|
||||||
|
}
|
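The decode.go hunks above add two behaviours to the vendored yaml package: a scalar string is tried against time.ParseDuration when the destination is a time.Duration, and `<<` merge keys fold anchored maps into the current mapping or struct. A minimal sketch of both together, assuming the package is imported by its gopkg.in path as the README shows; the document and struct names are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/yaml.v1"
)

// The anchor is merged into "job" via <<, and the explicit interval
// both overrides the merged value and is parsed as a duration.
var doc = []byte(`
defaults: &defaults
  interval: 30s
  region: us-east

job:
  <<: *defaults
  interval: 5s
`)

type job struct {
	Interval time.Duration
	Region   string
}

func main() {
	var out struct {
		Job job
	}
	if err := yaml.Unmarshal(doc, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Job.Interval, out.Job.Region) // expected: 5s us-east
}
```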
@@ -1,10 +1,11 @@
|
|||||||
package goyaml_test
|
package yaml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
. "launchpad.net/gocheck"
|
. "gopkg.in/check.v1"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
|
||||||
"math"
|
"math"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var unmarshalIntTest = 123
|
var unmarshalIntTest = 123
|
||||||
@@ -350,6 +351,32 @@ var unmarshalTests = []struct {
|
|||||||
C inlineB `yaml:",inline"`
|
C inlineB `yaml:",inline"`
|
||||||
}{1, inlineB{2, inlineC{3}}},
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// bug 1243827
|
||||||
|
{
|
||||||
|
"a: -b_c",
|
||||||
|
map[string]interface{}{"a": "-b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: +b_c",
|
||||||
|
map[string]interface{}{"a": "+b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: 50cent_of_dollar",
|
||||||
|
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
"a: 3s",
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24.
|
||||||
|
{
|
||||||
|
"a: <foo>",
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
type inlineB struct {
|
type inlineB struct {
|
||||||
@@ -377,7 +404,7 @@ func (s *S) TestUnmarshal(c *C) {
|
|||||||
pv := reflect.New(pt.Elem())
|
pv := reflect.New(pt.Elem())
|
||||||
value = pv.Interface()
|
value = pv.Interface()
|
||||||
}
|
}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), value)
|
err := yaml.Unmarshal([]byte(item.data), value)
|
||||||
c.Assert(err, IsNil, Commentf("Item #%d", i))
|
c.Assert(err, IsNil, Commentf("Item #%d", i))
|
||||||
if t.Kind() == reflect.String {
|
if t.Kind() == reflect.String {
|
||||||
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
|
c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
|
||||||
@@ -389,7 +416,7 @@ func (s *S) TestUnmarshal(c *C) {
|
|||||||
|
|
||||||
func (s *S) TestUnmarshalNaN(c *C) {
|
func (s *S) TestUnmarshalNaN(c *C) {
|
||||||
value := map[string]interface{}{}
|
value := map[string]interface{}{}
|
||||||
err := goyaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||||
}
|
}
|
||||||
@@ -408,7 +435,7 @@ var unmarshalErrorTests = []struct {
|
|||||||
func (s *S) TestUnmarshalErrors(c *C) {
|
func (s *S) TestUnmarshalErrors(c *C) {
|
||||||
for _, item := range unmarshalErrorTests {
|
for _, item := range unmarshalErrorTests {
|
||||||
var value interface{}
|
var value interface{}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), &value)
|
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||||
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -421,6 +448,8 @@ var setterTests = []struct {
|
|||||||
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||||
{"_: 10", "!!int", 10},
|
{"_: 10", "!!int", 10},
|
||||||
{"_: null", "!!null", nil},
|
{"_: null", "!!null", nil},
|
||||||
|
{`_: BAR!`, "!!str", "BAR!"},
|
||||||
|
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||||
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -442,17 +471,31 @@ func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
type typeWithSetterField struct {
|
type setterPointerType struct {
|
||||||
Field *typeWithSetter "_"
|
Field *typeWithSetter "_"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) TestUnmarshalWithSetter(c *C) {
|
type setterValueType struct {
|
||||||
|
Field typeWithSetter "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalWithPointerSetter(c *C) {
|
||||||
for _, item := range setterTests {
|
for _, item := range setterTests {
|
||||||
obj := &typeWithSetterField{}
|
obj := &setterPointerType{}
|
||||||
err := goyaml.Unmarshal([]byte(item.data), obj)
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(obj.Field, NotNil,
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
Commentf("Pointer not initialized (%#v)", item.value))
|
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||||
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalWithValueSetter(c *C) {
|
||||||
|
for _, item := range setterTests {
|
||||||
|
obj := &setterValueType{}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
c.Assert(obj.Field.tag, Equals, item.tag)
|
c.Assert(obj.Field.tag, Equals, item.tag)
|
||||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
}
|
}
|
||||||
@@ -460,7 +503,7 @@ func (s *S) TestUnmarshalWithSetter(c *C) {
|
|||||||
|
|
||||||
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
|
func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
|
||||||
obj := &typeWithSetter{}
|
obj := &typeWithSetter{}
|
||||||
err := goyaml.Unmarshal([]byte(setterTests[0].data), obj)
|
err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(obj.tag, Equals, setterTests[0].tag)
|
c.Assert(obj.tag, Equals, setterTests[0].tag)
|
||||||
value, ok := obj.value.(map[interface{}]interface{})
|
value, ok := obj.value.(map[interface{}]interface{})
|
||||||
@@ -477,8 +520,8 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
m := map[string]*typeWithSetter{}
|
m := map[string]*typeWithSetter{}
|
||||||
data := "{abc: 1, def: 2, ghi: 3, jkl: 4}"
|
data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
|
||||||
err := goyaml.Unmarshal([]byte(data), m)
|
err := yaml.Unmarshal([]byte(data), m)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(m["abc"], NotNil)
|
c.Assert(m["abc"], NotNil)
|
||||||
c.Assert(m["def"], IsNil)
|
c.Assert(m["def"], IsNil)
|
||||||
@@ -489,6 +532,98 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
c.Assert(m["ghi"].value, Equals, 3)
|
c.Assert(m["ghi"].value, Equals, 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// From http://yaml.org/type/merge.html
|
||||||
|
var mergeTests = `
|
||||||
|
anchors:
|
||||||
|
- &CENTER { "x": 1, "y": 2 }
|
||||||
|
- &LEFT { "x": 0, "y": 2 }
|
||||||
|
- &BIG { "r": 10 }
|
||||||
|
- &SMALL { "r": 1 }
|
||||||
|
|
||||||
|
# All the following maps are equal:
|
||||||
|
|
||||||
|
plain:
|
||||||
|
# Explicit keys
|
||||||
|
"x": 1
|
||||||
|
"y": 2
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeOne:
|
||||||
|
# Merge one map
|
||||||
|
<< : *CENTER
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeMultiple:
|
||||||
|
# Merge multiple maps
|
||||||
|
<< : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
override:
|
||||||
|
# Override
|
||||||
|
<< : [ *BIG, *LEFT, *SMALL ]
|
||||||
|
"x": 1
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
shortTag:
|
||||||
|
# Explicit short merge tag
|
||||||
|
!!merge "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
longTag:
|
||||||
|
# Explicit merge long tag
|
||||||
|
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineMap:
|
||||||
|
# Inlined map
|
||||||
|
<< : {"x": 1, "y": 2, "r": 10}
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineSequenceMap:
|
||||||
|
# Inlined map in sequence
|
||||||
|
<< : [ *CENTER, {"r": 10} ]
|
||||||
|
label: center/big
|
||||||
|
`
|
||||||
|
|
||||||
|
func (s *S) TestMerge(c *C) {
|
||||||
|
var want = map[interface{}]interface{}{
|
||||||
|
"x": 1,
|
||||||
|
"y": 2,
|
||||||
|
"r": 10,
|
||||||
|
"label": "center/big",
|
||||||
|
}
|
||||||
|
|
||||||
|
var m map[string]interface{}
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMergeStruct(c *C) {
|
||||||
|
type Data struct {
|
||||||
|
X, Y, R int
|
||||||
|
Label string
|
||||||
|
}
|
||||||
|
want := Data{1, 2, 10, "center/big"}
|
||||||
|
|
||||||
|
var m map[string]Data
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//var data []byte
|
//var data []byte
|
||||||
//func init() {
|
//func init() {
|
||||||
// var err error
|
// var err error
|
||||||
@@ -502,7 +637,7 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
// var err error
|
// var err error
|
||||||
// for i := 0; i < c.N; i++ {
|
// for i := 0; i < c.N; i++ {
|
||||||
// var v map[string]interface{}
|
// var v map[string]interface{}
|
||||||
// err = goyaml.Unmarshal(data, &v)
|
// err = yaml.Unmarshal(data, &v)
|
||||||
// }
|
// }
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// panic(err)
|
// panic(err)
|
||||||
@@ -511,9 +646,9 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
|
|||||||
//
|
//
|
||||||
//func (s *S) BenchmarkMarshal(c *C) {
|
//func (s *S) BenchmarkMarshal(c *C) {
|
||||||
// var v map[string]interface{}
|
// var v map[string]interface{}
|
||||||
// goyaml.Unmarshal(data, &v)
|
// yaml.Unmarshal(data, &v)
|
||||||
// c.ResetTimer()
|
// c.ResetTimer()
|
||||||
// for i := 0; i < c.N; i++ {
|
// for i := 0; i < c.N; i++ {
|
||||||
// goyaml.Marshal(&v)
|
// yaml.Marshal(&v)
|
||||||
// }
|
// }
|
||||||
//}
|
//}
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
@@ -1,9 +1,10 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type encoder struct {
|
type encoder struct {
|
||||||
@@ -85,7 +86,11 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
|
|||||||
case reflect.String:
|
case reflect.String:
|
||||||
e.stringv(tag, in)
|
e.stringv(tag, in)
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
e.intv(tag, in)
|
if in.Type() == durationType {
|
||||||
|
e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
|
||||||
|
} else {
|
||||||
|
e.intv(tag, in)
|
||||||
|
}
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
e.uintv(tag, in)
|
e.uintv(tag, in)
|
||||||
case reflect.Float32, reflect.Float64:
|
case reflect.Float32, reflect.Float64:
|
@@ -1,12 +1,13 @@
|
|||||||
package goyaml_test
|
package yaml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
. "launchpad.net/gocheck"
|
"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
|
||||||
"github.com/coreos/coreos-cloudinit/third_party/launchpad.net/goyaml"
|
. "gopkg.in/check.v1"
|
||||||
"math"
|
"math"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var marshalIntTest = 123
|
var marshalIntTest = 123
|
||||||
@@ -212,11 +213,23 @@ var marshalTests = []struct {
|
|||||||
}{1, inlineB{2, inlineC{3}}},
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
"a: 1\nb: 2\nc: 3\n",
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
"a: 3s\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
"a: <foo>\n",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) TestMarshal(c *C) {
|
func (s *S) TestMarshal(c *C) {
|
||||||
for _, item := range marshalTests {
|
for _, item := range marshalTests {
|
||||||
data, err := goyaml.Marshal(item.value)
|
data, err := yaml.Marshal(item.value)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, item.data)
|
c.Assert(string(data), Equals, item.data)
|
||||||
}
|
}
|
||||||
@@ -237,7 +250,7 @@ var marshalErrorTests = []struct {
|
|||||||
|
|
||||||
func (s *S) TestMarshalErrors(c *C) {
|
func (s *S) TestMarshalErrors(c *C) {
|
||||||
for _, item := range marshalErrorTests {
|
for _, item := range marshalErrorTests {
|
||||||
_, err := goyaml.Marshal(item.value)
|
_, err := yaml.Marshal(item.value)
|
||||||
c.Assert(err, ErrorMatches, item.error)
|
c.Assert(err, ErrorMatches, item.error)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -269,12 +282,12 @@ func (s *S) TestMarshalTypeCache(c *C) {
|
|||||||
var err error
|
var err error
|
||||||
func() {
|
func() {
|
||||||
type T struct{ A int }
|
type T struct{ A int }
|
||||||
data, err = goyaml.Marshal(&T{})
|
data, err = yaml.Marshal(&T{})
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
}()
|
}()
|
||||||
func() {
|
func() {
|
||||||
type T struct{ B int }
|
type T struct{ B int }
|
||||||
data, err = goyaml.Marshal(&T{})
|
data, err = yaml.Marshal(&T{})
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
}()
|
}()
|
||||||
c.Assert(string(data), Equals, "b: 0\n")
|
c.Assert(string(data), Equals, "b: 0\n")
|
||||||
@@ -298,7 +311,7 @@ func (s *S) TestMashalWithGetter(c *C) {
|
|||||||
obj := &typeWithGetterField{}
|
obj := &typeWithGetterField{}
|
||||||
obj.Field.tag = item.tag
|
obj.Field.tag = item.tag
|
||||||
obj.Field.value = item.value
|
obj.Field.value = item.value
|
||||||
data, err := goyaml.Marshal(obj)
|
data, err := yaml.Marshal(obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, string(item.data))
|
c.Assert(string(data), Equals, string(item.data))
|
||||||
}
|
}
|
||||||
@@ -308,7 +321,7 @@ func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
|
|||||||
obj := &typeWithGetter{}
|
obj := &typeWithGetter{}
|
||||||
obj.tag = ""
|
obj.tag = ""
|
||||||
obj.value = map[string]string{"hello": "world!"}
|
obj.value = map[string]string{"hello": "world!"}
|
||||||
data, err := goyaml.Marshal(obj)
|
data, err := yaml.Marshal(obj)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
c.Assert(string(data), Equals, "hello: world!\n")
|
c.Assert(string(data), Equals, "hello: world!\n")
|
||||||
}
|
}
|
||||||
@@ -356,7 +369,7 @@ func (s *S) TestSortedOutput(c *C) {
|
|||||||
for _, k := range order {
|
for _, k := range order {
|
||||||
m[k] = 1
|
m[k] = 1
|
||||||
}
|
}
|
||||||
data, err := goyaml.Marshal(m)
|
data, err := yaml.Marshal(m)
|
||||||
c.Assert(err, IsNil)
|
c.Assert(err, IsNil)
|
||||||
out := "\n" + string(data)
|
out := "\n" + string(data)
|
||||||
last := 0
|
last := 0
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"math"
|
||||||
@@ -27,7 +27,6 @@ func init() {
|
|||||||
t[int(c)] = 'M' // In map
|
t[int(c)] = 'M' // In map
|
||||||
}
|
}
|
||||||
t[int('.')] = '.' // Float (potentially in map)
|
t[int('.')] = '.' // Float (potentially in map)
|
||||||
t[int('<')] = '<' // Merge
|
|
||||||
|
|
||||||
var resolveMapList = []struct {
|
var resolveMapList = []struct {
|
||||||
v interface{}
|
v interface{}
|
||||||
@@ -45,6 +44,7 @@ func init() {
|
|||||||
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
|
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
|
||||||
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
|
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
|
||||||
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
|
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
|
||||||
|
{"<<", "!!merge", []string{"<<"}},
|
||||||
}
|
}
|
||||||
|
|
||||||
m := resolveMap
|
m := resolveMap
|
||||||
@@ -113,13 +113,8 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
|
|||||||
|
|
||||||
case 'D', 'S':
|
case 'D', 'S':
|
||||||
// Int, float, or timestamp.
|
// Int, float, or timestamp.
|
||||||
for i := 0; i != len(in); i++ {
|
plain := strings.Replace(in, "_", "", -1)
|
||||||
if in[i] == '_' {
|
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||||
in = strings.Replace(in, "_", "", -1)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intv, err := strconv.ParseInt(in, 0, 64)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if intv == int64(int(intv)) {
|
if intv == int64(int(intv)) {
|
||||||
return "!!int", int(intv)
|
return "!!int", int(intv)
|
||||||
@@ -127,26 +122,23 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
|
|||||||
return "!!int", intv
|
return "!!int", intv
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
floatv, err := strconv.ParseFloat(in, 64)
|
floatv, err := strconv.ParseFloat(plain, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return "!!float", floatv
|
return "!!float", floatv
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(in, "0b") {
|
if strings.HasPrefix(plain, "0b") {
|
||||||
intv, err := strconv.ParseInt(in[2:], 2, 64)
|
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return "!!int", int(intv)
|
return "!!int", int(intv)
|
||||||
}
|
}
|
||||||
} else if strings.HasPrefix(in, "-0b") {
|
} else if strings.HasPrefix(plain, "-0b") {
|
||||||
intv, err := strconv.ParseInt(in[3:], 2, 64)
|
intv, err := strconv.ParseInt(plain[3:], 2, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return "!!int", -int(intv)
|
return "!!int", -int(intv)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// XXX Handle timestamps here.
|
// XXX Handle timestamps here.
|
||||||
|
|
||||||
case '<':
|
|
||||||
// XXX Handle merge (<<) here.
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
panic("resolveTable item not yet handled: " +
|
panic("resolveTable item not yet handled: " +
|
||||||
string([]byte{c}) + " (with " + in + ")")
|
string([]byte{c}) + " (with " + in + ")")
|
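The resolve.go hunk above replaces the in-place underscore scan with a single strings.Replace before the numeric conversions, so YAML 1.1 style literals such as `1_000` still resolve to integers while words that merely contain an underscore (the bug 1243827 cases) fall through to `!!str`. A small self-contained sketch of that conversion order, mirroring the patched logic rather than calling the package itself:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// resolveNumber follows the order used by the patched resolve():
// strip underscores once, then try int, then float; anything that
// fails both stays a plain string.
func resolveNumber(in string) (interface{}, bool) {
	plain := strings.Replace(in, "_", "", -1)
	if intv, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return intv, true
	}
	if floatv, err := strconv.ParseFloat(plain, 64); err == nil {
		return floatv, true
	}
	return in, false
}

func main() {
	for _, s := range []string{"1_000", "6_8.5_3", "50cent_of_dollar"} {
		v, ok := resolveNumber(s)
		fmt.Printf("%-18s -> %v (numeric: %v)\n", s, v, ok)
	}
}
```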
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
@@ -1,7 +1,7 @@
|
|||||||
package goyaml_test
|
package yaml_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
. "launchpad.net/gocheck"
|
. "gopkg.in/check.v1"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
// Set the writer error and return false.
|
// Set the writer error and return false.
|
||||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
@@ -1,5 +1,10 @@
|
|||||||
// Package goyaml implements YAML support for the Go language.
|
// Package yaml implements YAML support for the Go language.
|
||||||
package goyaml
|
//
|
||||||
|
// Source code and other details for the project are available at GitHub:
|
||||||
|
//
|
||||||
|
// https://github.com/go-yaml/yaml
|
||||||
|
//
|
||||||
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
@@ -28,32 +33,31 @@ func handleErr(err *error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Objects implementing the goyaml.Setter interface will receive the YAML
|
// The Setter interface may be implemented by types to do their own custom
|
||||||
// tag and value via the SetYAML method during unmarshaling, rather than
|
// unmarshalling of YAML values, rather than being implicitly assigned by
|
||||||
// being implicitly assigned by the goyaml machinery. If setting the value
|
// the yaml package machinery. If setting the value works, the method should
|
||||||
// works, the method should return true. If it returns false, the given
|
// return true. If it returns false, the value is considered unsupported
|
||||||
// value will be omitted from maps and slices.
|
// and is omitted from maps and slices.
|
||||||
type Setter interface {
|
type Setter interface {
|
||||||
SetYAML(tag string, value interface{}) bool
|
SetYAML(tag string, value interface{}) bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Objects implementing the goyaml.Getter interface will get the GetYAML()
|
// The Getter interface is implemented by types to do their own custom
|
||||||
// method called when goyaml is requested to marshal the given value, and
|
// marshalling into a YAML tag and value.
|
||||||
// the result of this method will be marshaled in place of the actual object.
|
|
||||||
type Getter interface {
|
type Getter interface {
|
||||||
GetYAML() (tag string, value interface{})
|
GetYAML() (tag string, value interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal decodes the first document found within the in byte slice
|
// Unmarshal decodes the first document found within the in byte slice
|
||||||
// and assigns decoded values into the object pointed by out.
|
// and assigns decoded values into the out value.
|
||||||
//
|
//
|
||||||
// Maps, pointers to structs and ints, etc, may all be used as out values.
|
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
||||||
// If an internal pointer within a struct is not initialized, goyaml
|
// values. If an internal pointer within a struct is not initialized,
|
||||||
// will initialize it if necessary for unmarshalling the provided data,
|
// the yaml package will initialize it if necessary for unmarshalling
|
||||||
// but the struct provided as out must not be a nil pointer.
|
// the provided data. The out parameter must not be nil.
|
||||||
//
|
//
|
||||||
// The type of the decoded values and the type of out will be considered,
|
// The type of the decoded values and the type of out will be considered,
|
||||||
// and Unmarshal() will do the best possible job to unmarshal values
|
// and Unmarshal will do the best possible job to unmarshal values
|
||||||
// appropriately. It is NOT considered an error, though, to skip values
|
// appropriately. It is NOT considered an error, though, to skip values
|
||||||
// because they are not available in the decoded YAML, or if they are not
|
// because they are not available in the decoded YAML, or if they are not
|
||||||
// compatible with the out value. To ensure something was properly
|
// compatible with the out value. To ensure something was properly
|
||||||
@@ -61,11 +65,11 @@ type Getter interface {
|
|||||||
// field (usually the zero value).
|
// field (usually the zero value).
|
||||||
//
|
//
|
||||||
// Struct fields are only unmarshalled if they are exported (have an
|
// Struct fields are only unmarshalled if they are exported (have an
|
||||||
// upper case first letter), and will be unmarshalled using the field
|
// upper case first letter), and are unmarshalled using the field name
|
||||||
// name lowercased by default. When custom field names are desired, the
|
// lowercased as the default key. Custom keys may be defined via the
|
||||||
// tag value may be used to tweak the name. Everything before the first
|
// "yaml" name in the field tag: the content preceding the first comma
|
||||||
// comma in the field tag will be used as the name. The values following
|
// is used as the key, and the following comma-separated options are
|
||||||
// the comma are used to tweak the marshalling process (see Marshal).
|
// used to tweak the marshalling process (see Marshal).
|
||||||
// Conflicting names result in a runtime error.
|
// Conflicting names result in a runtime error.
|
||||||
//
|
//
|
||||||
// For example:
|
// For example:
|
||||||
@@ -75,7 +79,7 @@ type Getter interface {
|
|||||||
// B int
|
// B int
|
||||||
// }
|
// }
|
||||||
// var T t
|
// var T t
|
||||||
// goyaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||||
//
|
//
|
||||||
// See the documentation of Marshal for the format of tags and a list of
|
// See the documentation of Marshal for the format of tags and a list of
|
||||||
// supported tag options.
|
// supported tag options.
|
||||||
@@ -94,14 +98,16 @@ func Unmarshal(in []byte, out interface{}) (err error) {
|
|||||||
|
|
||||||
// Marshal serializes the value provided into a YAML document. The structure
|
// Marshal serializes the value provided into a YAML document. The structure
|
||||||
// of the generated document will reflect the structure of the value itself.
|
// of the generated document will reflect the structure of the value itself.
|
||||||
// Maps, pointers to structs and ints, etc, may all be used as the in value.
|
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||||
//
|
//
|
||||||
// In the case of struct values, only exported fields will be serialized.
|
// Struct fields are only unmarshalled if they are exported (have an upper case
|
||||||
// The lowercased field name is used as the key for each exported field,
|
// first letter), and are unmarshalled using the field name lowercased as the
|
||||||
// but this behavior may be changed using the respective field tag.
|
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||||
// The tag may also contain flags to tweak the marshalling behavior for
|
// tag: the content preceding the first comma is used as the key, and the
|
||||||
// the field. Conflicting names result in a runtime error. The tag format
|
// following comma-separated options are used to tweak the marshalling process.
|
||||||
// accepted is:
|
// Conflicting names result in a runtime error.
|
||||||
|
//
|
||||||
|
// The field tag format accepted is:
|
||||||
//
|
//
|
||||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||||
//
|
//
|
||||||
@@ -126,8 +132,8 @@ func Unmarshal(in []byte, out interface{}) (err error) {
|
|||||||
// F int "a,omitempty"
|
// F int "a,omitempty"
|
||||||
// B int
|
// B int
|
||||||
// }
|
// }
|
||||||
// goyaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||||
// goyaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||||
//
|
//
|
||||||
func Marshal(in interface{}) (out []byte, err error) {
|
func Marshal(in interface{}) (out []byte, err error) {
|
||||||
defer handleErr(&err)
|
defer handleErr(&err)
|
||||||
@@ -142,7 +148,7 @@ func Marshal(in interface{}) (out []byte, err error) {
|
|||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
// Maintain a mapping of keys to structure field indexes
|
// Maintain a mapping of keys to structure field indexes
|
||||||
|
|
||||||
// The code in this section was copied from gobson.
|
// The code in this section was copied from mgo/bson.
|
||||||
|
|
||||||
// structInfo holds details for the serialization of fields of
|
// structInfo holds details for the serialization of fields of
|
||||||
// a given struct.
|
// a given struct.
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
@@ -1,4 +1,4 @@
|
|||||||
package goyaml
|
package yaml
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// The size of the input raw buffer.
|
// The size of the input raw buffer.
|
3  MAINTAINERS  Normal file
@@ -0,0 +1,3 @@
Alex Crawford <alex.crawford@coreos.com> (@crawford)
Jonathan Boulle <jonathan.boulle@coreos.com> (@jonboulle)
Brian Waldon <brian.waldon@coreos.com> (@bcwaldon)
@@ -1,3 +1,19 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 CoreOS, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -8,98 +24,144 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/coreos-cloudinit/datasource"
|
"github.com/coreos/coreos-cloudinit/datasource"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
|
||||||
|
"github.com/coreos/coreos-cloudinit/datasource/file"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
+	"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
+	"github.com/coreos/coreos-cloudinit/datasource/url"
+	"github.com/coreos/coreos-cloudinit/datasource/waagent"
 	"github.com/coreos/coreos-cloudinit/initialize"
 	"github.com/coreos/coreos-cloudinit/pkg"
 	"github.com/coreos/coreos-cloudinit/system"
 )
 
 const (
-	version               = "0.9.1"
+	version               = "0.10.9"
 	datasourceInterval    = 100 * time.Millisecond
 	datasourceMaxInterval = 30 * time.Second
 	datasourceTimeout     = 5 * time.Minute
 )
 
 var (
-	printVersion   bool
-	ignoreFailure  bool
-	sources        struct {
-		file            string
-		configDrive     string
-		metadataService bool
-		url             string
-		procCmdLine     bool
-	}
-	convertNetconf string
-	workspace      string
-	sshKeyName     string
+	flags = struct {
+		printVersion  bool
+		ignoreFailure bool
+		sources       struct {
+			file                        string
+			configDrive                 string
+			waagent                     string
+			metadataService             bool
+			ec2MetadataService          string
+			cloudSigmaMetadataService   bool
+			digitalOceanMetadataService string
+			url                         string
+			procCmdLine                 bool
+		}
+		convertNetconf string
+		workspace      string
+		sshKeyName     string
+		oem            string
+	}{}
 )
 
 func init() {
-	flag.BoolVar(&printVersion, "version", false, "Print the version and exit")
-	flag.BoolVar(&ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
-	flag.StringVar(&sources.file, "from-file", "", "Read user-data from provided file")
-	flag.StringVar(&sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory")
-	flag.BoolVar(&sources.metadataService, "from-metadata-service", false, "Download data from metadata service")
-	flag.StringVar(&sources.url, "from-url", "", "Download user-data from provided url")
-	flag.BoolVar(&sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", datasource.ProcCmdlineLocation, datasource.ProcCmdlineCloudConfigFlag))
-	flag.StringVar(&convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files (requires the -from-configdrive flag)")
-	flag.StringVar(&workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
-	flag.StringVar(&sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
+	flag.BoolVar(&flags.printVersion, "version", false, "Print the version and exit")
+	flag.BoolVar(&flags.ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
+	flag.StringVar(&flags.sources.file, "from-file", "", "Read user-data from provided file")
+	flag.StringVar(&flags.sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory")
+	flag.StringVar(&flags.sources.waagent, "from-waagent", "", "Read data from provided waagent directory")
+	flag.BoolVar(&flags.sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service")
+	flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url")
+	flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
+	flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url")
+	flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url")
+	flag.BoolVar(&flags.sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag))
+	flag.StringVar(&flags.oem, "oem", "", "Use the settings specific to the provided OEM")
+	flag.StringVar(&flags.convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files")
+	flag.StringVar(&flags.workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
+	flag.StringVar(&flags.sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
 }
 
+type oemConfig map[string]string
+
+var (
+	oemConfigs = map[string]oemConfig{
+		"digitalocean": oemConfig{
+			"from-digitalocean-metadata": "http://169.254.169.254/",
+			"convert-netconf":            "digitalocean",
+		},
+		"ec2-compat": oemConfig{
+			"from-ec2-metadata": "http://169.254.169.254/",
+			"from-configdrive":  "/media/configdrive",
+		},
+		"rackspace-onmetal": oemConfig{
+			"from-configdrive": "/media/configdrive",
+			"convert-netconf":  "debian",
+		},
+		"azure": oemConfig{
+			"from-waagent": "/var/lib/waagent",
+		},
+	}
+)
+
 func main() {
+	failure := false
+
 	flag.Parse()
 
-	die := func() {
-		if ignoreFailure {
-			os.Exit(0)
-		}
-		os.Exit(1)
+	if c, ok := oemConfigs[flags.oem]; ok {
+		for k, v := range c {
+			flag.Set(k, v)
+		}
+	} else if flags.oem != "" {
+		oems := make([]string, 0, len(oemConfigs))
+		for k := range oemConfigs {
+			oems = append(oems, k)
+		}
+		fmt.Printf("Invalid option to --oem: %q. Supported options: %q\n", flags.oem, oems)
+		os.Exit(2)
 	}
 
-	if printVersion == true {
+	if flags.printVersion == true {
 		fmt.Printf("coreos-cloudinit version %s\n", version)
 		os.Exit(0)
 	}
 
-	if convertNetconf != "" && sources.configDrive == "" {
-		fmt.Println("-convert-netconf flag requires -from-configdrive")
-		os.Exit(1)
-	}
-
-	switch convertNetconf {
+	switch flags.convertNetconf {
 	case "":
 	case "debian":
+	case "digitalocean":
 	default:
-		fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian'\n", convertNetconf)
-		os.Exit(1)
+		fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean'\n", flags.convertNetconf)
+		os.Exit(2)
 	}
 
 	dss := getDatasources()
 	if len(dss) == 0 {
-		fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-metadata-service, --from-url or --from-proc-cmdline")
-		os.Exit(1)
+		fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-url or --from-proc-cmdline")
+		os.Exit(2)
 	}
 
 	ds := selectDatasource(dss)
 	if ds == nil {
 		fmt.Println("No datasources available in time")
-		die()
+		os.Exit(1)
 	}
 
 	fmt.Printf("Fetching user-data from datasource of type %q\n", ds.Type())
 	userdataBytes, err := ds.FetchUserdata()
 	if err != nil {
-		fmt.Printf("Failed fetching user-data from datasource: %v\n", err)
-		die()
+		fmt.Printf("Failed fetching user-data from datasource: %v\nContinuing...\n", err)
+		failure = true
 	}
 
 	fmt.Printf("Fetching meta-data from datasource of type %q\n", ds.Type())
 	metadataBytes, err := ds.FetchMetadata()
 	if err != nil {
 		fmt.Printf("Failed fetching meta-data from datasource: %v\n", err)
-		die()
+		os.Exit(1)
 	}
 
 	// Extract IPv4 addresses from metadata if possible
@@ -108,23 +170,34 @@ func main() {
 		subs, err = initialize.ExtractIPsFromMetadata(metadataBytes)
 		if err != nil {
 			fmt.Printf("Failed extracting IPs from meta-data: %v\n", err)
-			die()
+			os.Exit(1)
 		}
 	}
 
 	// Apply environment to user-data
-	env := initialize.NewEnvironment("/", ds.ConfigRoot(), workspace, convertNetconf, sshKeyName, subs)
+	env := initialize.NewEnvironment("/", ds.ConfigRoot(), flags.workspace, flags.convertNetconf, flags.sshKeyName, subs)
 	userdata := env.Apply(string(userdataBytes))
 
 	var ccm, ccu *initialize.CloudConfig
 	var script *system.Script
 	if ccm, err = initialize.ParseMetaData(string(metadataBytes)); err != nil {
 		fmt.Printf("Failed to parse meta-data: %v\n", err)
-		die()
+		os.Exit(1)
 	}
 
+	if ccm != nil && flags.convertNetconf != "" {
+		fmt.Printf("Fetching network config from datasource of type %q\n", ds.Type())
+		netconfBytes, err := ds.FetchNetworkConfig(ccm.NetworkConfigPath)
+		if err != nil {
+			fmt.Printf("Failed fetching network config from datasource: %v\n", err)
+			os.Exit(1)
+		}
+		ccm.NetworkConfig = string(netconfBytes)
+	}
+
 	if ud, err := initialize.ParseUserData(userdata); err != nil {
-		fmt.Printf("Failed to parse user-data: %v\n", err)
-		die()
+		fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err)
+		failure = true
 	} else {
 		switch t := ud.(type) {
 		case *initialize.CloudConfig:
@@ -152,16 +225,20 @@ func main() {
 	if cc != nil {
 		if err = initialize.Apply(*cc, env); err != nil {
 			fmt.Printf("Failed to apply cloud-config: %v\n", err)
-			die()
+			os.Exit(1)
 		}
 	}
 
 	if script != nil {
 		if err = runScript(*script, env); err != nil {
 			fmt.Printf("Failed to run script: %v\n", err)
-			die()
+			os.Exit(1)
 		}
 	}
+
+	if failure && !flags.ignoreFailure {
+		os.Exit(1)
+	}
 }
 
 // mergeCloudConfig merges certain options from mdcc (a CloudConfig derived from
@@ -172,7 +249,7 @@ func main() {
 func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudConfig) {
 	if mdcc.Hostname != "" {
 		if udcc.Hostname != "" {
-			fmt.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)", udcc.Hostname, mdcc.Hostname)
+			fmt.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", udcc.Hostname, mdcc.Hostname)
 		} else {
 			udcc.Hostname = mdcc.Hostname
 		}
@@ -183,11 +260,18 @@ func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudCon
 	}
 	if mdcc.NetworkConfigPath != "" {
 		if udcc.NetworkConfigPath != "" {
-			fmt.Printf("Warning: user-data NetworkConfigPath %s overrides metadata NetworkConfigPath %s", udcc.NetworkConfigPath, mdcc.NetworkConfigPath)
+			fmt.Printf("Warning: user-data NetworkConfigPath %s overrides metadata NetworkConfigPath %s\n", udcc.NetworkConfigPath, mdcc.NetworkConfigPath)
 		} else {
 			udcc.NetworkConfigPath = mdcc.NetworkConfigPath
 		}
 	}
+	if mdcc.NetworkConfig != "" {
+		if udcc.NetworkConfig != "" {
+			fmt.Printf("Warning: user-data NetworkConfig %s overrides metadata NetworkConfig %s\n", udcc.NetworkConfig, mdcc.NetworkConfig)
+		} else {
+			udcc.NetworkConfig = mdcc.NetworkConfig
+		}
+	}
 	return udcc
 }
 
@@ -195,20 +279,32 @@ func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudCon
 // on the different source command-line flags.
 func getDatasources() []datasource.Datasource {
 	dss := make([]datasource.Datasource, 0, 5)
-	if sources.file != "" {
-		dss = append(dss, datasource.NewLocalFile(sources.file))
+	if flags.sources.file != "" {
+		dss = append(dss, file.NewDatasource(flags.sources.file))
 	}
-	if sources.url != "" {
-		dss = append(dss, datasource.NewRemoteFile(sources.url))
+	if flags.sources.url != "" {
+		dss = append(dss, url.NewDatasource(flags.sources.url))
 	}
-	if sources.configDrive != "" {
-		dss = append(dss, datasource.NewConfigDrive(sources.configDrive))
+	if flags.sources.configDrive != "" {
+		dss = append(dss, configdrive.NewDatasource(flags.sources.configDrive))
 	}
-	if sources.metadataService {
-		dss = append(dss, datasource.NewMetadataService())
+	if flags.sources.metadataService {
+		dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
 	}
-	if sources.procCmdLine {
-		dss = append(dss, datasource.NewProcCmdline())
+	if flags.sources.ec2MetadataService != "" {
+		dss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService))
+	}
+	if flags.sources.cloudSigmaMetadataService {
+		dss = append(dss, cloudsigma.NewServerContextService())
+	}
+	if flags.sources.digitalOceanMetadataService != "" {
+		dss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService))
+	}
+	if flags.sources.waagent != "" {
+		dss = append(dss, waagent.NewDatasource(flags.sources.waagent))
+	}
+	if flags.sources.procCmdLine {
+		dss = append(dss, proc_cmdline.NewDatasource())
 	}
 	return dss
 }
@@ -240,7 +336,7 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
 		select {
 		case <-stop:
 			return
-		case <-time.Tick(duration):
+		case <-time.After(duration):
 			duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
 		}
 	}
@@ -257,7 +353,7 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
 		select {
 		case s = <-ds:
 		case <-done:
-		case <-time.Tick(datasourceTimeout):
+		case <-time.After(datasourceTimeout):
 		}
 
 		close(stop)
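The --oem handling added above is just a preset lookup followed by flag.Set on each key. Below is a minimal, self-contained sketch of that pattern; the single preset is copied from the oemConfigs table, while the program itself is illustrative and not code from the repository.

```go
package main

import (
	"flag"
	"fmt"
)

// presets mirrors the shape of the oemConfigs table: preset name -> flag name -> value.
var presets = map[string]map[string]string{
	"digitalocean": {
		"from-digitalocean-metadata": "http://169.254.169.254/",
		"convert-netconf":            "digitalocean",
	},
}

func main() {
	from := flag.String("from-digitalocean-metadata", "", "metadata URL")
	netconf := flag.String("convert-netconf", "", "network config format")
	oem := flag.String("oem", "", "OEM preset name")
	flag.Parse()

	// Expand the preset after parsing, overriding the zero defaults,
	// in the same way the loop added to main() does.
	if preset, ok := presets[*oem]; ok {
		for k, v := range preset {
			flag.Set(k, v)
		}
	}
	fmt.Println(*from, *netconf)
}
```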
@@ -1,3 +1,19 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package main
 
 import (
@@ -12,6 +28,7 @@ func TestMergeCloudConfig(t *testing.T) {
 		SSHAuthorizedKeys: []string{"abc", "def"},
 		Hostname:          "foobar",
 		NetworkConfigPath: "/path/somewhere",
+		NetworkConfig:     `{}`,
 	}
 	for i, tt := range []struct {
 		udcc initialize.CloudConfig
@@ -36,6 +53,7 @@ func TestMergeCloudConfig(t *testing.T) {
 			initialize.CloudConfig{
 				Hostname:          "meta-hostname",
 				NetworkConfigPath: "/path/meta",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 			simplecc,
 		},
@@ -45,6 +63,7 @@ func TestMergeCloudConfig(t *testing.T) {
 				SSHAuthorizedKeys: []string{"abc", "def"},
 				Hostname:          "user-hostname",
 				NetworkConfigPath: "/path/somewhere",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 			initialize.CloudConfig{
 				SSHAuthorizedKeys: []string{"woof", "qux"},
@@ -54,6 +73,7 @@ func TestMergeCloudConfig(t *testing.T) {
 				SSHAuthorizedKeys: []string{"abc", "def", "woof", "qux"},
 				Hostname:          "user-hostname",
 				NetworkConfigPath: "/path/somewhere",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 		},
 		{
@@ -64,11 +84,13 @@ func TestMergeCloudConfig(t *testing.T) {
 			initialize.CloudConfig{
 				SSHAuthorizedKeys: []string{"zaphod", "beeblebrox"},
 				NetworkConfigPath: "/dev/fun",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 			initialize.CloudConfig{
 				Hostname:          "supercool",
 				SSHAuthorizedKeys: []string{"zaphod", "beeblebrox"},
 				NetworkConfigPath: "/dev/fun",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 		},
 		{
@@ -80,11 +102,13 @@ func TestMergeCloudConfig(t *testing.T) {
 			initialize.CloudConfig{
 				Hostname:          "youyouyou",
 				NetworkConfigPath: "meta-meta-yo",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 			initialize.CloudConfig{
 				Hostname:          "mememe",
 				ManageEtcHosts:    initialize.EtcHosts("lolz"),
 				NetworkConfigPath: "meta-meta-yo",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 		},
 		{
@@ -95,10 +119,12 @@ func TestMergeCloudConfig(t *testing.T) {
 			initialize.CloudConfig{
 				ManageEtcHosts:    initialize.EtcHosts("lolz"),
 				NetworkConfigPath: "meta-meta-yo",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 			initialize.CloudConfig{
 				Hostname:          "mememe",
 				NetworkConfigPath: "meta-meta-yo",
+				NetworkConfig:     `{"hostname":"test"}`,
 			},
 		},
 	} {
@@ -1,64 +0,0 @@
-package datasource
-
-import (
-	"io/ioutil"
-	"os"
-	"path"
-)
-
-type configDrive struct {
-	root     string
-	readFile func(filename string) ([]byte, error)
-}
-
-func NewConfigDrive(root string) *configDrive {
-	return &configDrive{root, ioutil.ReadFile}
-}
-
-func (cd *configDrive) IsAvailable() bool {
-	_, err := os.Stat(cd.root)
-	return !os.IsNotExist(err)
-}
-
-func (cd *configDrive) AvailabilityChanges() bool {
-	return true
-}
-
-func (cd *configDrive) ConfigRoot() string {
-	return cd.openstackRoot()
-}
-
-// FetchMetadata attempts to retrieve metadata from ec2/2009-04-04/meta_data.json.
-func (cd *configDrive) FetchMetadata() ([]byte, error) {
-	return cd.tryReadFile(path.Join(cd.ec2Root(), "meta_data.json"))
-}
-
-// FetchUserdata attempts to retrieve the userdata from ec2/2009-04-04/user_data.
-// If no data is found, it will attempt to read from openstack/latest/user_data.
-func (cd *configDrive) FetchUserdata() ([]byte, error) {
-	bytes, err := cd.tryReadFile(path.Join(cd.ec2Root(), "user_data"))
-	if bytes == nil && err == nil {
-		bytes, err = cd.tryReadFile(path.Join(cd.openstackRoot(), "user_data"))
-	}
-	return bytes, err
-}
-
-func (cd *configDrive) Type() string {
-	return "cloud-drive"
-}
-
-func (cd *configDrive) ec2Root() string {
-	return path.Join(cd.root, "ec2", Ec2ApiVersion)
-}
-
-func (cd *configDrive) openstackRoot() string {
-	return path.Join(cd.root, "openstack", "latest")
-}
-
-func (cd *configDrive) tryReadFile(filename string) ([]byte, error) {
-	data, err := cd.readFile(filename)
-	if os.IsNotExist(err) {
-		err = nil
-	}
-	return data, err
-}
datasource/configdrive/configdrive.go (new file, 86 lines)
@@ -0,0 +1,86 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package configdrive
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+)
+
+const (
+	openstackApiVersion = "latest"
+)
+
+type configDrive struct {
+	root     string
+	readFile func(filename string) ([]byte, error)
+}
+
+func NewDatasource(root string) *configDrive {
+	return &configDrive{root, ioutil.ReadFile}
+}
+
+func (cd *configDrive) IsAvailable() bool {
+	_, err := os.Stat(cd.root)
+	return !os.IsNotExist(err)
+}
+
+func (cd *configDrive) AvailabilityChanges() bool {
+	return true
+}
+
+func (cd *configDrive) ConfigRoot() string {
+	return cd.openstackRoot()
+}
+
+func (cd *configDrive) FetchMetadata() ([]byte, error) {
+	return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json"))
+}
+
+func (cd *configDrive) FetchUserdata() ([]byte, error) {
+	return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "user_data"))
+}
+
+func (cd *configDrive) FetchNetworkConfig(filename string) ([]byte, error) {
+	if filename == "" {
+		return []byte{}, nil
+	}
+	return cd.tryReadFile(path.Join(cd.openstackRoot(), filename))
+}
+
+func (cd *configDrive) Type() string {
+	return "cloud-drive"
+}
+
+func (cd *configDrive) openstackRoot() string {
+	return path.Join(cd.root, "openstack")
+}
+
+func (cd *configDrive) openstackVersionRoot() string {
+	return path.Join(cd.openstackRoot(), openstackApiVersion)
+}
+
+func (cd *configDrive) tryReadFile(filename string) ([]byte, error) {
+	fmt.Printf("Attempting to read from %q\n", filename)
+	data, err := cd.readFile(filename)
+	if os.IsNotExist(err) {
+		err = nil
+	}
+	return data, err
+}
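A usage sketch for the new config-drive datasource, assuming the package path shown above and an OpenStack config drive mounted at /media/configdrive (the mount point used by the ec2-compat and rackspace-onmetal presets); this is illustrative and not code from the repository.

```go
package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource/configdrive"
)

func main() {
	cd := configdrive.NewDatasource("/media/configdrive")
	if !cd.IsAvailable() {
		fmt.Println("config drive not mounted")
		return
	}
	// Reads <root>/openstack/latest/user_data, per FetchUserdata above.
	userdata, err := cd.FetchUserdata()
	if err != nil {
		fmt.Printf("failed reading user-data: %v\n", err)
		return
	}
	fmt.Printf("read %d bytes of user-data\n", len(userdata))
}
```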
datasource/configdrive/configdrive_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package configdrive
+
+import (
+	"os"
+	"testing"
+)
+
+type mockFilesystem []string
+
+func (m mockFilesystem) readFile(filename string) ([]byte, error) {
+	for _, file := range m {
+		if file == filename {
+			return []byte(filename), nil
+		}
+	}
+	return nil, os.ErrNotExist
+}
+
+func TestFetchMetadata(t *testing.T) {
+	for _, tt := range []struct {
+		root     string
+		filename string
+		files    mockFilesystem
+	}{
+		{
+			"/",
+			"",
+			mockFilesystem{},
+		},
+		{
+			"/",
+			"/openstack/latest/meta_data.json",
+			mockFilesystem([]string{"/openstack/latest/meta_data.json"}),
+		},
+		{
+			"/media/configdrive",
+			"/media/configdrive/openstack/latest/meta_data.json",
+			mockFilesystem([]string{"/media/configdrive/openstack/latest/meta_data.json"}),
+		},
+	} {
+		cd := configDrive{tt.root, tt.files.readFile}
+		filename, err := cd.FetchMetadata()
+		if err != nil {
+			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+		}
+		if string(filename) != tt.filename {
+			t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
+		}
+	}
+}
+
+func TestFetchUserdata(t *testing.T) {
+	for _, tt := range []struct {
+		root     string
+		filename string
+		files    mockFilesystem
+	}{
+		{
+			"/",
+			"",
+			mockFilesystem{},
+		},
+		{
+			"/",
+			"/openstack/latest/user_data",
+			mockFilesystem([]string{"/openstack/latest/user_data"}),
+		},
+		{
+			"/media/configdrive",
+			"/media/configdrive/openstack/latest/user_data",
+			mockFilesystem([]string{"/media/configdrive/openstack/latest/user_data"}),
+		},
+	} {
+		cd := configDrive{tt.root, tt.files.readFile}
+		filename, err := cd.FetchUserdata()
+		if err != nil {
+			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+		}
+		if string(filename) != tt.filename {
+			t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
+		}
+	}
+}
+
+func TestConfigRoot(t *testing.T) {
+	for _, tt := range []struct {
+		root       string
+		configRoot string
+	}{
+		{
+			"/",
+			"/openstack",
+		},
+		{
+			"/media/configdrive",
+			"/media/configdrive/openstack",
+		},
+	} {
+		cd := configDrive{tt.root, nil}
+		if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot {
+			t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
+		}
+	}
+}
+
+func TestNewDatasource(t *testing.T) {
+	for _, tt := range []struct {
+		root       string
+		expectRoot string
+	}{
+		{
+			root:       "",
+			expectRoot: "",
+		},
+		{
+			root:       "/media/configdrive",
+			expectRoot: "/media/configdrive",
+		},
+	} {
+		service := NewDatasource(tt.root)
+		if service.root != tt.expectRoot {
+			t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
+		}
+	}
+}
@@ -1,114 +0,0 @@
-package datasource
-
-import (
-	"os"
-	"testing"
-)
-
-type mockFilesystem []string
-
-func (m mockFilesystem) readFile(filename string) ([]byte, error) {
-	for _, file := range m {
-		if file == filename {
-			return []byte(filename), nil
-		}
-	}
-	return nil, os.ErrNotExist
-}
-
-func TestCDFetchMetadata(t *testing.T) {
-	for _, tt := range []struct {
-		root     string
-		filename string
-		files    mockFilesystem
-	}{
-		{
-			"/",
-			"",
-			mockFilesystem{},
-		},
-		{
-			"/",
-			"/ec2/2009-04-04/meta_data.json",
-			mockFilesystem([]string{"/ec2/2009-04-04/meta_data.json"}),
-		},
-		{
-			"/media/configdrive",
-			"/media/configdrive/ec2/2009-04-04/meta_data.json",
-			mockFilesystem([]string{"/media/configdrive/ec2/2009-04-04/meta_data.json"}),
-		},
-	} {
-		cd := configDrive{tt.root, tt.files.readFile}
-		filename, err := cd.FetchMetadata()
-		if err != nil {
-			t.Fatalf("bad error for %q: want %q, got %q", tt, nil, err)
-		}
-		if string(filename) != tt.filename {
-			t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
-		}
-	}
-}
-
-func TestCDFetchUserdata(t *testing.T) {
-	for _, tt := range []struct {
-		root     string
-		filename string
-		files    mockFilesystem
-	}{
-		{
-			"/",
-			"",
-			mockFilesystem{},
-		},
-		{
-			"/",
-			"/ec2/2009-04-04/user_data",
-			mockFilesystem([]string{"/ec2/2009-04-04/user_data"}),
-		},
-		{
-			"/",
-			"/openstack/latest/user_data",
-			mockFilesystem([]string{"/openstack/latest/user_data"}),
-		},
-		{
-			"/",
-			"/ec2/2009-04-04/user_data",
-			mockFilesystem([]string{"/openstack/latest/user_data", "/ec2/2009-04-04/user_data"}),
-		},
-		{
-			"/media/configdrive",
-			"/media/configdrive/ec2/2009-04-04/user_data",
-			mockFilesystem([]string{"/media/configdrive/ec2/2009-04-04/user_data"}),
-		},
-	} {
-		cd := configDrive{tt.root, tt.files.readFile}
-		filename, err := cd.FetchUserdata()
-		if err != nil {
-			t.Fatalf("bad error for %q: want %q, got %q", tt, nil, err)
-		}
-		if string(filename) != tt.filename {
-			t.Fatalf("bad path for %q: want %q, got %q", tt, tt.filename, filename)
-		}
-	}
-}
-
-func TestCDConfigRoot(t *testing.T) {
-	for _, tt := range []struct {
-		root       string
-		configRoot string
-	}{
-		{
-			"/",
-			"/openstack/latest",
-		},
-		{
-			"/media/configdrive",
-			"/media/configdrive/openstack/latest",
-		},
-	} {
-		cd := configDrive{tt.root, nil}
-		if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot {
-			t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
-		}
-	}
-}
@@ -1,9 +1,20 @@
-package datasource
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
 
-const (
-	Ec2ApiVersion       = "2009-04-04"
-	OpenstackApiVersion = "2012-08-10"
-)
+package datasource
 
 type Datasource interface {
 	IsAvailable() bool
@@ -11,5 +22,6 @@ type Datasource interface {
 	ConfigRoot() string
 	FetchMetadata() ([]byte, error)
 	FetchUserdata() ([]byte, error)
+	FetchNetworkConfig(string) ([]byte, error)
 	Type() string
 }
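For implementers, the change above means every datasource now has to provide a FetchNetworkConfig method in addition to the existing ones. A minimal sketch of a type satisfying the interface follows; the unchanged methods not visible in this hunk (such as AvailabilityChanges) are assumed to keep the signatures used by the implementations elsewhere in this diff, and the "static" datasource itself is hypothetical.

```go
package datasourceexample

// staticDatasource is a hypothetical, do-nothing datasource used only to
// illustrate the full method set expected by the extended interface.
type staticDatasource struct {
	userdata []byte
}

func (s staticDatasource) IsAvailable() bool                         { return true }
func (s staticDatasource) AvailabilityChanges() bool                 { return false }
func (s staticDatasource) ConfigRoot() string                        { return "" }
func (s staticDatasource) FetchMetadata() ([]byte, error)            { return []byte{}, nil }
func (s staticDatasource) FetchUserdata() ([]byte, error)            { return s.userdata, nil }
func (s staticDatasource) FetchNetworkConfig(string) ([]byte, error) { return nil, nil }
func (s staticDatasource) Type() string                              { return "static" }
```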
@@ -1,39 +0,0 @@
-package datasource
-
-import (
-	"io/ioutil"
-	"os"
-)
-
-type localFile struct {
-	path string
-}
-
-func NewLocalFile(path string) *localFile {
-	return &localFile{path}
-}
-
-func (f *localFile) IsAvailable() bool {
-	_, err := os.Stat(f.path)
-	return !os.IsNotExist(err)
-}
-
-func (f *localFile) AvailabilityChanges() bool {
-	return true
-}
-
-func (f *localFile) ConfigRoot() string {
-	return ""
-}
-
-func (f *localFile) FetchMetadata() ([]byte, error) {
-	return []byte{}, nil
-}
-
-func (f *localFile) FetchUserdata() ([]byte, error) {
-	return ioutil.ReadFile(f.path)
-}
-
-func (f *localFile) Type() string {
-	return "local-file"
-}
datasource/file/file.go (new file, 59 lines)
@@ -0,0 +1,59 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package file
+
+import (
+	"io/ioutil"
+	"os"
+)
+
+type localFile struct {
+	path string
+}
+
+func NewDatasource(path string) *localFile {
+	return &localFile{path}
+}
+
+func (f *localFile) IsAvailable() bool {
+	_, err := os.Stat(f.path)
+	return !os.IsNotExist(err)
+}
+
+func (f *localFile) AvailabilityChanges() bool {
+	return true
+}
+
+func (f *localFile) ConfigRoot() string {
+	return ""
+}
+
+func (f *localFile) FetchMetadata() ([]byte, error) {
+	return []byte{}, nil
+}
+
+func (f *localFile) FetchUserdata() ([]byte, error) {
+	return ioutil.ReadFile(f.path)
+}
+
+func (f *localFile) FetchNetworkConfig(filename string) ([]byte, error) {
+	return nil, nil
+}
+
+func (f *localFile) Type() string {
+	return "local-file"
+}
datasource/metadata/cloudsigma/server_context.go (new file, 161 lines)
@@ -0,0 +1,161 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cloudsigma
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"os"
+	"strings"
+
+	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/cloudsigma/cepgo"
+)
+
+const (
+	userDataFieldName = "cloudinit-user-data"
+)
+
+type serverContextService struct {
+	client interface {
+		All() (interface{}, error)
+		Key(string) (interface{}, error)
+		Meta() (map[string]string, error)
+		FetchRaw(string) ([]byte, error)
+	}
+}
+
+func NewServerContextService() *serverContextService {
+	return &serverContextService{
+		client: cepgo.NewCepgo(),
+	}
+}
+
+func (_ *serverContextService) IsAvailable() bool {
+	productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
+	if err != nil {
+		return false
+	}
+	productName := make([]byte, 10)
+	_, err = productNameFile.Read(productName)
+	return err == nil && string(productName) == "CloudSigma"
+}
+
+func (_ *serverContextService) AvailabilityChanges() bool {
+	return true
+}
+
+func (_ *serverContextService) ConfigRoot() string {
+	return ""
+}
+
+func (_ *serverContextService) Type() string {
+	return "server-context"
+}
+
+func (scs *serverContextService) FetchMetadata() ([]byte, error) {
+	var (
+		inputMetadata struct {
+			Name string            `json:"name"`
+			UUID string            `json:"uuid"`
+			Meta map[string]string `json:"meta"`
+			Nics []struct {
+				Runtime struct {
+					InterfaceType string `json:"interface_type"`
+					IPv4          struct {
+						IP string `json:"uuid"`
+					} `json:"ip_v4"`
+				} `json:"runtime"`
+			} `json:"nics"`
+		}
+		outputMetadata struct {
+			Hostname   string            `json:"name"`
+			PublicKeys map[string]string `json:"public_keys"`
+			LocalIPv4  string            `json:"local-ipv4"`
+			PublicIPv4 string            `json:"public-ipv4"`
+		}
+	)
+
+	rawMetadata, err := scs.client.FetchRaw("")
+	if err != nil {
+		return []byte{}, err
+	}
+
+	err = json.Unmarshal(rawMetadata, &inputMetadata)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	if inputMetadata.Name != "" {
+		outputMetadata.Hostname = inputMetadata.Name
+	} else {
+		outputMetadata.Hostname = inputMetadata.UUID
+	}
+
+	if key, ok := inputMetadata.Meta["ssh_public_key"]; ok {
+		splitted := strings.Split(key, " ")
+		outputMetadata.PublicKeys = make(map[string]string)
+		outputMetadata.PublicKeys[splitted[len(splitted)-1]] = key
+	}
+
+	for _, nic := range inputMetadata.Nics {
+		if nic.Runtime.IPv4.IP != "" {
+			if nic.Runtime.InterfaceType == "public" {
+				outputMetadata.PublicIPv4 = nic.Runtime.IPv4.IP
+			} else {
+				outputMetadata.LocalIPv4 = nic.Runtime.IPv4.IP
+			}
+		}
+	}
+
+	return json.Marshal(outputMetadata)
+}
+
+func (scs *serverContextService) FetchUserdata() ([]byte, error) {
+	metadata, err := scs.client.Meta()
+	if err != nil {
+		return []byte{}, err
+	}
+
+	userData, ok := metadata[userDataFieldName]
+	if ok && isBase64Encoded(userDataFieldName, metadata) {
+		if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
+			return decodedUserData, nil
+		} else {
+			return []byte{}, nil
+		}
+	}
+
+	return []byte(userData), nil
+}
+
+func (scs *serverContextService) FetchNetworkConfig(a string) ([]byte, error) {
+	return nil, nil
+}
+
+func isBase64Encoded(field string, userdata map[string]string) bool {
+	base64Fields, ok := userdata["base64_fields"]
+	if !ok {
+		return false
+	}
+
+	for _, base64Field := range strings.Split(base64Fields, ",") {
+		if field == base64Field {
+			return true
+		}
+	}
+	return false
+}
datasource/metadata/cloudsigma/server_context_test.go (new file, 168 lines)
@@ -0,0 +1,168 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cloudsigma
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+type fakeCepgoClient struct {
+	raw  []byte
+	meta map[string]string
+	keys map[string]interface{}
+	err  error
+}
+
+func (f *fakeCepgoClient) All() (interface{}, error) {
+	return f.keys, f.err
+}
+
+func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
+	return f.keys[key], f.err
+}
+
+func (f *fakeCepgoClient) Meta() (map[string]string, error) {
+	return f.meta, f.err
+}
+
+func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
+	return f.raw, f.err
+}
+
+func TestServerContextFetchMetadata(t *testing.T) {
+	var metadata struct {
+		Hostname   string            `json:"name"`
+		PublicKeys map[string]string `json:"public_keys"`
+		LocalIPv4  string            `json:"local-ipv4"`
+		PublicIPv4 string            `json:"public-ipv4"`
+	}
+	client := new(fakeCepgoClient)
+	scs := NewServerContextService()
+	scs.client = client
+	client.raw = []byte(`{
+		"context": true,
+		"cpu": 4000,
+		"cpu_model": null,
+		"cpus_instead_of_cores": false,
+		"enable_numa": false,
+		"grantees": [],
+		"hv_relaxed": false,
+		"hv_tsc": false,
+		"jobs": [],
+		"mem": 4294967296,
+		"meta": {
+			"base64_fields": "cloudinit-user-data",
+			"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
+			"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
+		},
+		"name": "coreos",
+		"nics": [
+			{
+				"runtime": {
+					"interface_type": "public",
+					"ip_v4": {
+						"uuid": "31.171.251.74"
+					},
+					"ip_v6": null
+				},
+				"vlan": null
+			}
+		],
+		"smp": 2,
+		"status": "running",
+		"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
+	}`)
+
+	metadataBytes, err := scs.FetchMetadata()
+	if err != nil {
+		t.Error(err.Error())
+	}
+
+	if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
+		t.Error(err.Error())
+	}
+
+	if metadata.Hostname != "coreos" {
+		t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
+	}
+
+	if metadata.PublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
+		t.Error("Public SSH Keys are not being read properly")
+	}
+
+	if metadata.LocalIPv4 != "" {
+		t.Errorf("Local IP is not empty but %s instead", metadata.LocalIPv4)
+	}
+
+	if metadata.PublicIPv4 != "31.171.251.74" {
+		t.Errorf("Local IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
+	}
+}
+
+func TestServerContextFetchUserdata(t *testing.T) {
+	client := new(fakeCepgoClient)
+	scs := NewServerContextService()
+	scs.client = client
+	userdataSets := []struct {
+		in  map[string]string
+		err bool
+		out []byte
+	}{
+		{map[string]string{
+			"base64_fields":       "cloudinit-user-data",
+			"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
+		}, false, []byte("hostname: coreos_test")},
+		{map[string]string{
+			"cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
+		}, false, []byte("#cloud-config\\nhostname: coreos1")},
+		{map[string]string{}, false, []byte{}},
+	}
+
+	for i, set := range userdataSets {
+		client.meta = set.in
+		got, err := scs.FetchUserdata()
+		if (err != nil) != set.err {
+			t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
+		}
+
+		if !reflect.DeepEqual(got, set.out) {
+			t.Errorf("case %d: got %s, want %s", i, got, set.out)
+		}
+	}
+}
+
+func TestServerContextDecodingBase64UserData(t *testing.T) {
+	base64Sets := []struct {
+		in  string
+		out bool
+	}{
+		{"cloudinit-user-data,foo,bar", true},
+		{"bar,cloudinit-user-data,foo,bar", true},
+		{"cloudinit-user-data", true},
+		{"", false},
+		{"foo", false},
+	}
+
+	for _, set := range base64Sets {
+		userdata := map[string]string{"base64_fields": set.in}
+		if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
+			t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
+		}
+	}
+}
datasource/metadata/digitalocean/metadata.go (new file, 123 lines)
@@ -0,0 +1,123 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package digitalocean
+
+import (
+	"encoding/json"
+	"strconv"
+
+	"github.com/coreos/coreos-cloudinit/datasource/metadata"
+)
+
+const (
+	DefaultAddress = "http://169.254.169.254/"
+	apiVersion     = "metadata/v1"
+	userdataUrl    = apiVersion + "/user-data"
+	metadataPath   = apiVersion + ".json"
+)
+
+type Address struct {
+	IPAddress string `json:"ip_address"`
+	Netmask   string `json:"netmask"`
+	Cidr      int    `json:"cidr"`
+	Gateway   string `json:"gateway"`
+}
+
+type Interface struct {
+	IPv4 *Address `json:"ipv4"`
+	IPv6 *Address `json:"ipv6"`
+	MAC  string   `json:"mac"`
+	Type string   `json:"type"`
+}
+
+type Interfaces struct {
+	Public  []Interface `json:"public"`
+	Private []Interface `json:"private"`
+}
+
+type DNS struct {
+	Nameservers []string `json:"nameservers"`
+}
+
+type Metadata struct {
+	Hostname   string     `json:"hostname"`
+	Interfaces Interfaces `json:"interfaces"`
+	PublicKeys []string   `json:"public_keys"`
+	DNS        DNS        `json:"dns"`
+}
+
+type metadataService struct {
+	interfaces Interfaces
+	dns        DNS
+	metadata.MetadataService
+}
+
+func NewDatasource(root string) *metadataService {
+	return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)}
+}
+
+func (ms *metadataService) FetchMetadata() ([]byte, error) {
+	data, err := ms.FetchData(ms.MetadataUrl())
+	if err != nil || len(data) == 0 {
+		return []byte{}, err
+	}
+
+	var metadata Metadata
+	if err := json.Unmarshal(data, &metadata); err != nil {
+		return []byte{}, err
+	}
+
+	ms.interfaces = metadata.Interfaces
+	ms.dns = metadata.DNS
+
+	attrs := make(map[string]interface{})
+	if len(metadata.Interfaces.Public) > 0 {
+		if metadata.Interfaces.Public[0].IPv4 != nil {
+			attrs["public-ipv4"] = metadata.Interfaces.Public[0].IPv4.IPAddress
+		}
+		if metadata.Interfaces.Public[0].IPv6 != nil {
+			attrs["public-ipv6"] = metadata.Interfaces.Public[0].IPv6.IPAddress
+		}
+	}
+	if len(metadata.Interfaces.Private) > 0 {
+		if metadata.Interfaces.Private[0].IPv4 != nil {
+			attrs["local-ipv4"] = metadata.Interfaces.Private[0].IPv4.IPAddress
+		}
+		if metadata.Interfaces.Private[0].IPv6 != nil {
+			attrs["local-ipv6"] = metadata.Interfaces.Private[0].IPv6.IPAddress
+		}
+	}
+	attrs["hostname"] = metadata.Hostname
+	keys := make(map[string]string)
+	for i, key := range metadata.PublicKeys {
+		keys[strconv.Itoa(i)] = key
+	}
+	attrs["public_keys"] = keys
+
+	return json.Marshal(attrs)
+}
+
+func (ms metadataService) FetchNetworkConfig(filename string) ([]byte, error) {
+	return json.Marshal(Metadata{
+		Interfaces: ms.interfaces,
+		DNS:        ms.dns,
+	})
+}
+
+func (ms metadataService) Type() string {
+	return "digitalocean-metadata-service"
+}
datasource/metadata/digitalocean/metadata_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package digitalocean
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"github.com/coreos/coreos-cloudinit/datasource/metadata"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/test"
+	"github.com/coreos/coreos-cloudinit/pkg"
+)
+
+func TestType(t *testing.T) {
+	want := "digitalocean-metadata-service"
+	if kind := (metadataService{}).Type(); kind != want {
+		t.Fatalf("bad type: want %q, got %q", want, kind)
+	}
+}
+
+func TestFetchMetadata(t *testing.T) {
+	for _, tt := range []struct {
+		root         string
+		metadataPath string
+		resources    map[string]string
+		expect       []byte
+		clientErr    error
+		expectErr    error
+	}{
+		{
+			root:         "/",
+			metadataPath: "v1.json",
+			resources: map[string]string{
+				"/v1.json": "bad",
+			},
+			expectErr: fmt.Errorf("invalid character 'b' looking for beginning of value"),
+		},
+		{
+			root:         "/",
+			metadataPath: "v1.json",
+			resources: map[string]string{
+				"/v1.json": `{
+					"droplet_id": 1,
+					"user_data": "hello",
+					"vendor_data": "hello",
+					"public_keys": [
+						"publickey1",
+						"publickey2"
+					],
+					"region": "nyc2",
+					"interfaces": {
+						"public": [
+							{
+								"ipv4": {
+									"ip_address": "192.168.1.2",
+									"netmask": "255.255.255.0",
+									"gateway": "192.168.1.1"
+								},
+								"ipv6": {
+									"ip_address": "fe00::",
+									"cidr": 126,
+									"gateway": "fe00::"
+								},
+								"mac": "ab:cd:ef:gh:ij",
+								"type": "public"
+							}
+						]
+					}
+				}`,
+			},
+			expect: []byte(`{"hostname":"","public-ipv4":"192.168.1.2","public-ipv6":"fe00::","public_keys":{"0":"publickey1","1":"publickey2"}}`),
+		},
+		{
+			clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+			expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+		},
+	} {
+		service := &metadataService{
+			MetadataService: metadata.MetadataService{
+				Root:         tt.root,
+				Client:       &test.HttpClient{Resources: tt.resources, Err: tt.clientErr},
+				MetadataPath: tt.metadataPath,
+			},
+		}
+		metadata, err := service.FetchMetadata()
+		if Error(err) != Error(tt.expectErr) {
+			t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
+		}
+		if !bytes.Equal(metadata, tt.expect) {
+			t.Fatalf("bad fetch (%q): want %q, got %q", tt.resources, tt.expect, metadata)
+		}
+	}
+}
+
+func Error(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}
123
datasource/metadata/ec2/metadata.go
Normal file
123
datasource/metadata/ec2/metadata.go
Normal file
@@ -0,0 +1,123 @@
/*
   Copyright 2014 CoreOS, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ec2

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/coreos/coreos-cloudinit/datasource/metadata"
	"github.com/coreos/coreos-cloudinit/pkg"
)

const (
	DefaultAddress = "http://169.254.169.254/"
	apiVersion     = "2009-04-04/"
	userdataPath   = apiVersion + "user-data"
	metadataPath   = apiVersion + "meta-data"
)

type metadataService struct {
	metadata.MetadataService
}

func NewDatasource(root string) *metadataService {
	return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath)}
}

func (ms metadataService) FetchMetadata() ([]byte, error) {
	attrs := make(map[string]interface{})
	if keynames, err := ms.fetchAttributes(fmt.Sprintf("%s/public-keys", ms.MetadataUrl())); err == nil {
		keyIDs := make(map[string]string)
		for _, keyname := range keynames {
			tokens := strings.SplitN(keyname, "=", 2)
			if len(tokens) != 2 {
				return nil, fmt.Errorf("malformed public key: %q", keyname)
			}
			keyIDs[tokens[1]] = tokens[0]
		}

		keys := make(map[string]string)
		for name, id := range keyIDs {
			sshkey, err := ms.fetchAttribute(fmt.Sprintf("%s/public-keys/%s/openssh-key", ms.MetadataUrl(), id))
			if err != nil {
				return nil, err
			}
			keys[name] = sshkey
			fmt.Printf("Found SSH key for %q\n", name)
		}
		attrs["public_keys"] = keys
	} else if _, ok := err.(pkg.ErrNotFound); !ok {
		return nil, err
	}

	if hostname, err := ms.fetchAttribute(fmt.Sprintf("%s/hostname", ms.MetadataUrl())); err == nil {
		attrs["hostname"] = hostname
	} else if _, ok := err.(pkg.ErrNotFound); !ok {
		return nil, err
	}

	if localAddr, err := ms.fetchAttribute(fmt.Sprintf("%s/local-ipv4", ms.MetadataUrl())); err == nil {
		attrs["local-ipv4"] = localAddr
	} else if _, ok := err.(pkg.ErrNotFound); !ok {
		return nil, err
	}

	if publicAddr, err := ms.fetchAttribute(fmt.Sprintf("%s/public-ipv4", ms.MetadataUrl())); err == nil {
		attrs["public-ipv4"] = publicAddr
	} else if _, ok := err.(pkg.ErrNotFound); !ok {
		return nil, err
	}

	if content_path, err := ms.fetchAttribute(fmt.Sprintf("%s/network_config/content_path", ms.MetadataUrl())); err == nil {
		attrs["network_config"] = map[string]string{
			"content_path": content_path,
		}
	} else if _, ok := err.(pkg.ErrNotFound); !ok {
		return nil, err
	}

	return json.Marshal(attrs)
}

func (ms metadataService) Type() string {
	return "ec2-metadata-service"
}

func (ms metadataService) fetchAttributes(url string) ([]string, error) {
	resp, err := ms.FetchData(url)
	if err != nil {
		return nil, err
	}
	scanner := bufio.NewScanner(bytes.NewBuffer(resp))
	data := make([]string, 0)
	for scanner.Scan() {
		data = append(data, scanner.Text())
	}
	return data, scanner.Err()
}

func (ms metadataService) fetchAttribute(url string) (string, error) {
	if attrs, err := ms.fetchAttributes(url); err == nil && len(attrs) > 0 {
		return attrs[0], nil
	} else {
		return "", err
	}
}
Some files were not shown because too many files have changed in this diff.