Compare commits

65 commits (SHA1):

fac805dc11, 94ea0b99ea, 56a80d84cf, 00c9174da4, ec8742c9ba, b3b09aeb19, 481d98c0b5, f30727a675,
e1305937e6, 20c4653ecf, 43c6da06a5, 7ab84601c3, a24b23663c, 91fe744bd2, eb8fc045ee, ba83b2871f,
f36821f7ce, 97fe210760, c6400f7751, f6647634f0, 837d3d3622, 1063a4b9ee, 081f77a102, 41289286ca,
d50a4069a6, be0c9c56e4, 6467f06656, 7a05e63fcc, ca6f97d050, d086bca9e4, d25f18776f, c583b77cdb,
ed4d5fac4c, 40429204ba, d72d54be59, 373c7ecbd9, 31c46c7051, 66ec7d805c, 2563896f89, 94a242cc58,
5b159fcf56, a9e8940132, cf194ab85e, 33bc5fc63d, 09f6a279ef, e8c8b811fe, f5ecc05d62, 66a2f00679,
14cad6f7c3, 6f188bd5d4, 41832ab19e, 672e4c07af, be53013431, c30fc51b03, b429eaab84, e0104e6d93,
7bf9712724, 78b0f82918, 987aa21883, 47ac4f6931, f8aa7a43b8, 2fe0b0b2a8, 19ce7ac849, 477053ffde,
eb0d2dbfa3
@@ -4,10 +4,6 @@ matrix:
   include:
     - go: 1.4
       env: TOOLS_CMD=golang.org/x/tools/cmd
-    - go: 1.3
-      env: TOOLS_CMD=code.google.com/p/go.tools/cmd
-    - go: 1.2
-      env: TOOLS_CMD=code.google.com/p/go.tools/cmd
 
 install:
   - go get ${TOOLS_CMD}/cover
Documentation/cloud-config-deprecated.md (new file, 38 lines)
@@ -0,0 +1,38 @@

# Deprecated Cloud-Config Features

## Retrieving SSH Authorized Keys

### From a GitHub User

Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.

```yaml
#cloud-config

users:
  - name: elroy
    coreos-ssh-import-github: elroy
```

### From an HTTP Endpoint

We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:

```yaml
#cloud-config

users:
  - name: elroy
    coreos-ssh-import-url: https://github-enterprise.example.com/api/v3/users/elroy/keys?access_token=<TOKEN>
```

You can also specify any URL whose response matches the JSON format for public keys:

```yaml
#cloud-config

users:
  - name: elroy
    coreos-ssh-import-url: https://example.com/public-keys
```
@@ -39,7 +39,7 @@ CoreOS tries to conform to each platform's native method to provide user data. E
 
 ### coreos
 
-#### etcd
+#### etcd (deprecated. see etcd2)
 
 The `coreos.etcd.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
 If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
@@ -49,15 +49,15 @@ If the platform environment supports the templating feature of coreos-cloudinit
 
 coreos:
   etcd:
     name: node001
     # generate a new token for each unique cluster from https://discovery.etcd.io/new
     discovery: https://discovery.etcd.io/<token>
     # multi-region and multi-cloud deployments need to use $public_ipv4
     addr: $public_ipv4:4001
     peer-addr: $private_ipv4:7001
 ```
 
-...will generate a systemd unit drop-in like this:
+...will generate a systemd unit drop-in for etcd.service with the following contents:
 
 ```yaml
 [Service]
@@ -71,11 +71,51 @@ For more information about the available configuration parameters, see the [etcd
 
 _Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
 
-[etcd-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
+[etcd-config]: https://github.com/coreos/etcd/blob/9fa3bea5a22265151f0d5063ce38a79c5b5d0271/Documentation/configuration.md
 
+#### etcd2
+
+The `coreos.etcd2.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
+If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. When generating a [discovery token](https://discovery.etcd.io/new?size=3), set the `size` parameter, since etcd uses this to determine if all members have joined the cluster. After the cluster is bootstrapped, it can grow or shrink from this configured size.
+
+For example, the following cloud-config document...
+
+```yaml
+#cloud-config
+
+coreos:
+  etcd2:
+    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
+    discovery: https://discovery.etcd.io/<token>
+    # multi-region and multi-cloud deployments need to use $public_ipv4
+    advertise-client-urls: http://$public_ipv4:2379
+    initial-advertise-peer-urls: http://$private_ipv4:2380
+    # listen on both the official ports and the legacy ports
+    # legacy ports can be omitted if your application doesn't depend on them
+    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+    listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
+```
+
+...will generate a systemd unit drop-in for etcd2.service with the following contents:
+
+```yaml
+[Service]
+Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
+Environment="ETCD_ADVERTISE_CLIENT_URLS=http://203.0.113.29:2379"
+Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.0.2.13:2380"
+Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
+Environment="ETCD_LISTEN_PEER_URLS=http://192.0.2.13:2380,http://192.0.2.13:7001"
+```
+
+For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
+
+_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
+
+[etcd-config]: https://github.com/coreos/etcd/blob/86e616c6e974828fc9119c1eb0f6439577a9ce0b/Documentation/configuration.md
+
 #### fleet
 
-The `coreos.fleet.*` parameters work very similarly to `coreos.etcd.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...
+The `coreos.fleet.*` parameters work very similarly to `coreos.etcd2.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...
 
 ```yaml
 #cloud-config
@@ -100,7 +140,7 @@ For more information on fleet configuration, see the [fleet documentation][fleet
 
 #### flannel
 
-The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd.*`
+The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd2.*`
 and `coreos.fleet.*`. They can be used to set environment variables for
 flanneld. For example, the following cloud-config...
 
@@ -120,14 +160,16 @@ Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
 ```
 
 List of flannel configuration parameters:
 
 - **etcd_endpoints**: Comma separated list of etcd endpoints
 - **etcd_cafile**: Path to CA file used for TLS communication with etcd
 - **etcd_certfile**: Path to certificate file used for TLS communication with etcd
 - **etcd_keyfile**: Path to private key file used for TLS communication with etcd
-- **etcd_prefix**: Etcd prefix path to be used for flannel keys
+- **etcd_prefix**: etcd prefix path to be used for flannel keys
 - **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
 - **subnet_file**: Path to flannel subnet file to write out
 - **interface**: Interface (name or IP) that should be used for inter-host communication
+- **public_ip**: IP accessible by other nodes for inter-host communication
 
 [flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
 
@@ -241,14 +283,14 @@ coreos:
             Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
 ```
 
-Start the built-in `etcd` and `fleet` services:
+Start the built-in `etcd2` and `fleet` services:
 
 ```yaml
 #cloud-config
 
 coreos:
   units:
-    - name: etcd.service
+    - name: etcd2.service
       command: start
     - name: fleet.service
       command: start
@@ -293,11 +335,12 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
 - **groups**: Add user to these additional groups
 - **no-user-group**: Boolean. Skip default group creation.
 - **ssh-authorized-keys**: List of public SSH keys to authorize for this user
-- **coreos-ssh-import-github**: Authorize SSH keys from GitHub user
-- **coreos-ssh-import-github-users**: Authorize SSH keys from a list of GitHub users
-- **coreos-ssh-import-url**: Authorize SSH keys imported from a url endpoint.
+- **coreos-ssh-import-github** [DEPRECATED]: Authorize SSH keys from GitHub user
+- **coreos-ssh-import-github-users** [DEPRECATED]: Authorize SSH keys from a list of GitHub users
+- **coreos-ssh-import-url** [DEPRECATED]: Authorize SSH keys imported from a url endpoint.
 - **system**: Create the user as a system user. No home directory will be created.
 - **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
+- **shell**: User's login shell.
 
 The following fields are not yet implemented:
 
@@ -340,43 +383,6 @@ perl -e 'print crypt("password","\$6\$SALT\$") . "\n"'
 
 Using a higher number of rounds will help create more secure passwords, but given enough time, password hashes can be reversed. On most RPM based distributions there is a tool called mkpasswd available in the `expect` package, but this does not handle "rounds" nor advanced hashing algorithms.
 
-#### Retrieving SSH Authorized Keys
-
-##### From a GitHub User
-
-Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.
-
-```yaml
-#cloud-config
-
-users:
-  - name: elroy
-    coreos-ssh-import-github: elroy
-```
-
-##### From an HTTP Endpoint
-
-We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
-For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:
-
-```yaml
-#cloud-config
-
-users:
-  - name: elroy
-    coreos-ssh-import-url: https://github-enterprise.example.com/api/v3/users/elroy/keys?access_token=<TOKEN>
-```
-
-You can also specify any URL whose response matches the JSON format for public keys:
-
-```yaml
-#cloud-config
-
-users:
-  - name: elroy
-    coreos-ssh-import-url: https://example.com/public-keys
-```
-
 ### write_files
 
 The `write_files` directive defines a set of files to create on the local filesystem.
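The etcd, etcd2, fleet and flannel sections above all describe the same mechanism: each `coreos.*` key is rendered into an `Environment=` line of a partial systemd unit, keyed by the `env:"..."` struct tags that appear in the config hunks later in this diff. The following standalone Go sketch illustrates only that tag-driven rendering; the struct, field subset and values are illustrative assumptions, not the project's implementation.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// A few env-tagged fields in the style of the config structs in this diff.
type etcd2Options struct {
	Discovery        string `env:"ETCD_DISCOVERY"`
	ListenClientURLs string `env:"ETCD_LISTEN_CLIENT_URLS"`
	ListenPeerURLs   string `env:"ETCD_LISTEN_PEER_URLS"`
}

// dropIn renders the non-empty fields as a [Service] drop-in body.
func dropIn(opts etcd2Options) string {
	var b strings.Builder
	b.WriteString("[Service]\n")
	v := reflect.ValueOf(opts)
	for i := 0; i < v.NumField(); i++ {
		if val := v.Field(i).String(); val != "" {
			fmt.Fprintf(&b, "Environment=\"%s=%s\"\n", v.Type().Field(i).Tag.Get("env"), val)
		}
	}
	return b.String()
}

func main() {
	fmt.Print(dropIn(etcd2Options{
		Discovery:        "https://discovery.etcd.io/<token>",
		ListenClientURLs: "http://0.0.0.0:2379,http://0.0.0.0:4001",
	}))
}
```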
@@ -21,6 +21,12 @@ mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
 rm -r /tmp/new-drive
 ```
 
+If on OS X, replace the `mkisofs` invocation with:
+
+```sh
+hdiutil makehybrid -iso -joliet -default-volume-name config-2 -o configdrive.iso /tmp/new-drive
+```
+
 ## QEMU virtfs
 
 One exception to the above, when using QEMU it is possible to skip creating an
build (7 changed lines)
@@ -1,7 +1,10 @@
 #!/bin/bash -e
 
+NAME="coreos-cloudinit"
 ORG_PATH="github.com/coreos"
-REPO_PATH="${ORG_PATH}/coreos-cloudinit"
+REPO_PATH="${ORG_PATH}/${NAME}"
+VERSION=$(git describe --dirty --tags)
+GLDFLAGS="-X main.version \"${VERSION}\""
 
 if [ ! -h gopath/src/${REPO_PATH} ]; then
   mkdir -p gopath/src/${ORG_PATH}
@@ -11,4 +14,4 @@ fi
 export GOBIN=${PWD}/bin
 export GOPATH=${PWD}/gopath
 
-go build -o bin/coreos-cloudinit ${REPO_PATH}
+go build -ldflags "${GLDFLAGS}" -o ${GOBIN}/${NAME} ${REPO_PATH}
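The rewritten build script injects the `git describe` output into the binary at link time; the matching fallback value (`version = "was not built properly"`) is added to the main package later in this diff. A minimal sketch of how that link-time override behaves, using an assumed file name:

```go
// version_sketch.go (hypothetical): demonstrates the main.version variable
// that `go build -ldflags "-X main.version \"<value>\""` overwrites.
// Newer Go toolchains spell the flag as -X main.version=<value>.
package main

import "fmt"

var version = "was not built properly" // replaced at link time by the build script

func main() {
	fmt.Printf("coreos-cloudinit %s\n", version)
}
```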
@@ -37,6 +37,7 @@ type CloudConfig struct {
 
 type CoreOS struct {
 	Etcd      Etcd      `yaml:"etcd"`
+	Etcd2     Etcd2     `yaml:"etcd2"`
 	Flannel   Flannel   `yaml:"flannel"`
 	Fleet     Fleet     `yaml:"fleet"`
 	Locksmith Locksmith `yaml:"locksmith"`
@@ -374,6 +374,7 @@ users:
     no_user_group: true
     system: y
     no_log_init: True
+    shell: /bin/sh
 `
 	cfg, err := NewCloudConfig(contents)
 	if err != nil {
@@ -441,6 +442,10 @@ users:
 	if !user.NoLogInit {
 		t.Errorf("Failed to parse no_log_init field")
 	}
+
+	if user.Shell != "/bin/sh" {
+		t.Errorf("Failed to parse shell field, got %q", user.Shell)
+	}
 }
 
 func TestCloudConfigUsersGithubUser(t *testing.T) {
@@ -16,7 +16,7 @@ package config
 
 type Etcd struct {
 	Addr                     string  `yaml:"addr" env:"ETCD_ADDR"`
-	AdvertiseClientURLs      string  `yaml:"advertise_client_urls" env:"ETCD_ADVERTISE_CLIENT_URLS"`
+	AdvertiseClientURLs      string  `yaml:"advertise_client_urls" env:"ETCD_ADVERTISE_CLIENT_URLS" deprecated:"etcd2 options no longer work for etcd"`
 	BindAddr                 string  `yaml:"bind_addr" env:"ETCD_BIND_ADDR"`
 	CAFile                   string  `yaml:"ca_file" env:"ETCD_CA_FILE"`
 	CertFile                 string  `yaml:"cert_file" env:"ETCD_CERT_FILE"`
@@ -26,26 +26,26 @@ type Etcd struct {
 	CorsOrigins              string  `yaml:"cors" env:"ETCD_CORS"`
 	DataDir                  string  `yaml:"data_dir" env:"ETCD_DATA_DIR"`
 	Discovery                string  `yaml:"discovery" env:"ETCD_DISCOVERY"`
-	DiscoveryFallback        string  `yaml:"discovery_fallback" env:"ETCD_DISCOVERY_FALLBACK"`
-	DiscoverySRV             string  `yaml:"discovery_srv" env:"ETCD_DISCOVERY_SRV"`
-	DiscoveryProxy           string  `yaml:"discovery_proxy" env:"ETCD_DISCOVERY_PROXY"`
-	ElectionTimeout          int     `yaml:"election_timeout" env:"ETCD_ELECTION_TIMEOUT"`
-	ForceNewCluster          bool    `yaml:"force_new_cluster" env:"ETCD_FORCE_NEW_CLUSTER"`
+	DiscoveryFallback        string  `yaml:"discovery_fallback" env:"ETCD_DISCOVERY_FALLBACK" deprecated:"etcd2 options no longer work for etcd"`
+	DiscoverySRV             string  `yaml:"discovery_srv" env:"ETCD_DISCOVERY_SRV" deprecated:"etcd2 options no longer work for etcd"`
+	DiscoveryProxy           string  `yaml:"discovery_proxy" env:"ETCD_DISCOVERY_PROXY" deprecated:"etcd2 options no longer work for etcd"`
+	ElectionTimeout          int     `yaml:"election_timeout" env:"ETCD_ELECTION_TIMEOUT" deprecated:"etcd2 options no longer work for etcd"`
+	ForceNewCluster          bool    `yaml:"force_new_cluster" env:"ETCD_FORCE_NEW_CLUSTER" deprecated:"etcd2 options no longer work for etcd"`
 	GraphiteHost             string  `yaml:"graphite_host" env:"ETCD_GRAPHITE_HOST"`
-	HeartbeatInterval        int     `yaml:"heartbeat_interval" env:"ETCD_HEARTBEAT_INTERVAL"`
+	HeartbeatInterval        int     `yaml:"heartbeat_interval" env:"ETCD_HEARTBEAT_INTERVAL" deprecated:"etcd2 options no longer work for etcd"`
 	HTTPReadTimeout          float64 `yaml:"http_read_timeout" env:"ETCD_HTTP_READ_TIMEOUT"`
 	HTTPWriteTimeout         float64 `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
-	InitialAdvertisePeerURLs string  `yaml:"initial_advertise_peer_urls" env:"ETCD_INITIAL_ADVERTISE_PEER_URLS"`
-	InitialCluster           string  `yaml:"initial_cluster" env:"ETCD_INITIAL_CLUSTER"`
-	InitialClusterState      string  `yaml:"initial_cluster_state" env:"ETCD_INITIAL_CLUSTER_STATE"`
-	InitialClusterToken      string  `yaml:"initial_cluster_token" env:"ETCD_INITIAL_CLUSTER_TOKEN"`
+	InitialAdvertisePeerURLs string  `yaml:"initial_advertise_peer_urls" env:"ETCD_INITIAL_ADVERTISE_PEER_URLS" deprecated:"etcd2 options no longer work for etcd"`
+	InitialCluster           string  `yaml:"initial_cluster" env:"ETCD_INITIAL_CLUSTER" deprecated:"etcd2 options no longer work for etcd"`
+	InitialClusterState      string  `yaml:"initial_cluster_state" env:"ETCD_INITIAL_CLUSTER_STATE" deprecated:"etcd2 options no longer work for etcd"`
+	InitialClusterToken      string  `yaml:"initial_cluster_token" env:"ETCD_INITIAL_CLUSTER_TOKEN" deprecated:"etcd2 options no longer work for etcd"`
 	KeyFile                  string  `yaml:"key_file" env:"ETCD_KEY_FILE"`
-	ListenClientURLs         string  `yaml:"listen_client_urls" env:"ETCD_LISTEN_CLIENT_URLS"`
-	ListenPeerURLs           string  `yaml:"listen_peer_urls" env:"ETCD_LISTEN_PEER_URLS"`
+	ListenClientURLs         string  `yaml:"listen_client_urls" env:"ETCD_LISTEN_CLIENT_URLS" deprecated:"etcd2 options no longer work for etcd"`
+	ListenPeerURLs           string  `yaml:"listen_peer_urls" env:"ETCD_LISTEN_PEER_URLS" deprecated:"etcd2 options no longer work for etcd"`
 	MaxResultBuffer          int     `yaml:"max_result_buffer" env:"ETCD_MAX_RESULT_BUFFER"`
 	MaxRetryAttempts         int     `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
-	MaxSnapshots             int     `yaml:"max_snapshots" env:"ETCD_MAX_SNAPSHOTS"`
-	MaxWALs                  int     `yaml:"max_wals" env:"ETCD_MAX_WALS"`
+	MaxSnapshots             int     `yaml:"max_snapshots" env:"ETCD_MAX_SNAPSHOTS" deprecated:"etcd2 options no longer work for etcd"`
+	MaxWALs                  int     `yaml:"max_wals" env:"ETCD_MAX_WALS" deprecated:"etcd2 options no longer work for etcd"`
 	Name                     string  `yaml:"name" env:"ETCD_NAME"`
 	PeerAddr                 string  `yaml:"peer_addr" env:"ETCD_PEER_ADDR"`
 	PeerBindAddr             string  `yaml:"peer_bind_addr" env:"ETCD_PEER_BIND_ADDR"`
@@ -56,7 +56,7 @@ type Etcd struct {
 	PeerKeyFile              string  `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
 	Peers                    string  `yaml:"peers" env:"ETCD_PEERS"`
 	PeersFile                string  `yaml:"peers_file" env:"ETCD_PEERS_FILE"`
-	Proxy                    string  `yaml:"proxy" env:"ETCD_PROXY"`
+	Proxy                    string  `yaml:"proxy" env:"ETCD_PROXY" deprecated:"etcd2 options no longer work for etcd"`
 	RetryInterval            float64 `yaml:"retry_interval" env:"ETCD_RETRY_INTERVAL"`
 	Snapshot                 bool    `yaml:"snapshot" env:"ETCD_SNAPSHOT"`
 	SnapshotCount            int     `yaml:"snapshot_count" env:"ETCD_SNAPSHOTCOUNT"`
config/etcd2.go (new file, 56 lines)
@@ -0,0 +1,56 @@

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

type Etcd2 struct {
	AdvertiseClientURLs      string `yaml:"advertise_client_urls" env:"ETCD_ADVERTISE_CLIENT_URLS"`
	CAFile                   string `yaml:"ca_file" env:"ETCD_CA_FILE" deprecated:"ca_file obsoleted by trusted_ca_file and client_cert_auth"`
	CertFile                 string `yaml:"cert_file" env:"ETCD_CERT_FILE"`
	ClientCertAuth           bool   `yaml:"client_cert_auth" env:"ETCD_CLIENT_CERT_AUTH"`
	CorsOrigins              string `yaml:"cors" env:"ETCD_CORS"`
	DataDir                  string `yaml:"data_dir" env:"ETCD_DATA_DIR"`
	Debug                    bool   `yaml:"debug" env:"ETCD_DEBUG"`
	Discovery                string `yaml:"discovery" env:"ETCD_DISCOVERY"`
	DiscoveryFallback        string `yaml:"discovery_fallback" env:"ETCD_DISCOVERY_FALLBACK"`
	DiscoverySRV             string `yaml:"discovery_srv" env:"ETCD_DISCOVERY_SRV"`
	DiscoveryProxy           string `yaml:"discovery_proxy" env:"ETCD_DISCOVERY_PROXY"`
	ElectionTimeout          int    `yaml:"election_timeout" env:"ETCD_ELECTION_TIMEOUT"`
	ForceNewCluster          bool   `yaml:"force_new_cluster" env:"ETCD_FORCE_NEW_CLUSTER"`
	HeartbeatInterval        int    `yaml:"heartbeat_interval" env:"ETCD_HEARTBEAT_INTERVAL"`
	InitialAdvertisePeerURLs string `yaml:"initial_advertise_peer_urls" env:"ETCD_INITIAL_ADVERTISE_PEER_URLS"`
	InitialCluster           string `yaml:"initial_cluster" env:"ETCD_INITIAL_CLUSTER"`
	InitialClusterState      string `yaml:"initial_cluster_state" env:"ETCD_INITIAL_CLUSTER_STATE"`
	InitialClusterToken      string `yaml:"initial_cluster_token" env:"ETCD_INITIAL_CLUSTER_TOKEN"`
	KeyFile                  string `yaml:"key_file" env:"ETCD_KEY_FILE"`
	ListenClientURLs         string `yaml:"listen_client_urls" env:"ETCD_LISTEN_CLIENT_URLS"`
	ListenPeerURLs           string `yaml:"listen_peer_urls" env:"ETCD_LISTEN_PEER_URLS"`
	LogPackageLevels         string `yaml:"log_package_levels" env:"ETCD_LOG_PACKAGE_LEVELS"`
	MaxSnapshots             int    `yaml:"max_snapshots" env:"ETCD_MAX_SNAPSHOTS"`
	MaxWALs                  int    `yaml:"max_wals" env:"ETCD_MAX_WALS"`
	Name                     string `yaml:"name" env:"ETCD_NAME"`
	PeerCAFile               string `yaml:"peer_ca_file" env:"ETCD_PEER_CA_FILE" deprecated:"peer_ca_file obsoleted peer_trusted_ca_file and peer_client_cert_auth"`
	PeerCertFile             string `yaml:"peer_cert_file" env:"ETCD_PEER_CERT_FILE"`
	PeerKeyFile              string `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
	PeerClientCertAuth       bool   `yaml:"peer_client_cert_auth" env:"ETCD_PEER_CLIENT_CERT_AUTH"`
	PeerTrustedCAFile        string `yaml:"peer_trusted_ca_file" env:"ETCD_PEER_TRUSTED_CA_FILE"`
	Proxy                    string `yaml:"proxy" env:"ETCD_PROXY" valid:"^(on|off|readonly)$"`
	ProxyDialTimeout         int    `yaml:"proxy_dial_timeout" env:"ETCD_PROXY_DIAL_TIMEOUT"`
	ProxyFailureWait         int    `yaml:"proxy_failure_wait" env:"ETCD_PROXY_FAILURE_WAIT"`
	ProxyReadTimeout         int    `yaml:"proxy_read_timeout" env:"ETCD_PROXY_READ_TIMEOUT"`
	ProxyRefreshInterval     int    `yaml:"proxy_refresh_interval" env:"ETCD_PROXY_REFRESH_INTERVAL"`
	ProxyWriteTimeout        int    `yaml:"proxy_write_timeout" env:"ETCD_PROXY_WRITE_TIMEOUT"`
	SnapshotCount            int    `yaml:"snapshot_count" env:"ETCD_SNAPSHOT_COUNT"`
	TrustedCAFile            string `yaml:"trusted_ca_file" env:"ETCD_TRUSTED_CA_FILE"`
}
@@ -23,4 +23,5 @@ type Flannel struct {
 	IPMasq     string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
 	SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
 	Iface      string `yaml:"interface" env:"FLANNELD_IFACE"`
+	PublicIP   string `yaml:"public_ip" env:"FLANNELD_PUBLIC_IP"`
 }
@@ -12,17 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package test
+package config
 
 import (
-	"os"
+	"encoding/json"
 )
 
-type MockFilesystem map[string]string
-
-func (m MockFilesystem) ReadFile(filename string) ([]byte, error) {
-	if contents, ok := m[filename]; ok {
-		return []byte(contents), nil
+func IsIgnitionConfig(userdata string) bool {
+	var cfg struct {
+		Version *int `json:"ignitionVersion" yaml:"ignition_version"`
 	}
-	return nil, os.ErrNotExist
+	return (json.Unmarshal([]byte(userdata), &cfg) == nil && cfg.Version != nil)
 }
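IsIgnitionConfig keys off the `ignitionVersion` field alone: anything that parses as JSON and carries that key (even version 0) is treated as an Ignition config, which the validator and main() hunks later in this diff then short-circuit on. A quick standalone sketch of that behaviour (a copy of the logic, not the repo's own test, and without the yaml tag):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// isIgnitionConfig mirrors the function added in this diff.
func isIgnitionConfig(userdata string) bool {
	var cfg struct {
		Version *int `json:"ignitionVersion"`
	}
	return json.Unmarshal([]byte(userdata), &cfg) == nil && cfg.Version != nil
}

func main() {
	fmt.Println(isIgnitionConfig(`{"ignitionVersion": 1}`)) // true
	fmt.Println(isIgnitionConfig(`{"ignitionVersion": 0}`)) // true: a zero version still counts
	fmt.Println(isIgnitionConfig(`{}`))                     // false: no version key
	fmt.Println(isIgnitionConfig("#cloud-config"))          // false: not JSON
}
```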
@@ -18,9 +18,9 @@ type User struct {
 	Name                 string   `yaml:"name"`
 	PasswordHash         string   `yaml:"passwd"`
 	SSHAuthorizedKeys    []string `yaml:"ssh_authorized_keys"`
-	SSHImportGithubUser  string   `yaml:"coreos_ssh_import_github"`
-	SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users"`
-	SSHImportURL         string   `yaml:"coreos_ssh_import_url"`
+	SSHImportGithubUser  string   `yaml:"coreos_ssh_import_github" deprecated:"trying to fetch from a remote endpoint introduces too many intermittent errors"`
+	SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users" deprecated:"trying to fetch from a remote endpoint introduces too many intermittent errors"`
+	SSHImportURL         string   `yaml:"coreos_ssh_import_url" deprecated:"trying to fetch from a remote endpoint introduces too many intermittent errors"`
 	GECOS                string   `yaml:"gecos"`
 	Homedir              string   `yaml:"homedir"`
 	NoCreateHome         bool     `yaml:"no_create_home"`
@@ -29,4 +29,5 @@ type User struct {
 	NoUserGroup          bool     `yaml:"no_user_group"`
 	System               bool     `yaml:"system"`
 	NoLogInit            bool     `yaml:"no_log_init"`
+	Shell                string   `yaml:"shell"`
 }
@@ -57,9 +57,9 @@ func checkEncoding(cfg node, report *Report) {
 			continue
 		}
 
-		c := f.Child("contents")
+		c := f.Child("content")
 		if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
-			report.Error(c.line, fmt.Sprintf("contents cannot be decoded as %q", e.String()))
+			report.Error(c.line, fmt.Sprintf("content cannot be decoded as %q", e.String()))
 		}
 	}
 }
@@ -82,6 +82,9 @@ func checkNodeStructure(n, g node, r *Report) {
 	case reflect.Struct:
 		for _, cn := range n.children {
 			if cg := g.Child(cn.name); cg.IsValid() {
+				if msg := cg.field.Tag.Get("deprecated"); msg != "" {
+					r.Warning(cn.line, fmt.Sprintf("deprecated key %q (%s)", cn.name, msg))
+				}
 				checkNodeStructure(cn, cg, r)
 			} else {
 				r.Warning(cn.line, fmt.Sprintf("unrecognized key %q", cn.name))
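The warning machinery is driven entirely by the `deprecated` struct tag: while walking the cloud-config structure, the validator looks up the matching Go field and, if the tag is present, reports its message at the offending line. A standalone sketch of just that tag lookup (the struct here is a trimmed stand-in; only the tag text is taken from the etcd hunk above):

```go
package main

import (
	"fmt"
	"reflect"
)

// A field marked the same way several Etcd options are in this diff.
type etcd struct {
	Proxy string `yaml:"proxy" deprecated:"etcd2 options no longer work for etcd"`
}

func main() {
	f, _ := reflect.TypeOf(etcd{}).FieldByName("Proxy")
	if msg := f.Tag.Get("deprecated"); msg != "" {
		fmt.Printf("warning: deprecated key %q (%s)\n", f.Tag.Get("yaml"), msg)
	}
}
```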
@@ -60,27 +60,27 @@ func TestCheckEncoding(t *testing.T) {
 	}{
 		{},
 		{
-			config: "write_files:\n  - encoding: base64\n    contents: aGVsbG8K",
+			config: "write_files:\n  - encoding: base64\n    content: aGVsbG8K",
 		},
 		{
-			config: "write_files:\n  - contents: !!binary aGVsbG8K",
+			config: "write_files:\n  - content: !!binary aGVsbG8K",
 		},
 		{
-			config:  "write_files:\n  - encoding: base64\n    contents: !!binary aGVsbG8K",
-			entries: []Entry{{entryError, `contents cannot be decoded as "base64"`, 3}},
+			config:  "write_files:\n  - encoding: base64\n    content: !!binary aGVsbG8K",
+			entries: []Entry{{entryError, `content cannot be decoded as "base64"`, 3}},
 		},
 		{
-			config: "write_files:\n  - encoding: base64\n    contents: !!binary YUdWc2JHOEsK",
+			config: "write_files:\n  - encoding: base64\n    content: !!binary YUdWc2JHOEsK",
 		},
 		{
-			config: "write_files:\n  - encoding: gzip\n    contents: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
+			config: "write_files:\n  - encoding: gzip\n    content: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
 		},
 		{
-			config: "write_files:\n  - encoding: gzip+base64\n    contents: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
+			config: "write_files:\n  - encoding: gzip+base64\n    content: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
 		},
 		{
-			config:  "write_files:\n  - encoding: custom\n    contents: hello",
-			entries: []Entry{{entryError, `contents cannot be decoded as "custom"`, 3}},
+			config:  "write_files:\n  - encoding: custom\n    content: hello",
+			entries: []Entry{{entryError, `content cannot be decoded as "custom"`, 3}},
 		},
 	}
 
@@ -119,6 +119,15 @@ func TestCheckStructure(t *testing.T) {
 			config: "coreos:\n  etcd:\n    discovery: good",
 		},
 
+		// Test for deprecated keys
+		{
+			config: "coreos:\n  etcd:\n    addr: hi",
+		},
+		{
+			config:  "coreos:\n  etcd:\n    proxy: hi",
+			entries: []Entry{{entryWarning, "deprecated key \"proxy\" (etcd2 options no longer work for etcd)", 3}},
+		},
+
 		// Test for error on list of nodes
 		{
 			config: "coreos:\n  units:\n    - hello\n    - goodbye",
@@ -40,6 +40,8 @@ func Validate(userdataBytes []byte) (Report, error) {
 		return Report{}, nil
 	case config.IsScript(string(userdataBytes)):
 		return Report{}, nil
+	case config.IsIgnitionConfig(string(userdataBytes)):
+		return Report{}, nil
 	case config.IsCloudConfig(string(userdataBytes)):
 		return validateCloudConfig(userdataBytes, Rules)
 	default:
@@ -111,6 +111,16 @@ func TestValidate(t *testing.T) {
 		{
 			config: "#!/bin/bash\necho hey",
 		},
+		{
+			config: "{}",
+			report: Report{entries: []Entry{{entryError, `must be "#cloud-config" or begin with "#!"`, 1}}},
+		},
+		{
+			config: `{"ignitionVersion":0}`,
+		},
+		{
+			config: `{"ignitionVersion":1}`,
+		},
 	}
 
 	for i, tt := range tests {
@@ -29,6 +29,7 @@ import (
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
 	"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
+	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
 	"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
 	"github.com/coreos/coreos-cloudinit/datasource/url"
 	"github.com/coreos/coreos-cloudinit/datasource/waagent"
@@ -39,7 +40,6 @@ import (
 )
 
 const (
-	version               = "1.3.0"
 	datasourceInterval    = 100 * time.Millisecond
 	datasourceMaxInterval = 30 * time.Second
 	datasourceTimeout     = 5 * time.Minute
@@ -57,6 +57,7 @@ var (
 		ec2MetadataService          string
 		cloudSigmaMetadataService   bool
 		digitalOceanMetadataService string
+		packetMetadataService       string
 		url                         string
 		procCmdLine                 bool
 	}
@@ -66,6 +67,7 @@ var (
 		oem      string
 		validate bool
 	}{}
+	version = "was not built properly"
 )
 
 func init() {
@@ -78,6 +80,7 @@ func init() {
 	flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url")
 	flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
 	flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url")
+	flag.StringVar(&flags.sources.packetMetadataService, "from-packet-metadata", "", "Download Packet data from metadata service")
 	flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url")
 	flag.BoolVar(&flags.sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag))
 	flag.StringVar(&flags.oem, "oem", "", "Use the settings specific to the provided OEM")
@@ -109,6 +112,10 @@ var (
 	"cloudsigma": oemConfig{
 		"from-cloudsigma-metadata": "true",
 	},
+	"packet": oemConfig{
+		"from-packet-metadata": "https://metadata.packet.net/",
+		"convert-netconf":      "packet",
+	},
 	}
 )
 
@@ -131,7 +138,7 @@ func main() {
 	}
 
 	if flags.printVersion == true {
-		fmt.Printf("coreos-cloudinit version %s\n", version)
+		fmt.Printf("coreos-cloudinit %s\n", version)
 		os.Exit(0)
 	}
 
@@ -139,14 +146,15 @@ func main() {
 	case "":
 	case "debian":
 	case "digitalocean":
+	case "packet":
 	default:
-		fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean'\n", flags.convertNetconf)
+		fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean, packet'\n", flags.convertNetconf)
 		os.Exit(2)
 	}
 
 	dss := getDatasources()
 	if len(dss) == 0 {
-		fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-url or --from-proc-cmdline")
+		fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-packet-metadata, --from-url or --from-proc-cmdline")
 		os.Exit(2)
 	}
 
@@ -192,16 +200,20 @@ func main() {
 
 	var ccu *config.CloudConfig
 	var script *config.Script
-	if ud, err := initialize.ParseUserData(userdata); err != nil {
-		fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err)
-		failure = true
-	} else {
+	switch ud, err := initialize.ParseUserData(userdata); err {
+	case initialize.ErrIgnitionConfig:
+		fmt.Printf("Detected an Ignition config. Exiting...")
+		os.Exit(0)
+	case nil:
 		switch t := ud.(type) {
 		case *config.CloudConfig:
 			ccu = t
 		case *config.Script:
 			script = t
 		}
+	default:
+		fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err)
+		failure = true
 	}
 
 	fmt.Println("Merging cloud-config from meta-data and user-data")
@@ -215,6 +227,8 @@ func main() {
 		ifaces, err = network.ProcessDebianNetconf(metadata.NetworkConfig)
 	case "digitalocean":
 		ifaces, err = network.ProcessDigitalOceanNetconf(metadata.NetworkConfig)
+	case "packet":
+		ifaces, err = network.ProcessPacketNetconf(metadata.NetworkConfig)
 	default:
 		err = fmt.Errorf("Unsupported network config format %q", flags.convertNetconf)
 	}
@@ -290,6 +304,9 @@ func getDatasources() []datasource.Datasource {
 	if flags.sources.waagent != "" {
 		dss = append(dss, waagent.NewDatasource(flags.sources.waagent))
 	}
+	if flags.sources.packetMetadataService != "" {
+		dss = append(dss, packet.NewDatasource(flags.sources.packetMetadataService))
+	}
 	if flags.sources.procCmdLine {
 		dss = append(dss, proc_cmdline.NewDatasource())
 	}
@@ -60,7 +60,7 @@ func (cd *configDrive) FetchMetadata() (metadata datasource.Metadata, err error)
 		} `json:"network_config"`
 	}
 
-	if data, err = cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json")); err != nil {
+	if data, err = cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json")); err != nil || len(data) == 0 {
 		return
 	}
 	if err = json.Unmarshal([]byte(data), &m); err != nil {
@@ -69,7 +69,9 @@ func (cd *configDrive) FetchMetadata() (metadata datasource.Metadata, err error)
 
 	metadata.SSHPublicKeys = m.SSHAuthorizedKeyMap
 	metadata.Hostname = m.Hostname
-	metadata.NetworkConfig, err = cd.tryReadFile(path.Join(cd.openstackRoot(), m.NetworkConfig.ContentPath))
+	if m.NetworkConfig.ContentPath != "" {
+		metadata.NetworkConfig, err = cd.tryReadFile(path.Join(cd.openstackRoot(), m.NetworkConfig.ContentPath))
+	}
 
 	return
 }
@@ -31,19 +31,22 @@ func TestFetchMetadata(t *testing.T) {
 	}{
 		{
 			root:  "/",
-			files: test.MockFilesystem{"/openstack/latest/meta_data.json": `{"ignore": "me"}`},
+			files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: ""}),
+		},
+		{
+			root:  "/",
+			files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"ignore": "me"}`}),
 		},
 		{
 			root:     "/",
-			files:    test.MockFilesystem{"/openstack/latest/meta_data.json": `{"hostname": "host"}`},
+			files:    test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"hostname": "host"}`}),
 			metadata: datasource.Metadata{Hostname: "host"},
 		},
 		{
 			root: "/media/configdrive",
-			files: test.MockFilesystem{
-				"/media/configdrive/openstack/latest/meta_data.json": `{"hostname": "host", "network_config": {"content_path": "config_file.json"}, "public_keys":{"1": "key1", "2": "key2"}}`,
-				"/media/configdrive/openstack/config_file.json":      "make it work",
-			},
+			files: test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/meta_data.json", Contents: `{"hostname": "host", "network_config": {"content_path": "config_file.json"}, "public_keys":{"1": "key1", "2": "key2"}}`},
+				test.File{Path: "/media/configdrive/openstack/config_file.json", Contents: "make it work"},
+			),
 			metadata: datasource.Metadata{
 				Hostname:      "host",
 				NetworkConfig: []byte("make it work"),
@@ -57,10 +60,10 @@ func TestFetchMetadata(t *testing.T) {
 		cd := configDrive{tt.root, tt.files.ReadFile}
 		metadata, err := cd.FetchMetadata()
 		if err != nil {
-			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
 		}
 		if !reflect.DeepEqual(tt.metadata, metadata) {
-			t.Fatalf("bad metadata for %q: want %#v, got %#v", tt, tt.metadata, metadata)
+			t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
 		}
 	}
 }
@@ -74,27 +77,27 @@ func TestFetchUserdata(t *testing.T) {
 	}{
 		{
 			"/",
-			test.MockFilesystem{},
+			test.NewMockFilesystem(),
 			"",
 		},
 		{
 			"/",
-			test.MockFilesystem{"/openstack/latest/user_data": "userdata"},
+			test.NewMockFilesystem(test.File{Path: "/openstack/latest/user_data", Contents: "userdata"}),
 			"userdata",
 		},
 		{
 			"/media/configdrive",
-			test.MockFilesystem{"/media/configdrive/openstack/latest/user_data": "userdata"},
+			test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/user_data", Contents: "userdata"}),
 			"userdata",
 		},
 	} {
 		cd := configDrive{tt.root, tt.files.ReadFile}
 		userdata, err := cd.FetchUserdata()
 		if err != nil {
-			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
 		}
 		if string(userdata) != tt.userdata {
-			t.Fatalf("bad userdata for %q: want %q, got %q", tt, tt.userdata, userdata)
+			t.Fatalf("bad userdata for %+v: want %q, got %q", tt, tt.userdata, userdata)
 		}
 	}
 }
@@ -108,7 +108,9 @@ func (scs *serverContextService) FetchMetadata() (metadata datasource.Metadata,
 	}
 
 	metadata.SSHPublicKeys = map[string]string{}
-	if key, ok := inputMetadata.Meta["ssh_public_key"]; ok {
+	// CloudSigma uses an empty string, rather than no string,
+	// to represent the lack of a SSH key
+	if key, _ := inputMetadata.Meta["ssh_public_key"]; len(key) > 0 {
 		splitted := strings.Split(key, " ")
 		metadata.SSHPublicKeys[splitted[len(splitted)-1]] = key
 	}
@@ -43,6 +43,27 @@ func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
 	return f.raw, f.err
 }
 
+func TestServerContextWithEmptyPublicSSHKey(t *testing.T) {
+	client := new(fakeCepgoClient)
+	scs := NewServerContextService()
+	scs.client = client
+	client.raw = []byte(`{
+		"meta": {
+			"base64_fields": "cloudinit-user-data",
+			"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
+			"ssh_public_key": ""
+		}
+	}`)
+	metadata, err := scs.FetchMetadata()
+	if err != nil {
+		t.Error(err.Error())
+	}
+
+	if len(metadata.SSHPublicKeys) != 0 {
+		t.Error("There should be no Public SSH Keys provided")
+	}
+}
+
 func TestServerContextFetchMetadata(t *testing.T) {
 	client := new(fakeCepgoClient)
 	scs := NewServerContextService()
datasource/metadata/packet/metadata.go (new file, 106 lines)
@@ -0,0 +1,106 @@

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package packet

import (
	"encoding/json"
	"net"
	"strconv"

	"github.com/coreos/coreos-cloudinit/datasource"
	"github.com/coreos/coreos-cloudinit/datasource/metadata"
)

const (
	DefaultAddress = "https://metadata.packet.net/"
	apiVersion     = ""
	userdataUrl    = "userdata"
	metadataPath   = "metadata"
)

type Netblock struct {
	Address       net.IP `json:"address"`
	Cidr          int    `json:"cidr"`
	Netmask       net.IP `json:"netmask"`
	Gateway       net.IP `json:"gateway"`
	AddressFamily int    `json:"address_family"`
	Public        bool   `json:"public"`
}

type Nic struct {
	Name string `json:"name"`
	Mac  string `json:"mac"`
}

type NetworkData struct {
	Interfaces []Nic      `json:"interfaces"`
	Netblocks  []Netblock `json:"addresses"`
	DNS        []net.IP   `json:"dns"`
}

// Metadata that will be pulled from the https://metadata.packet.net/metadata only. We have the opportunity to add more later.
type Metadata struct {
	Hostname    string      `json:"hostname"`
	SSHKeys     []string    `json:"ssh_keys"`
	NetworkData NetworkData `json:"network"`
}

type metadataService struct {
	metadata.MetadataService
}

func NewDatasource(root string) *metadataService {
	return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)}
}

func (ms *metadataService) FetchMetadata() (metadata datasource.Metadata, err error) {
	var data []byte
	var m Metadata

	if data, err = ms.FetchData(ms.MetadataUrl()); err != nil || len(data) == 0 {
		return
	}

	if err = json.Unmarshal(data, &m); err != nil {
		return
	}

	if len(m.NetworkData.Netblocks) > 0 {
		for _, Netblock := range m.NetworkData.Netblocks {
			if Netblock.AddressFamily == 4 {
				if Netblock.Public == true {
					metadata.PublicIPv4 = Netblock.Address
				} else {
					metadata.PrivateIPv4 = Netblock.Address
				}
			} else {
				metadata.PublicIPv6 = Netblock.Address
			}
		}
	}
	metadata.Hostname = m.Hostname
	metadata.SSHPublicKeys = map[string]string{}
	for i, key := range m.SSHKeys {
		metadata.SSHPublicKeys[strconv.Itoa(i)] = key
	}

	metadata.NetworkConfig, err = json.Marshal(m.NetworkData)

	return
}

func (ms metadataService) Type() string {
	return "packet-metadata-service"
}
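FetchMetadata sorts each netblock by address family and the `public` flag: IPv4 blocks land in PublicIPv4 or PrivateIPv4, and anything else is treated as a public IPv6 address. A standalone sketch of that mapping with made-up sample data (only the JSON field names come from the file above; the addresses are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// netblock mirrors the JSON shape consumed by the packet datasource above.
type netblock struct {
	Address       net.IP `json:"address"`
	AddressFamily int    `json:"address_family"`
	Public        bool   `json:"public"`
}

func main() {
	// Illustrative sample; real data comes from the Packet metadata service.
	raw := `[
		{"address": "198.51.100.10", "address_family": 4, "public": true},
		{"address": "10.0.0.2", "address_family": 4, "public": false},
		{"address": "2001:db8::1", "address_family": 6, "public": true}
	]`

	var blocks []netblock
	if err := json.Unmarshal([]byte(raw), &blocks); err != nil {
		panic(err)
	}

	var publicV4, privateV4, publicV6 net.IP
	for _, b := range blocks {
		switch {
		case b.AddressFamily == 4 && b.Public:
			publicV4 = b.Address
		case b.AddressFamily == 4:
			privateV4 = b.Address
		default:
			publicV6 = b.Address
		}
	}
	fmt.Println("public IPv4:", publicV4, "private IPv4:", privateV4, "public IPv6:", publicV6)
}
```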
57
datasource/test/filesystem.go
Normal file
57
datasource/test/filesystem.go
Normal file
@@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"fmt"
	"os"
	"path"
)

type MockFilesystem map[string]File

type File struct {
	Path      string
	Contents  string
	Directory bool
}

func (m MockFilesystem) ReadFile(filename string) ([]byte, error) {
	if f, ok := m[path.Clean(filename)]; ok {
		if f.Directory {
			return nil, fmt.Errorf("read %s: is a directory", filename)
		}
		return []byte(f.Contents), nil
	}
	return nil, os.ErrNotExist
}

func NewMockFilesystem(files ...File) MockFilesystem {
	fs := MockFilesystem{}
	for _, file := range files {
		fs[file.Path] = file

		// Create the directories leading up to the file
		p := path.Dir(file.Path)
		for p != "/" && p != "." {
			if f, ok := fs[p]; ok && !f.Directory {
				panic(fmt.Sprintf("%q already exists and is not a directory (%#v)", p, f))
			}
			fs[p] = File{Path: p, Directory: true}
			p = path.Dir(p)
		}
	}
	return fs
}
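
As a quick illustration of the helper above, a datasource test can build a mock filesystem and read from it without touching disk; the path and contents here are invented, and note that parent directories are created implicitly.

```go
package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource/test"
)

func main() {
	fs := test.NewMockFilesystem(
		test.File{Path: "/var/lib/waagent/CustomData", Contents: "#cloud-config\n"},
	)

	data, err := fs.ReadFile("/var/lib/waagent/CustomData")
	fmt.Printf("%q %v\n", data, err) // "#cloud-config\n" <nil>

	// The parent directory entries were created implicitly, so reading one fails.
	_, err = fs.ReadFile("/var/lib/waagent")
	fmt.Println(err) // read /var/lib/waagent: is a directory
}
```
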
115 datasource/test/filesystem_test.go Normal file
@@ -0,0 +1,115 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"errors"
	"os"
	"reflect"
	"testing"
)

func TestReadFile(t *testing.T) {
	tests := []struct {
		filesystem MockFilesystem

		filename string
		contents string
		err      error
	}{
		{
			filename: "dne",
			err:      os.ErrNotExist,
		},
		{
			filesystem: MockFilesystem{
				"exists": File{Contents: "hi"},
			},
			filename: "exists",
			contents: "hi",
		},
		{
			filesystem: MockFilesystem{
				"dir": File{Directory: true},
			},
			filename: "dir",
			err:      errors.New("read dir: is a directory"),
		},
	}

	for i, tt := range tests {
		contents, err := tt.filesystem.ReadFile(tt.filename)
		if tt.contents != string(contents) {
			t.Errorf("bad contents (test %d): want %q, got %q", i, tt.contents, string(contents))
		}
		if !reflect.DeepEqual(tt.err, err) {
			t.Errorf("bad error (test %d): want %v, got %v", i, tt.err, err)
		}
	}
}

func TestNewMockFilesystem(t *testing.T) {
	tests := []struct {
		files []File

		filesystem MockFilesystem
	}{
		{
			filesystem: MockFilesystem{},
		},
		{
			files: []File{File{Path: "file"}},
			filesystem: MockFilesystem{
				"file": File{Path: "file"},
			},
		},
		{
			files: []File{File{Path: "/file"}},
			filesystem: MockFilesystem{
				"/file": File{Path: "/file"},
			},
		},
		{
			files: []File{File{Path: "/dir/file"}},
			filesystem: MockFilesystem{
				"/dir":      File{Path: "/dir", Directory: true},
				"/dir/file": File{Path: "/dir/file"},
			},
		},
		{
			files: []File{File{Path: "/dir/dir/file"}},
			filesystem: MockFilesystem{
				"/dir":          File{Path: "/dir", Directory: true},
				"/dir/dir":      File{Path: "/dir/dir", Directory: true},
				"/dir/dir/file": File{Path: "/dir/dir/file"},
			},
		},
		{
			files: []File{File{Path: "/dir/dir/dir", Directory: true}},
			filesystem: MockFilesystem{
				"/dir":         File{Path: "/dir", Directory: true},
				"/dir/dir":     File{Path: "/dir/dir", Directory: true},
				"/dir/dir/dir": File{Path: "/dir/dir/dir", Directory: true},
			},
		},
	}

	for i, tt := range tests {
		filesystem := NewMockFilesystem(tt.files...)
		if !reflect.DeepEqual(tt.filesystem, filesystem) {
			t.Errorf("bad filesystem (test %d): want %#v, got %#v", i, tt.filesystem, filesystem)
		}
	}
}
@@ -31,19 +31,19 @@ func TestFetchMetadata(t *testing.T) {
 	}{
 		{
 			root:  "/",
-			files: test.MockFilesystem{},
+			files: test.NewMockFilesystem(),
 		},
 		{
 			root:  "/",
-			files: test.MockFilesystem{"/SharedConfig.xml": ""},
+			files: test.NewMockFilesystem(test.File{Path: "/SharedConfig.xml", Contents: ""}),
 		},
 		{
 			root:  "/var/lib/waagent",
-			files: test.MockFilesystem{"/var/lib/waagent/SharedConfig.xml": ""},
+			files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: ""}),
 		},
 		{
 			root: "/var/lib/waagent",
-			files: test.MockFilesystem{"/var/lib/waagent/SharedConfig.xml": `<?xml version="1.0" encoding="utf-8"?>
+			files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: `<?xml version="1.0" encoding="utf-8"?>
 <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
 <Deployment name="c8f9e4c9c18948e1bebf57c5685da756" guid="{1d10394f-c741-4a1a-a6bb-278f213c5a5e}" incarnation="0" isNonCancellableTopologyChangeEnabled="false">
 <Service name="core-test-1" guid="{00000000-0000-0000-0000-000000000000}" />
@@ -79,7 +79,7 @@ func TestFetchMetadata(t *testing.T) {
 </InputEndpoints>
 </Instance>
 </Instances>
-</SharedConfig>`},
+</SharedConfig>`}),
 			metadata: datasource.Metadata{
 				PrivateIPv4: net.ParseIP("100.73.202.64"),
 				PublicIPv4:  net.ParseIP("191.239.39.77"),
@@ -89,10 +89,10 @@ func TestFetchMetadata(t *testing.T) {
 		a := waagent{tt.root, tt.files.ReadFile}
 		metadata, err := a.FetchMetadata()
 		if err != nil {
-			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
 		}
 		if !reflect.DeepEqual(tt.metadata, metadata) {
-			t.Fatalf("bad metadata for %q: want %#v, got %#v", tt, tt.metadata, metadata)
+			t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
 		}
 	}
 }
@@ -104,21 +104,21 @@ func TestFetchUserdata(t *testing.T) {
 	}{
 		{
 			"/",
-			test.MockFilesystem{},
+			test.NewMockFilesystem(),
 		},
 		{
 			"/",
-			test.MockFilesystem{"/CustomData": ""},
+			test.NewMockFilesystem(test.File{Path: "/CustomData", Contents: ""}),
 		},
 		{
 			"/var/lib/waagent/",
-			test.MockFilesystem{"/var/lib/waagent/CustomData": ""},
+			test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/CustomData", Contents: ""}),
 		},
 	} {
 		a := waagent{tt.root, tt.files.ReadFile}
 		_, err := a.FetchUserdata()
 		if err != nil {
-			t.Fatalf("bad error for %q: want %v, got %q", tt, nil, err)
+			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
 		}
 	}
 }
@@ -135,6 +135,7 @@ func Apply(cfg config.CloudConfig, ifaces []network.InterfaceGenerator, env *Env
 
 	for _, ccu := range []CloudConfigUnit{
 		system.Etcd{Etcd: cfg.CoreOS.Etcd},
+		system.Etcd2{Etcd2: cfg.CoreOS.Etcd2},
 		system.Fleet{Fleet: cfg.CoreOS.Fleet},
 		system.Locksmith{Locksmith: cfg.CoreOS.Locksmith},
 		system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
@@ -21,6 +21,10 @@ import (
 	"github.com/coreos/coreos-cloudinit/config"
 )
 
+var (
+	ErrIgnitionConfig = errors.New("not a config (found Ignition)")
+)
+
 func ParseUserData(contents string) (interface{}, error) {
 	if len(contents) == 0 {
 		return nil, nil
@@ -33,6 +37,8 @@ func ParseUserData(contents string) (interface{}, error) {
 	case config.IsCloudConfig(contents):
 		log.Printf("Parsing user-data as cloud-config")
 		return config.NewCloudConfig(contents)
+	case config.IsIgnitionConfig(contents):
+		return nil, ErrIgnitionConfig
 	default:
 		return nil, errors.New("Unrecognized user-data format")
 	}
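
A rough sketch of how a caller might react to the new sentinel error; the `initialize` import path is an assumption based on this repository's layout, the user-data string is a placeholder, and the Ignition hand-off is only indicated by a log line.

```go
package main

import (
	"log"

	"github.com/coreos/coreos-cloudinit/initialize"
)

func main() {
	contents := "#cloud-config\n" // user-data fetched from a datasource (placeholder)

	ud, err := initialize.ParseUserData(contents)
	switch {
	case err == initialize.ErrIgnitionConfig:
		log.Print("user-data is an Ignition config; leaving it for Ignition to handle")
	case err != nil:
		log.Fatalf("failed to parse user-data: %v", err)
	default:
		log.Printf("parsed user-data: %T", ud)
	}
}
```
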
@@ -130,7 +130,17 @@ type bondInterface struct {
 }
 
 func (b *bondInterface) Netdev() string {
-	return fmt.Sprintf("[NetDev]\nKind=bond\nName=%s\n", b.name)
+	config := fmt.Sprintf("[NetDev]\nKind=bond\nName=%s\n", b.name)
+	if b.hwaddr != nil {
+		config += fmt.Sprintf("MACAddress=%s\n", b.hwaddr.String())
+	}
+
+	config += fmt.Sprintf("\n[Bond]\n")
+	for _, name := range sortedKeys(b.options) {
+		config += fmt.Sprintf("%s=%s\n", name, b.options[name])
+	}
+
+	return config
 }
 
 func (b *bondInterface) Type() string {
@@ -52,7 +52,7 @@ func TestInterfaceGenerators(t *testing.T) {
 		},
 		{
 			name:    "testname",
-			netdev:  "[NetDev]\nKind=bond\nName=testname\n",
+			netdev:  "[NetDev]\nKind=bond\nName=testname\n\n[Bond]\n",
 			network: "[Match]\nName=testname\n\n[Network]\nBond=testbond1\nVLAN=testvlan1\nVLAN=testvlan2\nDHCP=true\n",
 			kind:    "bond",
 			iface: &bondInterface{logicalInterface: logicalInterface{
133 network/packet.go Normal file
@@ -0,0 +1,133 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package network

import (
	"encoding/json"
	"net"

	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
)

func ProcessPacketNetconf(config []byte) ([]InterfaceGenerator, error) {
	var netdata packet.NetworkData
	if err := json.Unmarshal(config, &netdata); err != nil {
		return nil, err
	}

	var nameservers []net.IP
	if netdata.DNS != nil {
		nameservers = netdata.DNS
	} else {
		nameservers = append(nameservers, net.ParseIP("8.8.8.8"), net.ParseIP("8.8.4.4"))
	}

	generators, err := parseNetwork(netdata, nameservers)
	if err != nil {
		return nil, err
	}

	return generators, nil
}

func parseNetwork(netdata packet.NetworkData, nameservers []net.IP) ([]InterfaceGenerator, error) {
	var interfaces []InterfaceGenerator
	var addresses []net.IPNet
	var routes []route
	for _, netblock := range netdata.Netblocks {
		addresses = append(addresses, net.IPNet{
			IP:   netblock.Address,
			Mask: net.IPMask(netblock.Netmask),
		})
		if netblock.Public == false {
			routes = append(routes, route{
				destination: net.IPNet{
					IP:   net.IPv4(10, 0, 0, 0),
					Mask: net.IPv4Mask(255, 0, 0, 0),
				},
				gateway: netblock.Gateway,
			})
		} else {
			if netblock.AddressFamily == 4 {
				routes = append(routes, route{
					destination: net.IPNet{
						IP:   net.IPv4zero,
						Mask: net.IPMask(net.IPv4zero),
					},
					gateway: netblock.Gateway,
				})
			} else {
				routes = append(routes, route{
					destination: net.IPNet{
						IP:   net.IPv6zero,
						Mask: net.IPMask(net.IPv6zero),
					},
					gateway: netblock.Gateway,
				})
			}
		}
	}

	bond := bondInterface{
		logicalInterface: logicalInterface{
			name: "bond0",
			config: configMethodStatic{
				addresses:   addresses,
				nameservers: nameservers,
				routes:      routes,
			},
		},
		options: map[string]string{
			"Mode":             "802.3ad",
			"LACPTransmitRate": "fast",
			"MIIMonitorSec":    ".2",
			"UpDelaySec":       ".2",
			"DownDelaySec":     ".2",
		},
	}

	for _, iface := range netdata.Interfaces {
		if iface.Name != "chassis0" && iface.Name != "ipmi0" {
			bond.slaves = append(bond.slaves, iface.Name)
			if iface.Name == "enp1s0f0" {
				bond.hwaddr, _ = net.ParseMAC(iface.Mac)
			}
		}
	}

	for _, iface := range netdata.Interfaces {
		if iface.Name != "chassis0" && iface.Name != "ipmi0" {
			p := physicalInterface{
				logicalInterface: logicalInterface{
					name: iface.Name,
					config: configMethodStatic{
						nameservers: nameservers,
					},
					children: []networkInterface{&bond},
				},
			}

			if iface.Name == "enp1s0f0" {
				p.configDepth = 20
			}

			interfaces = append(interfaces, &p)
		}
	}

	interfaces = append(interfaces, &bond)

	return interfaces, nil
}
@@ -15,12 +15,10 @@
 package pkg
 
 import (
-	"crypto/tls"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"log"
-	"net"
 	"net/http"
 	neturl "net/url"
 	"strings"
@@ -55,16 +53,15 @@ type ErrNetwork struct {
 }
 
 type HttpClient struct {
+	// Initial backoff duration. Defaults to 50 milliseconds
+	InitialBackoff time.Duration
+
 	// Maximum exp backoff duration. Defaults to 5 seconds
 	MaxBackoff time.Duration
 
 	// Maximum number of connection retries. Defaults to 15
 	MaxRetries int
 
-	// HTTP client timeout, this is suggested to be low since exponential
-	// backoff will kick off too. Defaults to 2 seconds
-	Timeout time.Duration
-
 	// Whether or not to skip TLS verification. Defaults to false
 	SkipTLS bool
 
@@ -78,29 +75,12 @@ type Getter interface {
 
 func NewHttpClient() *HttpClient {
 	hc := &HttpClient{
-		MaxBackoff: time.Second * 5,
-		MaxRetries: 15,
-		Timeout:    time.Duration(2) * time.Second,
-		SkipTLS:    false,
-	}
-
-	// We need to create our own client in order to add timeout support.
-	// TODO(c4milo) Replace it once Go 1.3 is officially used by CoreOS
-	// More info: https://code.google.com/p/go/source/detail?r=ada6f2d5f99f
-	hc.client = &http.Client{
-		Transport: &http.Transport{
-			TLSClientConfig: &tls.Config{
-				InsecureSkipVerify: hc.SkipTLS,
-			},
-			Dial: func(network, addr string) (net.Conn, error) {
-				deadline := time.Now().Add(hc.Timeout)
-				c, err := net.DialTimeout(network, addr, hc.Timeout)
-				if err != nil {
-					return nil, err
-				}
-				c.SetDeadline(deadline)
-				return c, nil
-			},
+		InitialBackoff: 50 * time.Millisecond,
+		MaxBackoff:     time.Second * 5,
+		MaxRetries:     15,
+		SkipTLS:        false,
+		client: &http.Client{
+			Timeout: 10 * time.Second,
 		},
 	}
 
@@ -134,7 +114,7 @@ func (h *HttpClient) GetRetry(rawurl string) ([]byte, error) {
 
 	dataURL := url.String()
 
-	duration := 50 * time.Millisecond
+	duration := h.InitialBackoff
 	for retry := 1; retry <= h.MaxRetries; retry++ {
 		log.Printf("Fetching data from %s. Attempt #%d", dataURL, retry)
 
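
A small usage sketch of the reworked client, showing the new `InitialBackoff` knob being overridden before a retried fetch; the metadata URL is a placeholder, and only the exported fields shown in the hunks above are relied upon.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/coreos-cloudinit/pkg"
)

func main() {
	client := pkg.NewHttpClient()
	client.InitialBackoff = 100 * time.Millisecond // first wait between retries
	client.MaxRetries = 3

	body, err := client.GetRetry("http://169.254.169.254/2009-04-04/user-data")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d bytes", len(body))
}
```
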
37 system/etcd2.go Normal file
@@ -0,0 +1,37 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package system

import (
	"github.com/coreos/coreos-cloudinit/config"
)

// Etcd2 is a top-level structure which embeds its underlying configuration,
// config.Etcd2, and provides the system-specific Unit().
type Etcd2 struct {
	config.Etcd2
}

// Units creates a Unit file drop-in for etcd2, using any configured options.
func (ee Etcd2) Units() []Unit {
	return []Unit{{config.Unit{
		Name:    "etcd2.service",
		Runtime: true,
		DropIns: []config.UnitDropIn{{
			Name:    "20-cloudinit.conf",
			Content: serviceContents(ee.Etcd2),
		}},
	}}}
}
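
A brief sketch of what the new generator produces: it builds the unit from an empty `config.Etcd2` purely to show the unit and drop-in names, so the rendered drop-in contents are not meaningful here.

```go
package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/config"
	"github.com/coreos/coreos-cloudinit/system"
)

func main() {
	// With no options set, the drop-in body is empty; only the names matter here.
	units := system.Etcd2{Etcd2: config.Etcd2{}}.Units()
	u := units[0]
	fmt.Println(u.Name, u.Runtime, u.DropIns[0].Name) // etcd2.service true 20-cloudinit.conf
}
```
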
@@ -72,6 +72,10 @@ func CreateUser(u *config.User) error {
 		args = append(args, "--no-log-init")
 	}
 
+	if u.Shell != "" {
+		args = append(args, "--shell", u.Shell)
+	}
+
 	args = append(args, u.Name)
 
 	output, err := exec.Command("useradd", args...).CombinedOutput()
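
For completeness, a hypothetical call showing the new field in use, assuming this is the `system` package's `CreateUser` and that `config.User` carries the `Shell` field added here; the user values are invented, and since it shells out to `useradd` it only succeeds when run as root.

```go
package main

import (
	"log"

	"github.com/coreos/coreos-cloudinit/config"
	"github.com/coreos/coreos-cloudinit/system"
)

func main() {
	user := config.User{
		Name:  "elroy",
		Shell: "/bin/bash", // now passed through as `useradd --shell /bin/bash`
	}
	if err := system.CreateUser(&user); err != nil {
		log.Fatal(err)
	}
}
```
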
52 test
@@ -1,19 +1,8 @@
 #!/bin/bash -e
-#
-# Run all coreos-cloudinit tests
-#   ./test
-#   ./test -v
-#
-# Run tests for one package
-#   PKG=initialize ./test
-#
 
-# Invoke ./cover for HTML output
-COVER=${COVER:-"-cover"}
-
 source ./build
 
-declare -a TESTPKGS=(
+SRC="
 	config
 	config/validate
 	datasource
@@ -24,42 +13,29 @@ declare -a TESTPKGS=(
 	datasource/metadata/digitalocean
 	datasource/metadata/ec2
 	datasource/proc_cmdline
+	datasource/test
 	datasource/url
 	datasource/waagent
 	initialize
 	network
 	pkg
 	system
-)
+"
 
-if [ -z "$PKG" ]; then
-	GOFMTPATH="${TESTPKGS[*]} *.go"
-	# prepend repo path to each package
-	TESTPKGS="${TESTPKGS[*]/#/${REPO_PATH}/} ./"
-else
-	GOFMTPATH="$TESTPKGS"
-	# strip out slashes and dots from PKG=./foo/
-	TESTPKGS=${PKG//\//}
-	TESTPKGS=${TESTPKGS//./}
-	TESTPKGS=${TESTPKGS/#/${REPO_PATH}/}
-fi
-
-echo "Running tests..."
-go test -i ${TESTPKGS}
-go test ${COVER} $@ ${TESTPKGS}
+echo "Checking gofix..."
+go tool fix -diff $SRC
 
 echo "Checking gofmt..."
-fmtRes=$(gofmt -l $GOFMTPATH)
-if [ -n "$fmtRes" ]; then
-	echo "$fmtRes"
-	exit 1
-fi
+gofmt -d -e $SRC
+
+# split SRC into an array and prepend REPO_PATH to each local package for go vet
+split_vet=(${SRC// / })
+VET_TEST=${split_vet[@]/#/${REPO_PATH}/}
 
 echo "Checking govet..."
-vetRes=$(go vet $TESTPKGS)
-if [ -n "${vetRes}" ]; then
-	echo -e "govet checking failed:\n${vetRes}"
-	exit 255
-fi
+go vet $VET_TEST
+
+echo "Running tests..."
+go test -timeout 60s -cover $@ ${VET_TEST} --race
 
 echo "Success"