Compare commits

...

26 Commits

Author SHA1 Message Date
Alex Crawford
5e112147bb coreos-cloudinit: bump to 0.9.4 2014-08-24 18:40:53 -07:00
Alex Crawford
7e78b1563f Merge pull request #206 from crawford/tests
test: Enable tests for CloudSigma datasource
2014-08-24 18:36:38 -07:00
Alex Crawford
ecbe81f103 test: Enable tests for CloudSigma datasource 2014-08-24 17:08:49 -07:00
Alex Crawford
45c20c1dd3 Merge pull request #196 from Vladimiroff/cloudsigma
cloudsigma: Add support for CloudSigma datasource
2014-08-15 15:21:33 -07:00
Alex Crawford
8ce925a060 coreos-cloudinit: bump to 0.9.3+git 2014-08-15 10:47:28 -07:00
Alex Crawford
eadb6ef42c coreos-cloudinit: bump to 0.9.3 2014-08-15 10:46:46 -07:00
Alex Crawford
7518f0ec93 Merge pull request #204 from crawford/configdrive
configdrive: Remove broken support for ec2 metadata
2014-08-15 10:43:26 -07:00
Alex Crawford
f0b9eaf2fe configdrive: Remove broken support for ec2 metadata
As it turns out, certain metadata is only present in the ec2 flavor
of metadata (e.g. public_ipv4) and other data is only present in
the openstack flavor (e.g. network_config). For now, just read the
openstack metadata.
2014-08-15 10:35:21 -07:00
Kiril Vladimirov
7320a2cbf2 feat(datasource/metadata): Add datasource for CloudSigma 2014-08-15 12:08:55 +03:00
Kiril Vladimirov
57950b3ed9 add(goserial): import github.com/tarm/goserial 2014-08-15 12:08:34 +03:00
Kiril Vladimirov
85c6a2a16a add(cepgo): import github.com/cloudsigma/cepgo 2014-08-15 12:07:58 +03:00
Jonathan Boulle
24b44e86a6 coreos-cloudinit: bump to 0.9.2+git 2014-08-12 11:38:51 -07:00
Jonathan Boulle
2f52ad4ef8 coreos-cloudinit: bump to 0.9.2 2014-08-12 11:38:12 -07:00
Jonathan Boulle
735d6c6161 Merge pull request #202 from jonboulle/env
environment: write new keys in consistent order
2014-08-11 22:40:42 -07:00
Alex Crawford
1cf275bad6 Merge pull request #201 from crawford/configdrive
configdrive: fix root path
2014-08-11 20:11:17 -07:00
Jonathan Boulle
f1c97cb4d5 environment: write new keys in consistent order 2014-08-11 18:24:58 -07:00
Alex Crawford
d143904aa9 configdrive: fix root path 2014-08-11 17:57:10 -07:00
Jonathan Boulle
c428ce2cc5 Merge pull request #200 from jonboulle/fu
initialize: use correct heuristic to check if etcdenvironment is set
2014-08-11 17:44:44 -07:00
Jonathan Boulle
dfb5b4fc3a initialize: use correct heuristic to check if etcdenvironment is set
In some circumstances (e.g. nova-agent-watcher) cloudconfig files will
be created where the EtcdEnvironment is an empty map, and hence != nil.
If this is the case we should not do anything at all (because the user
hasn't explicitly asked us to configure etcd). This change standardises
behaviour with the check that we already do for FleetEnvironment.
2014-08-11 16:01:08 -07:00
Alex Crawford
97d5538533 Merge pull request #197 from crawford/ec2
datasource: Fix ec2 URLs
2014-08-06 22:45:03 -07:00
Alex Crawford
6b8f82b5d3 datasource: Fix ec2 URLs
_ vs -
2014-08-06 21:31:43 -07:00
Alex Crawford
facde6609f Merge pull request #194 from crawford/metadata
datasource: Refactoring datasources
2014-08-06 15:55:13 -07:00
Alex Crawford
d68ae84b37 metadata: Refactor metadata service into ec2 metadata
Added more testing.
2014-08-05 17:19:43 -07:00
Alex Crawford
54aa39543b timeouts: Use After() instead of Tick() 2014-08-04 15:10:14 -07:00
Alex Crawford
8566a2c118 datasource: Move datasources into their own packages. 2014-08-04 15:10:07 -07:00
Alex Crawford
49ac083af5 coreos-cloudinit: bump to 0.9.1+git 2014-08-04 14:14:24 -07:00
31 changed files with 2135 additions and 382 deletions

View File

@@ -8,13 +8,19 @@ import (
"time"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
"github.com/coreos/coreos-cloudinit/datasource/file"
"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
"github.com/coreos/coreos-cloudinit/datasource/url"
"github.com/coreos/coreos-cloudinit/initialize"
"github.com/coreos/coreos-cloudinit/pkg"
"github.com/coreos/coreos-cloudinit/system"
)
const (
version = "0.9.1"
version = "0.9.4"
datasourceInterval = 100 * time.Millisecond
datasourceMaxInterval = 30 * time.Second
datasourceTimeout = 5 * time.Minute
@@ -24,11 +30,13 @@ var (
printVersion bool
ignoreFailure bool
sources struct {
file string
configDrive string
metadataService bool
url string
procCmdLine bool
file string
configDrive string
metadataService bool
ec2MetadataService string
cloudSigmaMetadataService bool
url string
procCmdLine bool
}
convertNetconf string
workspace string
@@ -40,9 +48,11 @@ func init() {
flag.BoolVar(&ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
flag.StringVar(&sources.file, "from-file", "", "Read user-data from provided file")
flag.StringVar(&sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory")
flag.BoolVar(&sources.metadataService, "from-metadata-service", false, "Download data from metadata service")
flag.BoolVar(&sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service")
flag.StringVar(&sources.ec2MetadataService, "from-ec2-metadata", "", "Download data from the provided metadata service")
flag.BoolVar(&sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
flag.StringVar(&sources.url, "from-url", "", "Download user-data from provided url")
flag.BoolVar(&sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", datasource.ProcCmdlineLocation, datasource.ProcCmdlineCloudConfigFlag))
flag.BoolVar(&sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag))
flag.StringVar(&convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files (requires the -from-configdrive flag)")
flag.StringVar(&workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
flag.StringVar(&sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
@@ -78,7 +88,7 @@ func main() {
dss := getDatasources()
if len(dss) == 0 {
fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-metadata-service, --from-url or --from-proc-cmdline")
fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-url or --from-proc-cmdline")
os.Exit(1)
}
@@ -172,7 +182,7 @@ func main() {
func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudConfig) {
if mdcc.Hostname != "" {
if udcc.Hostname != "" {
fmt.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)", udcc.Hostname, mdcc.Hostname)
fmt.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", udcc.Hostname, mdcc.Hostname)
} else {
udcc.Hostname = mdcc.Hostname
}
@@ -183,7 +193,7 @@ func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudCon
}
if mdcc.NetworkConfigPath != "" {
if udcc.NetworkConfigPath != "" {
fmt.Printf("Warning: user-data NetworkConfigPath %s overrides metadata NetworkConfigPath %s", udcc.NetworkConfigPath, mdcc.NetworkConfigPath)
fmt.Printf("Warning: user-data NetworkConfigPath %s overrides metadata NetworkConfigPath %s\n", udcc.NetworkConfigPath, mdcc.NetworkConfigPath)
} else {
udcc.NetworkConfigPath = mdcc.NetworkConfigPath
}
@@ -196,19 +206,25 @@ func mergeCloudConfig(mdcc, udcc initialize.CloudConfig) (cc initialize.CloudCon
func getDatasources() []datasource.Datasource {
dss := make([]datasource.Datasource, 0, 5)
if sources.file != "" {
dss = append(dss, datasource.NewLocalFile(sources.file))
dss = append(dss, file.NewDatasource(sources.file))
}
if sources.url != "" {
dss = append(dss, datasource.NewRemoteFile(sources.url))
dss = append(dss, url.NewDatasource(sources.url))
}
if sources.configDrive != "" {
dss = append(dss, datasource.NewConfigDrive(sources.configDrive))
dss = append(dss, configdrive.NewDatasource(sources.configDrive))
}
if sources.metadataService {
dss = append(dss, datasource.NewMetadataService())
dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
}
if sources.ec2MetadataService != "" {
dss = append(dss, ec2.NewDatasource(sources.ec2MetadataService))
}
if sources.cloudSigmaMetadataService {
dss = append(dss, cloudsigma.NewServerContextService())
}
if sources.procCmdLine {
dss = append(dss, datasource.NewProcCmdline())
dss = append(dss, proc_cmdline.NewDatasource())
}
return dss
}
@@ -240,7 +256,7 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
select {
case <-stop:
return
case <-time.Tick(duration):
case <-time.After(duration):
duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
}
}
@@ -257,7 +273,7 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
select {
case s = <-ds:
case <-done:
case <-time.Tick(datasourceTimeout):
case <-time.After(datasourceTimeout):
}
close(stop)
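
Aside: the last two hunks above swap time.Tick for time.After. time.Tick allocates a Ticker that can never be stopped, so calling it on every pass through a retry loop leaks one ticker per iteration, while time.After arms a single one-shot timer per wait. A minimal, standalone sketch of that retry pattern follows; the expBackoff helper below is only a stand-in for the project's pkg.ExpBackoff, and the timings are made up.

package main

import (
	"fmt"
	"time"
)

// expBackoff doubles the interval up to a maximum; illustrative stand-in only.
func expBackoff(interval, max time.Duration) time.Duration {
	interval *= 2
	if interval > max {
		interval = max
	}
	return interval
}

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(350 * time.Millisecond)
		close(stop)
	}()

	interval := 100 * time.Millisecond
	for {
		select {
		case <-stop:
			fmt.Println("stopped")
			return
		case <-time.After(interval): // one-shot timer; no ticker left running
			fmt.Println("retrying after", interval)
			interval = expBackoff(interval, 30*time.Second)
		}
	}
}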

View File

@@ -1,17 +1,22 @@
package datasource
package configdrive
import (
"fmt"
"io/ioutil"
"os"
"path"
)
const (
openstackApiVersion = "latest"
)
type configDrive struct {
root string
readFile func(filename string) ([]byte, error)
}
func NewConfigDrive(root string) *configDrive {
func NewDatasource(root string) *configDrive {
return &configDrive{root, ioutil.ReadFile}
}
@@ -28,34 +33,28 @@ func (cd *configDrive) ConfigRoot() string {
return cd.openstackRoot()
}
// FetchMetadata attempts to retrieve metadata from ec2/2009-04-04/meta_data.json.
func (cd *configDrive) FetchMetadata() ([]byte, error) {
return cd.tryReadFile(path.Join(cd.ec2Root(), "meta_data.json"))
return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json"))
}
// FetchUserdata attempts to retrieve the userdata from ec2/2009-04-04/user_data.
// If no data is found, it will attempt to read from openstack/latest/user_data.
func (cd *configDrive) FetchUserdata() ([]byte, error) {
bytes, err := cd.tryReadFile(path.Join(cd.ec2Root(), "user_data"))
if bytes == nil && err == nil {
bytes, err = cd.tryReadFile(path.Join(cd.openstackRoot(), "user_data"))
}
return bytes, err
return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "user_data"))
}
func (cd *configDrive) Type() string {
return "cloud-drive"
}
func (cd *configDrive) ec2Root() string {
return path.Join(cd.root, "ec2", Ec2ApiVersion)
func (cd *configDrive) openstackRoot() string {
return path.Join(cd.root, "openstack")
}
func (cd *configDrive) openstackRoot() string {
return path.Join(cd.root, "openstack", "latest")
func (cd *configDrive) openstackVersionRoot() string {
return path.Join(cd.openstackRoot(), openstackApiVersion)
}
func (cd *configDrive) tryReadFile(filename string) ([]byte, error) {
fmt.Printf("Attempting to read from %q\n", filename)
data, err := cd.readFile(filename)
if os.IsNotExist(err) {
err = nil
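
For reference, a small sketch of the paths the refactored config-drive datasource ends up probing once the ec2/ branch is dropped. The layout follows the hunk above; the mount root is just an example.

package main

import (
	"fmt"
	"path"
)

const openstackAPIVersion = "latest"

func main() {
	root := "/media/configdrive"
	openstackRoot := path.Join(root, "openstack")
	versionRoot := path.Join(openstackRoot, openstackAPIVersion)

	fmt.Println("config root:", openstackRoot)                            // /media/configdrive/openstack
	fmt.Println("metadata:", path.Join(versionRoot, "meta_data.json"))    // /media/configdrive/openstack/latest/meta_data.json
	fmt.Println("userdata:", path.Join(versionRoot, "user_data"))         // /media/configdrive/openstack/latest/user_data
}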

View File

@@ -1,4 +1,4 @@
package datasource
package configdrive
import (
"os"
@@ -16,7 +16,7 @@ func (m mockFilesystem) readFile(filename string) ([]byte, error) {
return nil, os.ErrNotExist
}
func TestCDFetchMetadata(t *testing.T) {
func TestFetchMetadata(t *testing.T) {
for _, tt := range []struct {
root string
filename string
@@ -29,13 +29,13 @@ func TestCDFetchMetadata(t *testing.T) {
},
{
"/",
"/ec2/2009-04-04/meta_data.json",
mockFilesystem([]string{"/ec2/2009-04-04/meta_data.json"}),
"/openstack/latest/meta_data.json",
mockFilesystem([]string{"/openstack/latest/meta_data.json"}),
},
{
"/media/configdrive",
"/media/configdrive/ec2/2009-04-04/meta_data.json",
mockFilesystem([]string{"/media/configdrive/ec2/2009-04-04/meta_data.json"}),
"/media/configdrive/openstack/latest/meta_data.json",
mockFilesystem([]string{"/media/configdrive/openstack/latest/meta_data.json"}),
},
} {
cd := configDrive{tt.root, tt.files.readFile}
@@ -49,7 +49,7 @@ func TestCDFetchMetadata(t *testing.T) {
}
}
func TestCDFetchUserdata(t *testing.T) {
func TestFetchUserdata(t *testing.T) {
for _, tt := range []struct {
root string
filename string
@@ -60,25 +60,15 @@ func TestCDFetchUserdata(t *testing.T) {
"",
mockFilesystem{},
},
{
"/",
"/ec2/2009-04-04/user_data",
mockFilesystem([]string{"/ec2/2009-04-04/user_data"}),
},
{
"/",
"/openstack/latest/user_data",
mockFilesystem([]string{"/openstack/latest/user_data"}),
},
{
"/",
"/ec2/2009-04-04/user_data",
mockFilesystem([]string{"/openstack/latest/user_data", "/ec2/2009-04-04/user_data"}),
},
{
"/media/configdrive",
"/media/configdrive/ec2/2009-04-04/user_data",
mockFilesystem([]string{"/media/configdrive/ec2/2009-04-04/user_data"}),
"/media/configdrive/openstack/latest/user_data",
mockFilesystem([]string{"/media/configdrive/openstack/latest/user_data"}),
},
} {
cd := configDrive{tt.root, tt.files.readFile}
@@ -92,18 +82,18 @@ func TestCDFetchUserdata(t *testing.T) {
}
}
func TestCDConfigRoot(t *testing.T) {
func TestConfigRoot(t *testing.T) {
for _, tt := range []struct {
root string
configRoot string
}{
{
"/",
"/openstack/latest",
"/openstack",
},
{
"/media/configdrive",
"/media/configdrive/openstack/latest",
"/media/configdrive/openstack",
},
} {
cd := configDrive{tt.root, nil}
@@ -112,3 +102,24 @@ func TestCDConfigRoot(t *testing.T) {
}
}
}
func TestNewDatasource(t *testing.T) {
for _, tt := range []struct {
root string
expectRoot string
}{
{
root: "",
expectRoot: "",
},
{
root: "/media/configdrive",
expectRoot: "/media/configdrive",
},
} {
service := NewDatasource(tt.root)
if service.root != tt.expectRoot {
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
}
}
}

View File

@@ -1,4 +1,4 @@
package datasource
package file
import (
"io/ioutil"
@@ -9,7 +9,7 @@ type localFile struct {
path string
}
func NewLocalFile(path string) *localFile {
func NewDatasource(path string) *localFile {
return &localFile{path}
}

View File

@@ -0,0 +1,141 @@
package cloudsigma
import (
"encoding/base64"
"encoding/json"
"os"
"strings"
"github.com/coreos/coreos-cloudinit/third_party/github.com/cloudsigma/cepgo"
)
const (
userDataFieldName = "cloudinit-user-data"
)
type serverContextService struct {
client interface {
All() (interface{}, error)
Key(string) (interface{}, error)
Meta() (map[string]string, error)
FetchRaw(string) ([]byte, error)
}
}
func NewServerContextService() *serverContextService {
return &serverContextService{
client: cepgo.NewCepgo(),
}
}
func (_ *serverContextService) IsAvailable() bool {
productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
if err != nil {
return false
}
productName := make([]byte, 10)
_, err = productNameFile.Read(productName)
return err == nil && string(productName) == "CloudSigma"
}
func (_ *serverContextService) AvailabilityChanges() bool {
return true
}
func (_ *serverContextService) ConfigRoot() string {
return ""
}
func (_ *serverContextService) Type() string {
return "server-context"
}
func (scs *serverContextService) FetchMetadata() ([]byte, error) {
var (
inputMetadata struct {
Name string `json:"name"`
UUID string `json:"uuid"`
Meta map[string]string `json:"meta"`
Nics []struct {
Runtime struct {
InterfaceType string `json:"interface_type"`
IPv4 struct {
IP string `json:"uuid"`
} `json:"ip_v4"`
} `json:"runtime"`
} `json:"nics"`
}
outputMetadata struct {
Hostname string `json:"name"`
PublicKeys map[string]string `json:"public_keys"`
LocalIPv4 string `json:"local-ipv4"`
PublicIPv4 string `json:"public-ipv4"`
}
)
rawMetadata, err := scs.client.FetchRaw("")
if err != nil {
return []byte{}, err
}
err = json.Unmarshal(rawMetadata, &inputMetadata)
if err != nil {
return []byte{}, err
}
if inputMetadata.Name != "" {
outputMetadata.Hostname = inputMetadata.Name
} else {
outputMetadata.Hostname = inputMetadata.UUID
}
if key, ok := inputMetadata.Meta["ssh_public_key"]; ok {
splitted := strings.Split(key, " ")
outputMetadata.PublicKeys = make(map[string]string)
outputMetadata.PublicKeys[splitted[len(splitted)-1]] = key
}
for _, nic := range inputMetadata.Nics {
if nic.Runtime.IPv4.IP != "" {
if nic.Runtime.InterfaceType == "public" {
outputMetadata.PublicIPv4 = nic.Runtime.IPv4.IP
} else {
outputMetadata.LocalIPv4 = nic.Runtime.IPv4.IP
}
}
}
return json.Marshal(outputMetadata)
}
func (scs *serverContextService) FetchUserdata() ([]byte, error) {
metadata, err := scs.client.Meta()
if err != nil {
return []byte{}, err
}
userData, ok := metadata[userDataFieldName]
if ok && isBase64Encoded(userDataFieldName, metadata) {
if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
return decodedUserData, nil
} else {
return []byte{}, nil
}
}
return []byte(userData), nil
}
func isBase64Encoded(field string, userdata map[string]string) bool {
base64Fields, ok := userdata["base64_fields"]
if !ok {
return false
}
for _, base64Field := range strings.Split(base64Fields, ",") {
if field == base64Field {
return true
}
}
return false
}
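
A standalone sketch of the base64 handling shown above: a key in the server-context meta is decoded only when it is listed in the comma-separated "base64_fields" entry. The helper mirrors isBase64Encoded; the sample meta map follows the tests, everything else is illustrative.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func isBase64Encoded(field string, meta map[string]string) bool {
	for _, f := range strings.Split(meta["base64_fields"], ",") {
		if f == field {
			return true
		}
	}
	return false
}

func main() {
	meta := map[string]string{
		"base64_fields":       "cloudinit-user-data",
		"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0", // "hostname: coreos_test"
	}

	userdata := meta["cloudinit-user-data"]
	if isBase64Encoded("cloudinit-user-data", meta) {
		if decoded, err := base64.StdEncoding.DecodeString(userdata); err == nil {
			userdata = string(decoded)
		}
	}
	fmt.Println(userdata) // hostname: coreos_test
}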

View File

@@ -0,0 +1,152 @@
package cloudsigma
import (
"encoding/json"
"reflect"
"testing"
)
type fakeCepgoClient struct {
raw []byte
meta map[string]string
keys map[string]interface{}
err error
}
func (f *fakeCepgoClient) All() (interface{}, error) {
return f.keys, f.err
}
func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
return f.keys[key], f.err
}
func (f *fakeCepgoClient) Meta() (map[string]string, error) {
return f.meta, f.err
}
func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
return f.raw, f.err
}
func TestServerContextFetchMetadata(t *testing.T) {
var metadata struct {
Hostname string `json:"name"`
PublicKeys map[string]string `json:"public_keys"`
LocalIPv4 string `json:"local-ipv4"`
PublicIPv4 string `json:"public-ipv4"`
}
client := new(fakeCepgoClient)
scs := NewServerContextService()
scs.client = client
client.raw = []byte(`{
"context": true,
"cpu": 4000,
"cpu_model": null,
"cpus_instead_of_cores": false,
"enable_numa": false,
"grantees": [],
"hv_relaxed": false,
"hv_tsc": false,
"jobs": [],
"mem": 4294967296,
"meta": {
"base64_fields": "cloudinit-user-data",
"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
},
"name": "coreos",
"nics": [
{
"runtime": {
"interface_type": "public",
"ip_v4": {
"uuid": "31.171.251.74"
},
"ip_v6": null
},
"vlan": null
}
],
"smp": 2,
"status": "running",
"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
}`)
metadataBytes, err := scs.FetchMetadata()
if err != nil {
t.Error(err.Error())
}
if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
t.Error(err.Error())
}
if metadata.Hostname != "coreos" {
t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
}
if metadata.PublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
t.Error("Public SSH Keys are not being read properly")
}
if metadata.LocalIPv4 != "" {
t.Errorf("Local IP is not empty but %s instead", metadata.LocalIPv4)
}
if metadata.PublicIPv4 != "31.171.251.74" {
t.Errorf("Local IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
}
}
func TestServerContextFetchUserdata(t *testing.T) {
client := new(fakeCepgoClient)
scs := NewServerContextService()
scs.client = client
userdataSets := []struct {
in map[string]string
err bool
out []byte
}{
{map[string]string{
"base64_fields": "cloudinit-user-data",
"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
}, false, []byte("hostname: coreos_test")},
{map[string]string{
"cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
}, false, []byte("#cloud-config\\nhostname: coreos1")},
{map[string]string{}, false, []byte{}},
}
for i, set := range userdataSets {
client.meta = set.in
got, err := scs.FetchUserdata()
if (err != nil) != set.err {
t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
}
if !reflect.DeepEqual(got, set.out) {
t.Errorf("case %d: got %s, want %s", i, got, set.out)
}
}
}
func TestServerContextDecodingBase64UserData(t *testing.T) {
base64Sets := []struct {
in string
out bool
}{
{"cloudinit-user-data,foo,bar", true},
{"bar,cloudinit-user-data,foo,bar", true},
{"cloudinit-user-data", true},
{"", false},
{"foo", false},
}
for _, set := range base64Sets {
userdata := map[string]string{"base64_fields": set.in}
if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
}
}
}

View File

@@ -0,0 +1,141 @@
package ec2
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"strings"
"github.com/coreos/coreos-cloudinit/pkg"
)
const (
DefaultAddress = "http://169.254.169.254/"
apiVersion = "2009-04-04"
userdataUrl = apiVersion + "/user-data"
metadataUrl = apiVersion + "/meta-data"
)
type metadataService struct {
root string
client pkg.Getter
}
func NewDatasource(root string) *metadataService {
if !strings.HasSuffix(root, "/") {
root += "/"
}
return &metadataService{root, pkg.NewHttpClient()}
}
func (ms metadataService) IsAvailable() bool {
_, err := ms.client.Get(ms.root + apiVersion)
return (err == nil)
}
func (ms metadataService) AvailabilityChanges() bool {
return true
}
func (ms metadataService) ConfigRoot() string {
return ms.root
}
func (ms metadataService) FetchMetadata() ([]byte, error) {
attrs := make(map[string]interface{})
if keynames, err := fetchAttributes(ms.client, fmt.Sprintf("%s/public-keys", ms.metadataUrl())); err == nil {
keyIDs := make(map[string]string)
for _, keyname := range keynames {
tokens := strings.SplitN(keyname, "=", 2)
if len(tokens) != 2 {
return nil, fmt.Errorf("malformed public key: %q", keyname)
}
keyIDs[tokens[1]] = tokens[0]
}
keys := make(map[string]string)
for name, id := range keyIDs {
sshkey, err := fetchAttribute(ms.client, fmt.Sprintf("%s/public-keys/%s/openssh-key", ms.metadataUrl(), id))
if err != nil {
return nil, err
}
keys[name] = sshkey
fmt.Printf("Found SSH key for %q\n", name)
}
attrs["public_keys"] = keys
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if hostname, err := fetchAttribute(ms.client, fmt.Sprintf("%s/hostname", ms.metadataUrl())); err == nil {
attrs["hostname"] = hostname
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if localAddr, err := fetchAttribute(ms.client, fmt.Sprintf("%s/local-ipv4", ms.metadataUrl())); err == nil {
attrs["local-ipv4"] = localAddr
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if publicAddr, err := fetchAttribute(ms.client, fmt.Sprintf("%s/public-ipv4", ms.metadataUrl())); err == nil {
attrs["public-ipv4"] = publicAddr
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if content_path, err := fetchAttribute(ms.client, fmt.Sprintf("%s/network_config/content_path", ms.metadataUrl())); err == nil {
attrs["network_config"] = map[string]string{
"content_path": content_path,
}
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
return json.Marshal(attrs)
}
func (ms metadataService) FetchUserdata() ([]byte, error) {
if data, err := ms.client.GetRetry(ms.userdataUrl()); err == nil {
return data, err
} else if _, ok := err.(pkg.ErrNotFound); ok {
return []byte{}, nil
} else {
return data, err
}
}
func (ms metadataService) Type() string {
return "ec2-metadata-service"
}
func (ms metadataService) metadataUrl() string {
return (ms.root + metadataUrl)
}
func (ms metadataService) userdataUrl() string {
return (ms.root + userdataUrl)
}
func fetchAttributes(client pkg.Getter, url string) ([]string, error) {
resp, err := client.GetRetry(url)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(bytes.NewBuffer(resp))
data := make([]string, 0)
for scanner.Scan() {
data = append(data, scanner.Text())
}
return data, scanner.Err()
}
func fetchAttribute(client pkg.Getter, url string) (string, error) {
if attrs, err := fetchAttributes(client, url); err == nil && len(attrs) > 0 {
return attrs[0], nil
} else {
return "", err
}
}
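
For context, a rough sketch of how the newline-separated public-keys listing from the EC2-style endpoint becomes the public_keys map that FetchMetadata builds: the listing yields "index=name" entries, and the key body is then fetched from public-keys/<index>/openssh-key. The sample listing and key bodies below are made up.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// splitLines mimics fetchAttributes' scanning of a metadata response body.
func splitLines(resp []byte) []string {
	scanner := bufio.NewScanner(bytes.NewBuffer(resp))
	var lines []string
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	return lines
}

func main() {
	// GET <root>/2009-04-04/meta-data/public-keys returns "index=name" lines.
	listing := []byte("0=alice\n1=bob\n")

	// Pretend responses for .../public-keys/<index>/openssh-key.
	opensshKeys := map[string]string{"0": "ssh-rsa AAAA... alice", "1": "ssh-rsa BBBB... bob"}

	keys := make(map[string]string)
	for _, entry := range splitLines(listing) {
		tokens := strings.SplitN(entry, "=", 2)
		if len(tokens) != 2 {
			fmt.Printf("malformed public key: %q\n", entry)
			continue
		}
		index, name := tokens[0], tokens[1]
		keys[name] = opensshKeys[index]
	}
	fmt.Println(keys)
}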

View File

@@ -0,0 +1,324 @@
package ec2
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/coreos/coreos-cloudinit/pkg"
)
type testHttpClient struct {
resources map[string]string
err error
}
func (t *testHttpClient) GetRetry(url string) ([]byte, error) {
if t.err != nil {
return nil, t.err
}
if val, ok := t.resources[url]; ok {
return []byte(val), nil
} else {
return nil, pkg.ErrNotFound{fmt.Errorf("not found: %q", url)}
}
}
func (t *testHttpClient) Get(url string) ([]byte, error) {
return t.GetRetry(url)
}
func TestAvailabilityChanges(t *testing.T) {
want := true
if ac := (metadataService{}).AvailabilityChanges(); ac != want {
t.Fatalf("bad AvailabilityChanges: want %q, got %q", want, ac)
}
}
func TestType(t *testing.T) {
want := "ec2-metadata-service"
if kind := (metadataService{}).Type(); kind != want {
t.Fatalf("bad type: want %q, got %q", want, kind)
}
}
func TestIsAvailable(t *testing.T) {
for _, tt := range []struct {
root string
resources map[string]string
expect bool
}{
{
root: "/",
resources: map[string]string{
"/2009-04-04": "",
},
expect: true,
},
{
root: "/",
resources: map[string]string{},
expect: false,
},
} {
service := &metadataService{tt.root, &testHttpClient{tt.resources, nil}}
if a := service.IsAvailable(); a != tt.expect {
t.Fatalf("bad isAvailable (%q): want %q, got %q", tt.resources, tt.expect, a)
}
}
}
func TestFetchUserdata(t *testing.T) {
for _, tt := range []struct {
root string
resources map[string]string
userdata []byte
clientErr error
expectErr error
}{
{
root: "/",
resources: map[string]string{
"/2009-04-04/user-data": "hello",
},
userdata: []byte("hello"),
},
{
root: "/",
clientErr: pkg.ErrNotFound{fmt.Errorf("test not found error")},
userdata: []byte{},
},
{
root: "/",
clientErr: pkg.ErrTimeout{fmt.Errorf("test timeout error")},
expectErr: pkg.ErrTimeout{fmt.Errorf("test timeout error")},
},
} {
service := &metadataService{tt.root, &testHttpClient{tt.resources, tt.clientErr}}
data, err := service.FetchUserdata()
if Error(err) != Error(tt.expectErr) {
t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
}
if !bytes.Equal(data, tt.userdata) {
t.Fatalf("bad userdata (%q): want %q, got %q", tt.resources, tt.userdata, data)
}
}
}
func TestUrls(t *testing.T) {
for _, tt := range []struct {
root string
expectRoot string
userdata string
metadata string
}{
{
root: "/",
expectRoot: "/",
userdata: "/2009-04-04/user-data",
metadata: "/2009-04-04/meta-data",
},
{
root: "http://169.254.169.254/",
expectRoot: "http://169.254.169.254/",
userdata: "http://169.254.169.254/2009-04-04/user-data",
metadata: "http://169.254.169.254/2009-04-04/meta-data",
},
} {
service := &metadataService{tt.root, nil}
if url := service.userdataUrl(); url != tt.userdata {
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.userdata, url)
}
if url := service.metadataUrl(); url != tt.metadata {
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.metadata, url)
}
if url := service.ConfigRoot(); url != tt.expectRoot {
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.expectRoot, url)
}
}
}
func TestFetchAttributes(t *testing.T) {
for _, s := range []struct {
resources map[string]string
err error
tests []struct {
path string
val []string
}
}{
{
resources: map[string]string{
"/": "a\nb\nc/",
"/c/": "d\ne/",
"/c/e/": "f",
"/a": "1",
"/b": "2",
"/c/d": "3",
"/c/e/f": "4",
},
tests: []struct {
path string
val []string
}{
{"/", []string{"a", "b", "c/"}},
{"/b", []string{"2"}},
{"/c/d", []string{"3"}},
{"/c/e/", []string{"f"}},
},
},
{
err: pkg.ErrNotFound{fmt.Errorf("test error")},
tests: []struct {
path string
val []string
}{
{"", nil},
},
},
} {
client := &testHttpClient{s.resources, s.err}
for _, tt := range s.tests {
attrs, err := fetchAttributes(client, tt.path)
if err != s.err {
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
}
if !reflect.DeepEqual(attrs, tt.val) {
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attrs)
}
}
}
}
func TestFetchAttribute(t *testing.T) {
for _, s := range []struct {
resources map[string]string
err error
tests []struct {
path string
val string
}
}{
{
resources: map[string]string{
"/": "a\nb\nc/",
"/c/": "d\ne/",
"/c/e/": "f",
"/a": "1",
"/b": "2",
"/c/d": "3",
"/c/e/f": "4",
},
tests: []struct {
path string
val string
}{
{"/a", "1"},
{"/b", "2"},
{"/c/d", "3"},
{"/c/e/f", "4"},
},
},
{
err: pkg.ErrNotFound{fmt.Errorf("test error")},
tests: []struct {
path string
val string
}{
{"", ""},
},
},
} {
client := &testHttpClient{s.resources, s.err}
for _, tt := range s.tests {
attr, err := fetchAttribute(client, tt.path)
if err != s.err {
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
}
if attr != tt.val {
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attr)
}
}
}
}
func TestFetchMetadata(t *testing.T) {
for _, tt := range []struct {
root string
resources map[string]string
expect []byte
clientErr error
expectErr error
}{
{
root: "/",
resources: map[string]string{
"/2009-04-04/meta-data/public-keys": "bad\n",
},
expectErr: fmt.Errorf("malformed public key: \"bad\""),
},
{
root: "/",
resources: map[string]string{
"/2009-04-04/meta-data/hostname": "host",
"/2009-04-04/meta-data/local-ipv4": "1.2.3.4",
"/2009-04-04/meta-data/public-ipv4": "5.6.7.8",
"/2009-04-04/meta-data/public-keys": "0=test1\n",
"/2009-04-04/meta-data/public-keys/0": "openssh-key",
"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
"/2009-04-04/meta-data/network_config/content_path": "path",
},
expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
},
{
clientErr: pkg.ErrTimeout{fmt.Errorf("test error")},
expectErr: pkg.ErrTimeout{fmt.Errorf("test error")},
},
} {
service := &metadataService{tt.root, &testHttpClient{tt.resources, tt.clientErr}}
metadata, err := service.FetchMetadata()
if Error(err) != Error(tt.expectErr) {
t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
}
if !bytes.Equal(metadata, tt.expect) {
t.Fatalf("bad fetch (%q): want %q, got %q", tt.resources, tt.expect, metadata)
}
}
}
func TestNewDatasource(t *testing.T) {
for _, tt := range []struct {
root string
expectRoot string
}{
{
root: "",
expectRoot: "/",
},
{
root: "/",
expectRoot: "/",
},
{
root: "http://169.254.169.254",
expectRoot: "http://169.254.169.254/",
},
{
root: "http://169.254.169.254/",
expectRoot: "http://169.254.169.254/",
},
} {
service := NewDatasource(tt.root)
if service.root != tt.expectRoot {
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
}
}
}
func Error(err error) string {
if err != nil {
return err.Error()
}
return ""
}

View File

@@ -1,153 +0,0 @@
package datasource
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"strings"
"github.com/coreos/coreos-cloudinit/pkg"
)
// metadataService retrieves metadata from either an OpenStack[1] (2012-08-10)
// or EC2[2] (2009-04-04) compatible endpoint. It will first attempt to
// directly retrieve a JSON blob from the OpenStack endpoint. If that fails
// with a 404, it then attempts to retrieve metadata bit-by-bit from the EC2
// endpoint, and populates that into an equivalent JSON blob. metadataService
// also checks for userdata from EC2 and, if that fails with a 404, OpenStack.
//
// [1] http://docs.openstack.org/grizzly/openstack-compute/admin/content/metadata-service.html
// [2] http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html#instancedata-data-categories
const (
BaseUrl = "http://169.254.169.254/"
Ec2UserdataUrl = BaseUrl + Ec2ApiVersion + "/user-data"
Ec2MetadataUrl = BaseUrl + Ec2ApiVersion + "/meta-data"
OpenstackUserdataUrl = BaseUrl + "openstack/" + OpenstackApiVersion + "/user_data"
)
type metadataService struct{}
type getter interface {
GetRetry(string) ([]byte, error)
}
func NewMetadataService() *metadataService {
return &metadataService{}
}
func (ms *metadataService) IsAvailable() bool {
client := pkg.NewHttpClient()
_, err := client.Get(BaseUrl)
return (err == nil)
}
func (ms *metadataService) AvailabilityChanges() bool {
return true
}
func (ms *metadataService) ConfigRoot() string {
return ""
}
func (ms *metadataService) FetchMetadata() ([]byte, error) {
return fetchMetadata(pkg.NewHttpClient())
}
func (ms *metadataService) FetchUserdata() ([]byte, error) {
client := pkg.NewHttpClient()
if data, err := client.GetRetry(Ec2UserdataUrl); err == nil {
return data, err
} else if _, ok := err.(pkg.ErrTimeout); ok {
return data, err
}
if data, err := client.GetRetry(OpenstackUserdataUrl); err == nil {
return data, err
} else if _, ok := err.(pkg.ErrNotFound); ok {
return []byte{}, nil
} else {
return data, err
}
}
func (ms *metadataService) Type() string {
return "metadata-service"
}
func fetchMetadata(client getter) ([]byte, error) {
attrs := make(map[string]interface{})
if keynames, err := fetchAttributes(client, fmt.Sprintf("%s/public-keys", Ec2MetadataUrl)); err == nil {
keyIDs := make(map[string]string)
for _, keyname := range keynames {
tokens := strings.SplitN(keyname, "=", 2)
if len(tokens) != 2 {
return nil, fmt.Errorf("malformed public key: %q\n", keyname)
}
keyIDs[tokens[1]] = tokens[0]
}
keys := make(map[string]string)
for name, id := range keyIDs {
sshkey, err := fetchAttribute(client, fmt.Sprintf("%s/public-keys/%s/openssh-key", Ec2MetadataUrl, id))
if err != nil {
return nil, err
}
keys[name] = sshkey
fmt.Printf("Found SSH key for %q\n", name)
}
attrs["public_keys"] = keys
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if hostname, err := fetchAttribute(client, fmt.Sprintf("%s/hostname", Ec2MetadataUrl)); err == nil {
attrs["hostname"] = hostname
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if localAddr, err := fetchAttribute(client, fmt.Sprintf("%s/local-ipv4", Ec2MetadataUrl)); err == nil {
attrs["local-ipv4"] = localAddr
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if publicAddr, err := fetchAttribute(client, fmt.Sprintf("%s/public-ipv4", Ec2MetadataUrl)); err == nil {
attrs["public-ipv4"] = publicAddr
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
if content_path, err := fetchAttribute(client, fmt.Sprintf("%s/network_config/content_path", Ec2MetadataUrl)); err == nil {
attrs["network_config"] = map[string]string{
"content_path": content_path,
}
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
return json.Marshal(attrs)
}
func fetchAttributes(client getter, url string) ([]string, error) {
resp, err := client.GetRetry(url)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(bytes.NewBuffer(resp))
data := make([]string, 0)
for scanner.Scan() {
data = append(data, scanner.Text())
}
return data, scanner.Err()
}
func fetchAttribute(client getter, url string) (string, error) {
if attrs, err := fetchAttributes(client, url); err == nil && len(attrs) > 0 {
return attrs[0], nil
} else {
return "", err
}
}

View File

@@ -1,159 +0,0 @@
package datasource
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/coreos/coreos-cloudinit/pkg"
)
type TestHttpClient struct {
metadata map[string]string
err error
}
func (t *TestHttpClient) GetRetry(url string) ([]byte, error) {
if t.err != nil {
return nil, t.err
}
if val, ok := t.metadata[url]; ok {
return []byte(val), nil
} else {
return nil, pkg.ErrNotFound{fmt.Errorf("not found: %q", url)}
}
}
func TestMSFetchAttributes(t *testing.T) {
for _, s := range []struct {
metadata map[string]string
err error
tests []struct {
path string
val []string
}
}{
{
metadata: map[string]string{
"/": "a\nb\nc/",
"/c/": "d\ne/",
"/c/e/": "f",
"/a": "1",
"/b": "2",
"/c/d": "3",
"/c/e/f": "4",
},
tests: []struct {
path string
val []string
}{
{"/", []string{"a", "b", "c/"}},
{"/b", []string{"2"}},
{"/c/d", []string{"3"}},
{"/c/e/", []string{"f"}},
},
},
{
err: pkg.ErrNotFound{fmt.Errorf("test error")},
tests: []struct {
path string
val []string
}{
{"", nil},
},
},
} {
client := &TestHttpClient{s.metadata, s.err}
for _, tt := range s.tests {
attrs, err := fetchAttributes(client, tt.path)
if err != s.err {
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.metadata, s.err, err)
}
if !reflect.DeepEqual(attrs, tt.val) {
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.metadata, tt.val, attrs)
}
}
}
}
func TestMSFetchAttribute(t *testing.T) {
for _, s := range []struct {
metadata map[string]string
err error
tests []struct {
path string
val string
}
}{
{
metadata: map[string]string{
"/": "a\nb\nc/",
"/c/": "d\ne/",
"/c/e/": "f",
"/a": "1",
"/b": "2",
"/c/d": "3",
"/c/e/f": "4",
},
tests: []struct {
path string
val string
}{
{"/a", "1"},
{"/b", "2"},
{"/c/d", "3"},
{"/c/e/f", "4"},
},
},
{
err: pkg.ErrNotFound{fmt.Errorf("test error")},
tests: []struct {
path string
val string
}{
{"", ""},
},
},
} {
client := &TestHttpClient{s.metadata, s.err}
for _, tt := range s.tests {
attr, err := fetchAttribute(client, tt.path)
if err != s.err {
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.metadata, s.err, err)
}
if attr != tt.val {
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.metadata, tt.val, attr)
}
}
}
}
func TestMSFetchMetadata(t *testing.T) {
for _, tt := range []struct {
metadata map[string]string
err error
expect []byte
}{
{
metadata: map[string]string{
"http://169.254.169.254/2009-04-04/meta-data/hostname": "host",
"http://169.254.169.254/2009-04-04/meta-data/public-keys": "0=test1\n",
"http://169.254.169.254/2009-04-04/meta-data/public-keys/0": "openssh-key",
"http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
"http://169.254.169.254/2009-04-04/meta-data/network_config/content_path": "path",
},
expect: []byte(`{"hostname":"host","network_config":{"content_path":"path"},"public_keys":{"test1":"key"}}`),
},
{err: pkg.ErrTimeout{fmt.Errorf("test error")}},
} {
client := &TestHttpClient{tt.metadata, tt.err}
metadata, err := fetchMetadata(client)
if err != tt.err {
t.Fatalf("bad error (%q): want %q, got %q", tt.metadata, tt.err, err)
}
if !bytes.Equal(metadata, tt.expect) {
t.Fatalf("bad fetch (%q): want %q, got %q", tt.metadata, tt.expect, metadata)
}
}
}

View File

@@ -1,4 +1,4 @@
package datasource
package proc_cmdline
import (
"errors"
@@ -18,7 +18,7 @@ type procCmdline struct {
Location string
}
func NewProcCmdline() *procCmdline {
func NewDatasource() *procCmdline {
return &procCmdline{Location: ProcCmdlineLocation}
}

View File

@@ -1,4 +1,4 @@
package datasource
package proc_cmdline
import (
"fmt"
@@ -75,7 +75,7 @@ func TestProcCmdlineAndFetchConfig(t *testing.T) {
t.Errorf("Test produced error: %v", err)
}
p := NewProcCmdline()
p := NewDatasource()
p.Location = file.Name()
cfg, err := p.FetchUserdata()
if err != nil {

View File

@@ -1,4 +1,4 @@
package datasource
package url
import "github.com/coreos/coreos-cloudinit/pkg"
@@ -6,7 +6,7 @@ type remoteFile struct {
url string
}
func NewRemoteFile(url string) *remoteFile {
func NewDatasource(url string) *remoteFile {
return &remoteFile{url}
}

View File

@@ -258,7 +258,9 @@ func Apply(cfg CloudConfig, env *Environment) error {
}
if env.NetconfType() != "" {
netconfBytes, err := ioutil.ReadFile(path.Join(env.ConfigRoot(), cfg.NetworkConfigPath))
filename := path.Join(env.ConfigRoot(), cfg.NetworkConfigPath)
log.Printf("Attempting to read config from %q\n", filename)
netconfBytes, err := ioutil.ReadFile(filename)
if err != nil {
return err
}

View File

@@ -66,7 +66,7 @@ func TestEnvironmentFile(t *testing.T) {
"$public_ipv4": "1.2.3.4",
"$private_ipv4": "5.6.7.8",
}
expect := "COREOS_PUBLIC_IPV4=1.2.3.4\nCOREOS_PRIVATE_IPV4=5.6.7.8\n"
expect := "COREOS_PRIVATE_IPV4=5.6.7.8\nCOREOS_PUBLIC_IPV4=1.2.3.4\n"
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {

View File

@@ -39,7 +39,7 @@ func (ee EtcdEnvironment) String() (out string) {
// Units creates a Unit file drop-in for etcd, using any configured
// options and adding a default MachineID if unset.
func (ee EtcdEnvironment) Units(root string) ([]system.Unit, error) {
if ee == nil {
if len(ee) < 1 {
return nil, nil
}
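
The change above hinges on Go's nil-versus-empty map semantics: a cloud-config with an "etcd:" section but no keys unmarshals to a non-nil empty map, so the old "!= nil" check still configured etcd. A minimal sketch (type and helper names are illustrative):

package main

import "fmt"

type EtcdEnvironment map[string]string

func shouldConfigure(ee EtcdEnvironment) bool {
	return len(ee) >= 1 // the inverse of the len(ee) < 1 guard
}

func main() {
	var unset EtcdEnvironment            // nil map
	empty := EtcdEnvironment{}           // non-nil, zero entries
	set := EtcdEnvironment{"name": "n1"} // one entry

	fmt.Println(unset == nil, shouldConfigure(unset)) // true false
	fmt.Println(empty == nil, shouldConfigure(empty)) // false false
	fmt.Println(set == nil, shouldConfigure(set))     // false true
}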

View File

@@ -113,8 +113,19 @@ Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
}
}
func TestEtcdEnvironmentWrittenToDiskDefaultToMachineID(t *testing.T) {
func TestEtcdEnvironmentEmptyNoOp(t *testing.T) {
ee := EtcdEnvironment{}
uu, err := ee.Units("")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(uu) > 0 {
t.Fatalf("Generated etcd units unexpectedly: %v")
}
}
func TestEtcdEnvironmentWrittenToDiskDefaultToMachineID(t *testing.T) {
ee := EtcdEnvironment{"foo": "bar"}
dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
if err != nil {
t.Fatalf("Unable to create tempdir: %v", err)
@@ -152,6 +163,7 @@ func TestEtcdEnvironmentWrittenToDiskDefaultToMachineID(t *testing.T) {
}
expect := `[Service]
Environment="ETCD_FOO=bar"
Environment="ETCD_NAME=node007"
`
if string(contents) != expect {

View File

@@ -57,6 +57,11 @@ type HttpClient struct {
client *http.Client
}
type Getter interface {
Get(string) ([]byte, error)
GetRetry(string) ([]byte, error)
}
func NewHttpClient() *HttpClient {
hc := &HttpClient{
MaxBackoff: time.Second * 5,
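
The new Getter interface is what lets the ec2 datasource tests above swap in a fake client instead of making real HTTP calls. A self-contained sketch of that pattern; the interface is copied from the hunk, the fake client and its behavior are illustrative.

package main

import (
	"errors"
	"fmt"
)

type Getter interface {
	Get(string) ([]byte, error)
	GetRetry(string) ([]byte, error)
}

type fakeClient struct{ resources map[string]string }

func (f fakeClient) Get(url string) ([]byte, error) { return f.GetRetry(url) }

func (f fakeClient) GetRetry(url string) ([]byte, error) {
	if body, ok := f.resources[url]; ok {
		return []byte(body), nil
	}
	return nil, errors.New("not found: " + url)
}

func main() {
	var client Getter = fakeClient{resources: map[string]string{"/2009-04-04": ""}}
	_, err := client.Get("/2009-04-04")
	fmt.Println("available:", err == nil) // available: true
}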

View File

@@ -7,6 +7,7 @@ import (
"os"
"path"
"regexp"
"sort"
)
type EnvFile struct {
@@ -24,7 +25,7 @@ var lineLexer = regexp.MustCompile(`(?m)^((?:([a-zA-Z0-9_]+)=)?.*?)\r?\n`)
// mergeEnvContents: Update the existing file contents with new values,
// preserving variable ordering and all content this code doesn't understand.
// All new values are appended to the bottom of the old.
// All new values are appended to the bottom of the old, sorted by key.
func mergeEnvContents(old []byte, pending map[string]string) []byte {
var buf bytes.Buffer
var match [][]byte
@@ -44,7 +45,8 @@ func mergeEnvContents(old []byte, pending map[string]string) []byte {
}
}
for key, value := range pending {
for _, key := range keys(pending) {
value := pending[key]
fmt.Fprintf(&buf, "%s=%s\n", key, value)
}
@@ -87,3 +89,12 @@ func WriteEnvFile(ef *EnvFile, root string) error {
_, err = WriteFile(ef.File, root)
return err
}
// keys returns the keys of a map in sorted order
func keys(m map[string]string) (s []string) {
for k, _ := range m {
s = append(s, k)
}
sort.Strings(s)
return
}
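
A short sketch of the determinism argument behind the sorted-keys change: ranging over a Go map directly yields a randomized order, so pending values are appended in sorted key order instead, which is why the TestEnvironmentFile expectation now lists COREOS_PRIVATE_IPV4 first. Names below are illustrative.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func appendSorted(buf *bytes.Buffer, pending map[string]string) {
	keys := make([]string, 0, len(pending))
	for k := range pending {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Fprintf(buf, "%s=%s\n", k, pending[k])
	}
}

func main() {
	var buf bytes.Buffer
	appendSorted(&buf, map[string]string{
		"COREOS_PUBLIC_IPV4":  "1.2.3.4",
		"COREOS_PRIVATE_IPV4": "5.6.7.8",
	})
	fmt.Print(buf.String())
	// COREOS_PRIVATE_IPV4=5.6.7.8
	// COREOS_PUBLIC_IPV4=1.2.3.4
}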

test
View File

@@ -13,7 +13,17 @@ COVER=${COVER:-"-cover"}
source ./build
declare -a TESTPKGS=(initialize system datasource pkg network)
declare -a TESTPKGS=(initialize
system
datasource
datasource/configdrive
datasource/file
datasource/metadata/cloudsigma
datasource/metadata/ec2
datasource/proc_cmdline
datasource/url
pkg
network)
if [ -z "$PKG" ]; then
GOFMTPATH="$TESTPKGS coreos-cloudinit.go"

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,43 @@
cepgo
=====
Cepko implements easy-to-use communication with CloudSigma's VMs through a
virtual serial port, without having to format the messages properly or parse
the output with the specific and sometimes confusing shell tools meant for
that purpose.
Having the server definition accessible from within the VM can be useful in
various ways. For example, it is possible to determine which network
interfaces are connected to the public network and which to the private one.
Another use is to pass data to initial VM setup scripts, such as setting the
hostname to the VM name or passing SSH public keys through the server meta.
Example usage:
package main
import (
"fmt"
"github.com/cloudsigma/cepgo"
)
func main() {
c := cepgo.NewCepgo()
result, err := c.Meta()
if err != nil {
panic(err)
}
fmt.Printf("%#v", result)
}
Output:
map[string]interface {}{
"optimize_for":"custom",
"ssh_public_key":"ssh-rsa AAA...",
"description":"[...]",
}
For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html

View File

@@ -0,0 +1,186 @@
// Cepko implements easy-to-use communication with CloudSigma's VMs through a
// virtual serial port, without having to format the messages properly or parse
// the output with the specific and sometimes confusing shell tools meant for
// that purpose.
//
// Having the server definition accessible from within the VM can be useful in
// various ways. For example, it is possible to determine which network
// interfaces are connected to the public network and which to the private one.
// Another use is to pass data to initial VM setup scripts, such as setting the
// hostname to the VM name or passing SSH public keys through the server meta.
//
// Example usage:
//
// package main
//
// import (
// "fmt"
//
// "github.com/cloudsigma/cepgo"
// )
//
// func main() {
// c := cepgo.NewCepgo()
// result, err := c.Meta()
// if err != nil {
// panic(err)
// }
// fmt.Printf("%#v", result)
// }
//
// Output:
//
// map[string]string{
// "optimize_for":"custom",
// "ssh_public_key":"ssh-rsa AAA...",
// "description":"[...]",
// }
//
// For more information take a look at the Server Context section API Docs:
// http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
package cepgo
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"runtime"
"github.com/coreos/coreos-cloudinit/third_party/github.com/tarm/goserial"
)
const (
requestPattern = "<\n%s\n>"
EOT = '\x04' // End Of Transmission
)
var (
SerialPort string = "/dev/ttyS1"
Baud int = 115200
)
// Sets the serial port. If the operating system is Windows, CloudSigma's server
// context is exposed on the COM2 port; otherwise (linux, freebsd, darwin) the
// port is left at the default /dev/ttyS1.
func init() {
if runtime.GOOS == "windows" {
SerialPort = "COM2"
}
}
// The default fetcher opens the connection to the serial port, writes the
// given query, and reads until the EOT symbol.
func fetchViaSerialPort(key string) ([]byte, error) {
config := &serial.Config{Name: SerialPort, Baud: Baud}
connection, err := serial.OpenPort(config)
if err != nil {
return nil, err
}
query := fmt.Sprintf(requestPattern, key)
if _, err := connection.Write([]byte(query)); err != nil {
return nil, err
}
reader := bufio.NewReader(connection)
answer, err := reader.ReadBytes(EOT)
if err != nil {
return nil, err
}
return answer[0 : len(answer)-1], nil
}
// Queries to the serial port can be executed only from an instance of this type.
// The result of each query is either an interface{}, a map[string]string, or a
// single value when only one value is returned. There is also a public method
// that directly calls the fetcher and returns the raw []byte from the serial port.
type Cepgo struct {
fetcher func(string) ([]byte, error)
}
// Creates a Cepgo instance with the default serial port fetcher.
func NewCepgo() *Cepgo {
cepgo := new(Cepgo)
cepgo.fetcher = fetchViaSerialPort
return cepgo
}
// Creates a Cepgo instance with a custom fetcher.
func NewCepgoFetcher(fetcher func(string) ([]byte, error)) *Cepgo {
cepgo := new(Cepgo)
cepgo.fetcher = fetcher
return cepgo
}
// Fetches the raw []byte from the serial port by calling the fetcher member
// directly.
func (c *Cepgo) FetchRaw(key string) ([]byte, error) {
return c.fetcher(key)
}
// Fetches a single key, tries to unmarshal the result from JSON, and returns
// it. If the unmarshalling fails, the result is assumed to be a plain string
// and is returned as such.
func (c *Cepgo) Key(key string) (interface{}, error) {
var result interface{}
fetched, err := c.FetchRaw(key)
if err != nil {
return nil, err
}
err = json.Unmarshal(fetched, &result)
if err != nil {
return string(fetched), nil
}
return result, nil
}
// Fetches the whole server context. Equivalent to c.Key("").
func (c *Cepgo) All() (interface{}, error) {
return c.Key("")
}
// Fetches only the object meta field and makes sure to return a proper
// map[string]string
func (c *Cepgo) Meta() (map[string]string, error) {
rawMeta, err := c.Key("/meta/")
if err != nil {
return nil, err
}
return typeAssertToMapOfStrings(rawMeta)
}
// Fetches only the global context and makes sure to return a proper
// map[string]string
func (c *Cepgo) GlobalContext() (map[string]string, error) {
rawContext, err := c.Key("/global_context/")
if err != nil {
return nil, err
}
return typeAssertToMapOfStrings(rawContext)
}
// A small helper that uses type assertions to convert an interface{} to a
// map[string]string where possible.
func typeAssertToMapOfStrings(raw interface{}) (map[string]string, error) {
result := make(map[string]string)
dictionary, ok := raw.(map[string]interface{})
if !ok {
return nil, errors.New("Received bytes are formatted badly")
}
for key, rawValue := range dictionary {
if value, ok := rawValue.(string); ok {
result[key] = value
} else {
return nil, errors.New("Server context metadata is formatted badly")
}
}
return result, nil
}
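
The fetcher indirection above also makes the package easy to exercise without
a CloudSigma serial port. A minimal sketch of swapping in an in-memory fetcher,
assuming the upstream import path; the stubFetcher name and its JSON payload
are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/cloudsigma/cepgo"
)

// stubFetcher stands in for the serial-port fetcher; the payload below is
// illustrative only and would normally come from the server context.
func stubFetcher(key string) ([]byte, error) {
	return []byte(`{"ssh_public_key": "ssh-rsa AAA...", "hostname": "coreos1"}`), nil
}

func main() {
	c := cepgo.NewCepgoFetcher(stubFetcher)

	// Meta() unmarshals the fetched JSON and type-asserts it into a
	// map[string]string, exactly as it would for the real /meta/ query.
	meta, err := c.Meta()
	if err != nil {
		panic(err)
	}
	fmt.Println(meta["ssh_public_key"])
}
```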


@@ -0,0 +1,122 @@
package cepgo
import (
"encoding/json"
"testing"
)
func fetchMock(key string) ([]byte, error) {
context := []byte(`{
"context": true,
"cpu": 4000,
"cpu_model": null,
"cpus_instead_of_cores": false,
"enable_numa": false,
"global_context": {
"some_global_key": "some_global_val"
},
"grantees": [],
"hv_relaxed": false,
"hv_tsc": false,
"jobs": [],
"mem": 4294967296,
"meta": {
"base64_fields": "cloudinit-user-data",
"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
"ssh_public_key": "ssh-rsa AAAAB2NzaC1yc2E.../hQ5D5 john@doe"
},
"name": "coreos",
"nics": [
{
"runtime": {
"interface_type": "public",
"ip_v4": {
"uuid": "31.171.251.74"
},
"ip_v6": null
},
"vlan": null
}
],
"smp": 2,
"status": "running",
"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
}`)
if key == "" {
return context, nil
}
var marshalledContext map[string]interface{}
err := json.Unmarshal(context, &marshalledContext)
if err != nil {
return nil, err
}
if key[0] == '/' {
key = key[1:]
}
if key[len(key)-1] == '/' {
key = key[:len(key)-1]
}
return json.Marshal(marshalledContext[key])
}
func TestAll(t *testing.T) {
cepgo := NewCepgoFetcher(fetchMock)
result, err := cepgo.All()
if err != nil {
t.Error(err)
}
for _, key := range []string{"meta", "name", "uuid", "global_context"} {
if _, ok := result.(map[string]interface{})[key]; !ok {
t.Errorf("%s not in all keys", key)
}
}
}
func TestKey(t *testing.T) {
cepgo := NewCepgoFetcher(fetchMock)
result, err := cepgo.Key("uuid")
if err != nil {
t.Error(err)
}
if _, ok := result.(string); !ok {
t.Errorf("%#v\n", result)
t.Error("Fetching the uuid did not return a string")
}
}
func TestMeta(t *testing.T) {
cepgo := NewCepgoFetcher(fetchMock)
meta, err := cepgo.Meta()
if err != nil {
t.Errorf("%#v\n", meta)
t.Error(err)
}
if _, ok := meta["ssh_public_key"]; !ok {
t.Error("ssh_public_key is not in the meta")
}
}
func TestGlobalContext(t *testing.T) {
cepgo := NewCepgoFetcher(fetchMock)
result, err := cepgo.GlobalContext()
if err != nil {
t.Error(err)
}
if _, ok := result["some_global_key"]; !ok {
t.Error("some_global_key is not in the global context")
}
}


@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,63 @@
GoSerial
========
A simple go package to allow you to read and write from the
serial port as a stream of bytes.
Details
-------
It aims to have the same API on all platforms, including windows. As
an added bonus, the windows package does not use cgo, so you can cross
compile for windows from another platform. Unfortunately goinstall
does not currently let you cross compile so you will have to do it
manually:
GOOS=windows make clean install
Currently there is very little in the way of configurability. You can
set the baud rate. Then you can Read(), Write(), or Close() the
connection. Read() will block until at least one byte is returned.
Write is the same. There is currently no exposed way to set the
timeouts, though patches are welcome.
Currently all ports are opened with 8 data bits, 1 stop bit, no
parity, no hardware flow control, and no software flow control. This
works fine for many real devices and many faux serial devices
including usb-to-serial converters and bluetooth serial ports.
You may Read() and Write() simultaneously on the same connection (from
different goroutines).
Usage
-----
```go
package main
import (
"github.com/tarm/goserial"
"log"
)
func main() {
c := &serial.Config{Name: "COM45", Baud: 115200}
s, err := serial.OpenPort(c)
if err != nil {
log.Fatal(err)
}
n, err := s.Write([]byte("test"))
if err != nil {
log.Fatal(err)
}
buf := make([]byte, 128)
n, err = s.Read(buf)
if err != nil {
log.Fatal(err)
}
log.Print("%q", buf[:n])
}
```
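
Because Read() blocks until at least one byte has arrived, a convenient
pattern is to wrap the port in a bufio.Reader and read up to a delimiter. A
hedged sketch along those lines; the port name, baud rate, and the
CloudSigma-style "<\n\n>" query with its EOT terminator are assumptions, not
part of this package:

```go
package main

import (
	"bufio"
	"log"

	"github.com/tarm/goserial"
)

func main() {
	// The port name and baud rate are assumptions; adjust for your device.
	c := &serial.Config{Name: "/dev/ttyS1", Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Example request in the CloudSigma server-context style: "<", the key
	// (empty here), then ">", each on its own line.
	if _, err := s.Write([]byte("<\n\n>")); err != nil {
		log.Fatal(err)
	}

	// Read() blocks until at least one byte arrives, so wrap the port and
	// keep reading until the End-Of-Transmission byte (0x04).
	r := bufio.NewReader(s)
	answer, err := r.ReadBytes('\x04')
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%q", answer)
}
```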
Possible Future Work
--------------------
- better tests (loopback etc)


@@ -0,0 +1,39 @@
package serial
import (
"testing"
)
func TestConnection(t *testing.T) {
if testing.Short() {
return
}
c0 := &Config{Name: "COM5", Baud: 115200}
/*
c1 := new(Config)
c1.Name = "COM5"
c1.Baud = 115200
*/
s, err := OpenPort(c0)
if err != nil {
t.Fatal(err)
}
_, err = s.Write([]byte("test"))
if err != nil {
t.Fatal(err)
}
buf := make([]byte, 128)
_, err = s.Read(buf)
if err != nil {
t.Fatal(err)
}
}
// BUG(tarmigan): Add loopback test
func TestLoopback(t *testing.T) {
}


@@ -0,0 +1,99 @@
/*
Goserial is a simple go package to allow you to read and write from
the serial port as a stream of bytes.
It aims to have the same API on all platforms, including windows. As
an added bonus, the windows package does not use cgo, so you can cross
compile for windows from another platform. Unfortunately goinstall
does not currently let you cross compile so you will have to do it
manually:
GOOS=windows make clean install
Currently there is very little in the way of configurability. You can
set the baud rate. Then you can Read(), Write(), or Close() the
connection. Read() will block until at least one byte is returned.
Write is the same. There is currently no exposed way to set the
timeouts, though patches are welcome.
Currently all ports are opened with 8 data bits, 1 stop bit, no
parity, no hardware flow control, and no software flow control. This
works fine for many real devices and many faux serial devices
including usb-to-serial converters and bluetooth serial ports.
You may Read() and Write() simultaneously on the same connection (from
different goroutines).
Example usage:
package main
import (
"github.com/tarm/goserial"
"log"
)
func main() {
c := &serial.Config{Name: "COM5", Baud: 115200}
s, err := serial.OpenPort(c)
if err != nil {
log.Fatal(err)
}
n, err := s.Write([]byte("test"))
if err != nil {
log.Fatal(err)
}
buf := make([]byte, 128)
n, err = s.Read(buf)
if err != nil {
log.Fatal(err)
}
log.Print("%q", buf[:n])
}
*/
package serial
import "io"
// Config contains the information needed to open a serial port.
//
// Currently few options are implemented, but more may be added in the
// future (patches welcome), so it is recommended that you create a
// new config addressing the fields by name rather than by order.
//
// For example:
//
// c0 := &serial.Config{Name: "COM45", Baud: 115200}
// or
// c1 := new(serial.Config)
// c1.Name = "/dev/tty.usbserial"
// c1.Baud = 115200
//
type Config struct {
Name string
Baud int
// Size int // 0 get translated to 8
// Parity SomeNewTypeToGetCorrectDefaultOf_None
// StopBits SomeNewTypeToGetCorrectDefaultOf_1
// RTSFlowControl bool
// DTRFlowControl bool
// XONFlowControl bool
// CRLFTranslate bool
// TimeoutStuff int
}
// OpenPort opens a serial port with the specified configuration
func OpenPort(c *Config) (io.ReadWriteCloser, error) {
return openPort(c.Name, c.Baud)
}
// func Flush()
// func SendBreak()
// func RegisterBreakHandler(func())
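
Since OpenPort returns an io.ReadWriteCloser and Config is addressed by field
name, platform differences can be kept in one small spot. A brief sketch under
those assumptions (the default port names are illustrative, mirroring how
cepgo picks COM2 on Windows):

```go
package main

import (
	"log"
	"runtime"

	"github.com/tarm/goserial"
)

func main() {
	// Default port names are illustrative; real machines will differ.
	name := "/dev/ttyS1"
	if runtime.GOOS == "windows" {
		name = "COM2"
	}

	// Address Config fields by name, as the doc comment recommends, so any
	// fields added later keep their zero-value defaults.
	c := &serial.Config{Name: name, Baud: 115200}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	log.Printf("opened %s", name)
}
```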


@@ -0,0 +1,90 @@
// +build linux,!cgo
package serial
import (
"io"
"os"
"syscall"
"unsafe"
)
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
var bauds = map[int]uint32{
50: syscall.B50,
75: syscall.B75,
110: syscall.B110,
134: syscall.B134,
150: syscall.B150,
200: syscall.B200,
300: syscall.B300,
600: syscall.B600,
1200: syscall.B1200,
1800: syscall.B1800,
2400: syscall.B2400,
4800: syscall.B4800,
9600: syscall.B9600,
19200: syscall.B19200,
38400: syscall.B38400,
57600: syscall.B57600,
115200: syscall.B115200,
230400: syscall.B230400,
460800: syscall.B460800,
500000: syscall.B500000,
576000: syscall.B576000,
921600: syscall.B921600,
1000000: syscall.B1000000,
1152000: syscall.B1152000,
1500000: syscall.B1500000,
2000000: syscall.B2000000,
2500000: syscall.B2500000,
3000000: syscall.B3000000,
3500000: syscall.B3500000,
4000000: syscall.B4000000,
}
rate := bauds[baud]
if rate == 0 {
return
}
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
if err != nil {
return nil, err
}
defer func() {
if err != nil && f != nil {
f.Close()
}
}()
fd := f.Fd()
t := syscall.Termios{
Iflag: syscall.IGNPAR,
Cflag: syscall.CS8 | syscall.CREAD | syscall.CLOCAL | rate,
Cc: [32]uint8{syscall.VMIN: 1},
Ispeed: rate,
Ospeed: rate,
}
if _, _, errno := syscall.Syscall6(
syscall.SYS_IOCTL,
uintptr(fd),
uintptr(syscall.TCSETS),
uintptr(unsafe.Pointer(&t)),
0,
0,
0,
); errno != 0 {
return nil, errno
}
if err = syscall.SetNonblock(int(fd), false); err != nil {
return
}
return f, nil
}


@@ -0,0 +1,107 @@
// +build !windows,cgo
package serial
// #include <termios.h>
// #include <unistd.h>
import "C"
// TODO: Maybe change to using syscall package + ioctl instead of cgo
import (
"errors"
"fmt"
"io"
"os"
"syscall"
//"unsafe"
)
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
if err != nil {
return
}
fd := C.int(f.Fd())
if C.isatty(fd) != 1 {
f.Close()
return nil, errors.New("File is not a tty")
}
var st C.struct_termios
_, err = C.tcgetattr(fd, &st)
if err != nil {
f.Close()
return nil, err
}
var speed C.speed_t
switch baud {
case 115200:
speed = C.B115200
case 57600:
speed = C.B57600
case 38400:
speed = C.B38400
case 19200:
speed = C.B19200
case 9600:
speed = C.B9600
case 4800:
speed = C.B4800
case 2400:
speed = C.B2400
default:
f.Close()
return nil, fmt.Errorf("Unknown baud rate %v", baud)
}
_, err = C.cfsetispeed(&st, speed)
if err != nil {
f.Close()
return nil, err
}
_, err = C.cfsetospeed(&st, speed)
if err != nil {
f.Close()
return nil, err
}
// Select local mode
st.c_cflag |= (C.CLOCAL | C.CREAD)
// Select raw mode
st.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ISIG)
st.c_oflag &= ^C.tcflag_t(C.OPOST)
_, err = C.tcsetattr(fd, C.TCSANOW, &st)
if err != nil {
f.Close()
return nil, err
}
//fmt.Println("Tweaking", name)
r1, _, e := syscall.Syscall(syscall.SYS_FCNTL,
uintptr(f.Fd()),
uintptr(syscall.F_SETFL),
uintptr(0))
if e != 0 || r1 != 0 {
s := fmt.Sprint("Clearing NONBLOCK syscall error:", e, r1)
f.Close()
return nil, errors.New(s)
}
/*
r1, _, e = syscall.Syscall(syscall.SYS_IOCTL,
uintptr(f.Fd()),
uintptr(0x80045402), // IOSSIOSPEED
uintptr(unsafe.Pointer(&baud)));
if e != 0 || r1 != 0 {
s := fmt.Sprint("Baudrate syscall error:", e, r1)
f.Close()
return nil, os.NewError(s)
}
*/
return f, nil
}


@@ -0,0 +1,263 @@
// +build windows
package serial
import (
"fmt"
"io"
"os"
"sync"
"syscall"
"unsafe"
)
type serialPort struct {
f *os.File
fd syscall.Handle
rl sync.Mutex
wl sync.Mutex
ro *syscall.Overlapped
wo *syscall.Overlapped
}
type structDCB struct {
DCBlength, BaudRate uint32
flags [4]byte
wReserved, XonLim, XoffLim uint16
ByteSize, Parity, StopBits byte
XonChar, XoffChar, ErrorChar, EofChar, EvtChar byte
wReserved1 uint16
}
type structTimeouts struct {
ReadIntervalTimeout uint32
ReadTotalTimeoutMultiplier uint32
ReadTotalTimeoutConstant uint32
WriteTotalTimeoutMultiplier uint32
WriteTotalTimeoutConstant uint32
}
func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
if len(name) > 0 && name[0] != '\\' {
name = "\\\\.\\" + name
}
h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
syscall.GENERIC_READ|syscall.GENERIC_WRITE,
0,
nil,
syscall.OPEN_EXISTING,
syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
0)
if err != nil {
return nil, err
}
f := os.NewFile(uintptr(h), name)
defer func() {
if err != nil {
f.Close()
}
}()
if err = setCommState(h, baud); err != nil {
return
}
if err = setupComm(h, 64, 64); err != nil {
return
}
if err = setCommTimeouts(h); err != nil {
return
}
if err = setCommMask(h); err != nil {
return
}
ro, err := newOverlapped()
if err != nil {
return
}
wo, err := newOverlapped()
if err != nil {
return
}
port := new(serialPort)
port.f = f
port.fd = h
port.ro = ro
port.wo = wo
return port, nil
}
func (p *serialPort) Close() error {
return p.f.Close()
}
func (p *serialPort) Write(buf []byte) (int, error) {
p.wl.Lock()
defer p.wl.Unlock()
if err := resetEvent(p.wo.HEvent); err != nil {
return 0, err
}
var n uint32
err := syscall.WriteFile(p.fd, buf, &n, p.wo)
if err != nil && err != syscall.ERROR_IO_PENDING {
return int(n), err
}
return getOverlappedResult(p.fd, p.wo)
}
func (p *serialPort) Read(buf []byte) (int, error) {
if p == nil || p.f == nil {
return 0, fmt.Errorf("Invalid port on read %v %v", p, p.f)
}
p.rl.Lock()
defer p.rl.Unlock()
if err := resetEvent(p.ro.HEvent); err != nil {
return 0, err
}
var done uint32
err := syscall.ReadFile(p.fd, buf, &done, p.ro)
if err != nil && err != syscall.ERROR_IO_PENDING {
return int(done), err
}
return getOverlappedResult(p.fd, p.ro)
}
var (
nSetCommState,
nSetCommTimeouts,
nSetCommMask,
nSetupComm,
nGetOverlappedResult,
nCreateEvent,
nResetEvent uintptr
)
func init() {
k32, err := syscall.LoadLibrary("kernel32.dll")
if err != nil {
panic("LoadLibrary " + err.Error())
}
defer syscall.FreeLibrary(k32)
nSetCommState = getProcAddr(k32, "SetCommState")
nSetCommTimeouts = getProcAddr(k32, "SetCommTimeouts")
nSetCommMask = getProcAddr(k32, "SetCommMask")
nSetupComm = getProcAddr(k32, "SetupComm")
nGetOverlappedResult = getProcAddr(k32, "GetOverlappedResult")
nCreateEvent = getProcAddr(k32, "CreateEventW")
nResetEvent = getProcAddr(k32, "ResetEvent")
}
func getProcAddr(lib syscall.Handle, name string) uintptr {
addr, err := syscall.GetProcAddress(lib, name)
if err != nil {
panic(name + " " + err.Error())
}
return addr
}
func setCommState(h syscall.Handle, baud int) error {
var params structDCB
params.DCBlength = uint32(unsafe.Sizeof(params))
params.flags[0] = 0x01 // fBinary
params.flags[0] |= 0x10 // Assert DSR
params.BaudRate = uint32(baud)
params.ByteSize = 8
r, _, err := syscall.Syscall(nSetCommState, 2, uintptr(h), uintptr(unsafe.Pointer(&params)), 0)
if r == 0 {
return err
}
return nil
}
func setCommTimeouts(h syscall.Handle) error {
var timeouts structTimeouts
const MAXDWORD = 1<<32 - 1
timeouts.ReadIntervalTimeout = MAXDWORD
timeouts.ReadTotalTimeoutMultiplier = MAXDWORD
timeouts.ReadTotalTimeoutConstant = MAXDWORD - 1
/* From http://msdn.microsoft.com/en-us/library/aa363190(v=VS.85).aspx
For blocking I/O see below:
Remarks:
If an application sets ReadIntervalTimeout and
ReadTotalTimeoutMultiplier to MAXDWORD and sets
ReadTotalTimeoutConstant to a value greater than zero and
less than MAXDWORD, one of the following occurs when the
ReadFile function is called:
If there are any bytes in the input buffer, ReadFile returns
immediately with the bytes in the buffer.
If there are no bytes in the input buffer, ReadFile waits
until a byte arrives and then returns immediately.
If no bytes arrive within the time specified by
ReadTotalTimeoutConstant, ReadFile times out.
*/
r, _, err := syscall.Syscall(nSetCommTimeouts, 2, uintptr(h), uintptr(unsafe.Pointer(&timeouts)), 0)
if r == 0 {
return err
}
return nil
}
func setupComm(h syscall.Handle, in, out int) error {
r, _, err := syscall.Syscall(nSetupComm, 3, uintptr(h), uintptr(in), uintptr(out))
if r == 0 {
return err
}
return nil
}
func setCommMask(h syscall.Handle) error {
const EV_RXCHAR = 0x0001
r, _, err := syscall.Syscall(nSetCommMask, 2, uintptr(h), EV_RXCHAR, 0)
if r == 0 {
return err
}
return nil
}
func resetEvent(h syscall.Handle) error {
r, _, err := syscall.Syscall(nResetEvent, 1, uintptr(h), 0, 0)
if r == 0 {
return err
}
return nil
}
func newOverlapped() (*syscall.Overlapped, error) {
var overlapped syscall.Overlapped
r, _, err := syscall.Syscall6(nCreateEvent, 4, 0, 1, 0, 0, 0, 0)
if r == 0 {
return nil, err
}
overlapped.HEvent = syscall.Handle(r)
return &overlapped, nil
}
func getOverlappedResult(h syscall.Handle, overlapped *syscall.Overlapped) (int, error) {
var n int
r, _, err := syscall.Syscall6(nGetOverlappedResult, 4,
uintptr(h),
uintptr(unsafe.Pointer(overlapped)),
uintptr(unsafe.Pointer(&n)), 1, 0, 0)
if r == 0 {
return n, err
}
return n, nil
}