diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
deleted file mode 100644
index 77a58e9..0000000
--- a/Godeps/Godeps.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "ImportPath": "github.com/coreos/coreos-cloudinit",
- "GoVersion": "go1.3.3",
- "Packages": [
- "./..."
- ],
- "Deps": [
- {
- "ImportPath": "github.com/cloudsigma/cepgo",
- "Rev": "1bfc4895bf5c4d3b599f3f6ee142299488c8739b"
- },
- {
- "ImportPath": "github.com/coreos/go-systemd/dbus",
- "Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
- },
- {
- "ImportPath": "github.com/coreos/yaml",
- "Rev": "6b16a5714269b2f70720a45406b1babd947a17ef"
- },
- {
- "ImportPath": "github.com/dotcloud/docker/pkg/netlink",
- "Comment": "v0.11.1-359-g55d41c3e21e1",
- "Rev": "55d41c3e21e1593b944c06196ffb2ac57ab7f653"
- },
- {
- "ImportPath": "github.com/guelfey/go.dbus",
- "Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda"
- },
- {
- "ImportPath": "github.com/tarm/goserial",
- "Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
- }
- ]
-}
diff --git a/Godeps/Readme b/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
deleted file mode 100644
index f037d68..0000000
--- a/Godeps/_workspace/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pkg
-/bin
diff --git a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore b/Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore
deleted file mode 100644
index 8365624..0000000
--- a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
diff --git a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE b/Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md b/Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md
deleted file mode 100644
index 1125d86..0000000
--- a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-cepgo
-=====
-
-Cepko implements easy-to-use communication with CloudSigma's VMs through a
-virtual serial port without bothering with formatting the messages properly nor
-parsing the output with the specific and sometimes confusing shell tools for
-that purpose.
-
-Having the server definition accessible by the VM can be useful in various
-ways. For example it is possible to easily determine from within the VM, which
-network interfaces are connected to public and which to private network.
-Another use is to pass some data to initial VM setup scripts, like setting the
-hostname to the VM name or passing ssh public keys through server meta.
-
-Example usage:
-
- package main
-
- import (
- "fmt"
-
- "github.com/cloudsigma/cepgo"
- )
-
- func main() {
- c := cepgo.NewCepgo()
- result, err := c.Meta()
- if err != nil {
- panic(err)
- }
- fmt.Printf("%#v", result)
- }
-
-Output:
-
- map[string]interface {}{
- "optimize_for":"custom",
- "ssh_public_key":"ssh-rsa AAA...",
- "description":"[...]",
- }
-
-For more information take a look at the Server Context section of CloudSigma
-API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
diff --git a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go b/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go
deleted file mode 100644
index ce2deeb..0000000
--- a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Cepko implements easy-to-use communication with CloudSigma's VMs through a
-// virtual serial port without bothering with formatting the messages properly
-// nor parsing the output with the specific and sometimes confusing shell tools
-// for that purpose.
-//
-// Having the server definition accessible by the VM can be useful in various
-// ways. For example it is possible to easily determine from within the VM,
-// which network interfaces are connected to public and which to private
-// network. Another use is to pass some data to initial VM setup scripts, like
-// setting the hostname to the VM name or passing ssh public keys through
-// server meta.
-//
-// Example usage:
-//
-// package main
-//
-// import (
-// "fmt"
-//
-// "github.com/cloudsigma/cepgo"
-// )
-//
-// func main() {
-// c := cepgo.NewCepgo()
-// result, err := c.Meta()
-// if err != nil {
-// panic(err)
-// }
-// fmt.Printf("%#v", result)
-// }
-//
-// Output:
-//
-// map[string]string{
-// "optimize_for":"custom",
-// "ssh_public_key":"ssh-rsa AAA...",
-// "description":"[...]",
-// }
-//
-// For more information take a look at the Server Context section API Docs:
-// http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
-package cepgo
-
-import (
- "bufio"
- "encoding/json"
- "errors"
- "fmt"
- "runtime"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/tarm/goserial"
-)
-
-const (
- requestPattern = "<\n%s\n>"
- EOT = '\x04' // End Of Transmission
-)
-
-var (
- SerialPort string = "/dev/ttyS1"
- Baud int = 115200
-)
-
-// Sets the serial port. If the operating system is Windows, CloudSigma's server
-// context is exposed on the COM2 port; otherwise (linux, freebsd, darwin) the
-// port is left at the default /dev/ttyS1.
-func init() {
- if runtime.GOOS == "windows" {
- SerialPort = "COM2"
- }
-}
-
-// The default fetcher makes the connection to the serial port,
-// writes given query and reads until the EOT symbol.
-func fetchViaSerialPort(key string) ([]byte, error) {
- config := &serial.Config{Name: SerialPort, Baud: Baud}
- connection, err := serial.OpenPort(config)
- if err != nil {
- return nil, err
- }
-
- query := fmt.Sprintf(requestPattern, key)
- if _, err := connection.Write([]byte(query)); err != nil {
- return nil, err
- }
-
- reader := bufio.NewReader(connection)
- answer, err := reader.ReadBytes(EOT)
- if err != nil {
- return nil, err
- }
-
- return answer[0 : len(answer)-1], nil
-}
-
-// Queries to the serial port can be executed only from an instance of this type.
-// The result of each query can be either an interface{}, a map[string]string, or
-// a plain string in case a single value is returned. There is also a public method
-// that calls the fetcher directly and returns the raw []byte from the serial port.
-type Cepgo struct {
- fetcher func(string) ([]byte, error)
-}
-
-// Creates a Cepgo instance with the default serial port fetcher.
-func NewCepgo() *Cepgo {
- cepgo := new(Cepgo)
- cepgo.fetcher = fetchViaSerialPort
- return cepgo
-}
-
-// Creates a Cepgo instance with custom fetcher.
-func NewCepgoFetcher(fetcher func(string) ([]byte, error)) *Cepgo {
- cepgo := new(Cepgo)
- cepgo.fetcher = fetcher
- return cepgo
-}
-
-// Fetches raw []byte from the serial port by calling the fetcher member directly.
-func (c *Cepgo) FetchRaw(key string) ([]byte, error) {
- return c.fetcher(key)
-}
-
-// Fetches a single key, tries to unmarshal the result from JSON and returns
-// it. If the unmarshalling fails it is safe to assume the result is just a
-// plain string and returns it as-is.
-func (c *Cepgo) Key(key string) (interface{}, error) {
- var result interface{}
-
- fetched, err := c.FetchRaw(key)
- if err != nil {
- return nil, err
- }
-
- err = json.Unmarshal(fetched, &result)
- if err != nil {
- return string(fetched), nil
- }
- return result, nil
-}
-
-// Fetches all the server context. Equivalent of c.Key("")
-func (c *Cepgo) All() (interface{}, error) {
- return c.Key("")
-}
-
-// Fetches only the object meta field and makes sure to return a proper
-// map[string]string
-func (c *Cepgo) Meta() (map[string]string, error) {
- rawMeta, err := c.Key("/meta/")
- if err != nil {
- return nil, err
- }
-
- return typeAssertToMapOfStrings(rawMeta)
-}
-
-// Fetches only the global context and makes sure to return a proper
-// map[string]string
-func (c *Cepgo) GlobalContext() (map[string]string, error) {
- rawContext, err := c.Key("/global_context/")
- if err != nil {
- return nil, err
- }
-
- return typeAssertToMapOfStrings(rawContext)
-}
-
-// Just a little helper function that uses type assertions in order to convert
-// an interface{} to a map[string]string, if possible.
-func typeAssertToMapOfStrings(raw interface{}) (map[string]string, error) {
- result := make(map[string]string)
-
- dictionary, ok := raw.(map[string]interface{})
- if !ok {
- return nil, errors.New("Received bytes are formatted badly")
- }
-
- for key, rawValue := range dictionary {
- if value, ok := rawValue.(string); ok {
- result[key] = value
- } else {
- return nil, errors.New("Server context metadata is formatted badly")
- }
- }
- return result, nil
-}
diff --git a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go b/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go
deleted file mode 100644
index 8de1df9..0000000
--- a/Godeps/_workspace/src/github.com/cloudsigma/cepgo/cepgo_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package cepgo
-
-import (
- "encoding/json"
- "testing"
-)
-
-func fetchMock(key string) ([]byte, error) {
- context := []byte(`{
- "context": true,
- "cpu": 4000,
- "cpu_model": null,
- "cpus_instead_of_cores": false,
- "enable_numa": false,
- "global_context": {
- "some_global_key": "some_global_val"
- },
- "grantees": [],
- "hv_relaxed": false,
- "hv_tsc": false,
- "jobs": [],
- "mem": 4294967296,
- "meta": {
- "base64_fields": "cloudinit-user-data",
- "cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
- "ssh_public_key": "ssh-rsa AAAAB2NzaC1yc2E.../hQ5D5 john@doe"
- },
- "name": "coreos",
- "nics": [
- {
- "runtime": {
- "interface_type": "public",
- "ip_v4": {
- "uuid": "31.171.251.74"
- },
- "ip_v6": null
- },
- "vlan": null
- }
- ],
- "smp": 2,
- "status": "running",
- "uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
- }`)
-
- if key == "" {
- return context, nil
- }
-
- var marshalledContext map[string]interface{}
-
- err := json.Unmarshal(context, &marshalledContext)
- if err != nil {
- return nil, err
- }
-
- if key[0] == '/' {
- key = key[1:]
- }
- if key[len(key)-1] == '/' {
- key = key[:len(key)-1]
- }
-
- return json.Marshal(marshalledContext[key])
-}
-
-func TestAll(t *testing.T) {
- cepgo := NewCepgoFetcher(fetchMock)
-
- result, err := cepgo.All()
- if err != nil {
- t.Error(err)
- }
-
- for _, key := range []string{"meta", "name", "uuid", "global_context"} {
- if _, ok := result.(map[string]interface{})[key]; !ok {
- t.Errorf("%s not in all keys", key)
- }
- }
-}
-
-func TestKey(t *testing.T) {
- cepgo := NewCepgoFetcher(fetchMock)
-
- result, err := cepgo.Key("uuid")
- if err != nil {
- t.Error(err)
- }
-
- if _, ok := result.(string); !ok {
- t.Errorf("%#v\n", result)
-
- t.Error("Fetching the uuid did not return a string")
- }
-}
-
-func TestMeta(t *testing.T) {
- cepgo := NewCepgoFetcher(fetchMock)
-
- meta, err := cepgo.Meta()
- if err != nil {
- t.Errorf("%#v\n", meta)
- t.Error(err)
- }
-
- if _, ok := meta["ssh_public_key"]; !ok {
- t.Error("ssh_public_key is not in the meta")
- }
-}
-
-func TestGlobalContext(t *testing.T) {
- cepgo := NewCepgoFetcher(fetchMock)
-
- result, err := cepgo.GlobalContext()
- if err != nil {
- t.Error(err)
- }
-
- if _, ok := result["some_global_key"]; !ok {
- t.Error("some_global_key is not in the global context")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go
deleted file mode 100644
index ba7968e..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
-package dbus
-
-import (
- "os"
- "strconv"
- "strings"
- "sync"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
-)
-
-const signalBuffer = 100
-
-// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for
-// serializing special characters.
-func ObjectPath(path string) dbus.ObjectPath {
- path = strings.Replace(path, ".", "_2e", -1)
- path = strings.Replace(path, "-", "_2d", -1)
- path = strings.Replace(path, "@", "_40", -1)
-
- return dbus.ObjectPath(path)
-}
-
-// Conn is a connection to systemd's D-Bus endpoint.
-type Conn struct {
- sysconn *dbus.Conn
- sysobj *dbus.Object
- jobListener struct {
- jobs map[dbus.ObjectPath]chan string
- sync.Mutex
- }
- subscriber struct {
- updateCh chan<- *SubStateUpdate
- errCh chan<- error
- sync.Mutex
- ignore map[dbus.ObjectPath]int64
- cleanIgnore int64
- }
- dispatch map[string]func(dbus.Signal)
-}
-
-// New() establishes a connection to the system bus and authenticates.
-func New() (*Conn, error) {
- c := new(Conn)
-
- if err := c.initConnection(); err != nil {
- return nil, err
- }
-
- c.initJobs()
- return c, nil
-}
-
-func (c *Conn) initConnection() error {
- var err error
- c.sysconn, err = dbus.SystemBusPrivate()
- if err != nil {
- return err
- }
-
- // Only use EXTERNAL method, and hardcode the uid (not username)
- // to avoid a username lookup (which requires a dynamically linked
- // libc)
- methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
-
- err = c.sysconn.Auth(methods)
- if err != nil {
- c.sysconn.Close()
- return err
- }
-
- err = c.sysconn.Hello()
- if err != nil {
- c.sysconn.Close()
- return err
- }
-
- c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
-
- // Setup the listeners on jobs so that we can get completions
- c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
- "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
- c.initSubscription()
- c.initDispatch()
-
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go
deleted file mode 100644
index 2e80f73..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dbus
-
-import (
- "testing"
-)
-
-// TestObjectPath ensures path encoding of the systemd rules works.
-func TestObjectPath(t *testing.T) {
- input := "/silly-path/to@a/unit..service"
- output := ObjectPath(input)
- expected := "/silly_2dpath/to_40a/unit_2e_2eservice"
-
- if string(output) != expected {
- t.Fatalf("Output '%s' did not match expected '%s'", output, expected)
- }
-}
-
-// TestNew ensures that New() works without errors.
-func TestNew(t *testing.T) {
- _, err := New()
-
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go
deleted file mode 100644
index 4f552b7..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dbus
-
-import (
- "errors"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
-)
-
-func (c *Conn) initJobs() {
- c.jobListener.jobs = make(map[dbus.ObjectPath]chan string)
-}
-
-func (c *Conn) jobComplete(signal *dbus.Signal) {
- var id uint32
- var job dbus.ObjectPath
- var unit string
- var result string
- dbus.Store(signal.Body, &id, &job, &unit, &result)
- c.jobListener.Lock()
- out, ok := c.jobListener.jobs[job]
- if ok {
- out <- result
- delete(c.jobListener.jobs, job)
- }
- c.jobListener.Unlock()
-}
-
-func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) {
- c.jobListener.Lock()
- defer c.jobListener.Unlock()
-
- ch := make(chan string, 1)
- var path dbus.ObjectPath
- err := c.sysobj.Call(job, 0, args...).Store(&path)
- if err != nil {
- return nil, err
- }
- c.jobListener.jobs[path] = ch
- return ch, nil
-}
-
-func (c *Conn) runJob(job string, args ...interface{}) (string, error) {
- respCh, err := c.startJob(job, args...)
- if err != nil {
- return "", err
- }
- return <-respCh, nil
-}
-
-// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
-// specified by the mode string).
-//
-// Takes the unit to activate, plus a mode string. The mode needs to be one of
-// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
-// "replace" the call will start the unit and its dependencies, possibly
-// replacing already queued jobs that conflict with this. If "fail" the call
-// will start the unit and its dependencies, but will fail if this would change
-// an already queued job. If "isolate" the call will start the unit in question
-// and terminate all units that aren't dependencies of it. If
-// "ignore-dependencies" it will start a unit but ignore all its dependencies.
-// If "ignore-requirements" it will start a unit but only ignore the
-// requirement dependencies. It is not recommended to make use of the latter
-// two options.
-//
-// Result string: one of done, canceled, timeout, failed, dependency, skipped.
-// done indicates successful execution of a job. canceled indicates that a job
-// has been canceled before it finished execution. timeout indicates that the
-// job timeout was reached. failed indicates that the job failed. dependency
-// indicates that a job this job has been depending on failed and the job hence
-// has been removed too. skipped indicates that a job was skipped because it
-// didn't apply to the unit's current state.
-func (c *Conn) StartUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode)
-}
-
-// StopUnit is similar to StartUnit but stops the specified unit rather
-// than starting it.
-func (c *Conn) StopUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode)
-}
-
-// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
-func (c *Conn) ReloadUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
-}
-
-// RestartUnit restarts a service. If a service is restarted that isn't
-// running it will be started.
-func (c *Conn) RestartUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
-}
-
-// TryRestartUnit is like RestartUnit, except that a service that isn't running
-// is not affected by the restart.
-func (c *Conn) TryRestartUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
-}
-
-// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
-// restart otherwise.
-func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
-}
-
-// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
-// "Try" flavored restart otherwise.
-func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
-}
-
-// StartTransientUnit() may be used to create and start a transient unit, which
-// will be released as soon as it is not running or referenced anymore or the
-// system is rebooted. name is the unit name including suffix, and must be
-// unique. mode is the same as in StartUnit(), properties contains properties
-// of the unit.
-func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) {
- return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
-}
-
-// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
-// processes are killed.
-func (c *Conn) KillUnit(name string, signal int32) {
- c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
-}
-
-// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
-func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
- var err error
- var props map[string]dbus.Variant
-
- path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit)
- if !path.IsValid() {
- return nil, errors.New("invalid unit name: " + unit)
- }
-
- obj := c.sysconn.Object("org.freedesktop.systemd1", path)
- err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
- if err != nil {
- return nil, err
- }
-
- out := make(map[string]interface{}, len(props))
- for k, v := range props {
- out[k] = v.Value()
- }
-
- return out, nil
-}
-
-// GetUnitProperties takes the unit name and returns all of its dbus object properties.
-func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
- return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
-}
-
-func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
- var err error
- var prop dbus.Variant
-
- path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit)
- if !path.IsValid() {
- return nil, errors.New("invalid unit name: " + unit)
- }
-
- obj := c.sysconn.Object("org.freedesktop.systemd1", path)
- err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
- if err != nil {
- return nil, err
- }
-
- return &Property{Name: propertyName, Value: prop}, nil
-}
-
-func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
- return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
-}
-
-// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
-// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
-// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
-func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
- return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
-}
-
-// SetUnitProperties() may be used to modify certain unit properties at runtime.
-// Not all properties may be changed at runtime, but many resource management
-// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
-// instantly, and stored on disk for future boots, unless runtime is true, in which
-// case the settings only apply until the next reboot. name is the name of the unit
-// to modify. properties are the settings to set, encoded as an array of property
-// name and value pairs.
-func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
- return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store()
-}
-
-func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
- return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
-}
-
-// ListUnits returns an array with all currently loaded units. Note that
-// units may be known by multiple names at the same time, and hence there might
-// be more unit names loaded than actual units behind them.
-func (c *Conn) ListUnits() ([]UnitStatus, error) {
- result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
- if err != nil {
- return nil, err
- }
-
- resultInterface := make([]interface{}, len(result))
- for i := range result {
- resultInterface[i] = result[i]
- }
-
- status := make([]UnitStatus, len(result))
- statusInterface := make([]interface{}, len(status))
- for i := range status {
- statusInterface[i] = &status[i]
- }
-
- err = dbus.Store(resultInterface, statusInterface...)
- if err != nil {
- return nil, err
- }
-
- return status, nil
-}
-
-type UnitStatus struct {
- Name string // The primary unit name as string
- Description string // The human readable description string
- LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
- ActiveState string // The active state (i.e. whether the unit is currently started or not)
- SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
- Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
- Path dbus.ObjectPath // The unit object path
- JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
- JobType string // The job type as string
- JobPath dbus.ObjectPath // The job object path
-}
-
-// EnableUnitFiles() may be used to enable one or more units in the system (by
-// creating symlinks to them in /etc or /run).
-//
-// It takes a list of unit files to enable (either just file names or full
-// absolute paths if the unit files are residing outside the usual unit
-// search paths), and two booleans: the first controls whether the unit shall
-// be enabled for runtime only (true, /run), or persistently (false, /etc).
-// The second one controls whether symlinks pointing to other units shall
-// be replaced if necessary.
-//
-// This call returns one boolean and an array with the changes made. The
-// boolean signals whether the unit files contained any enablement
-// information (i.e. an [Install] section). The changes list consists of
-// structures with three strings: the type of the change (one of symlink
-// or unlink), the file name of the symlink and the destination of the
-// symlink.
-func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
- var carries_install_info bool
-
- result := make([][]interface{}, 0)
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
- if err != nil {
- return false, nil, err
- }
-
- resultInterface := make([]interface{}, len(result))
- for i := range result {
- resultInterface[i] = result[i]
- }
-
- changes := make([]EnableUnitFileChange, len(result))
- changesInterface := make([]interface{}, len(changes))
- for i := range changes {
- changesInterface[i] = &changes[i]
- }
-
- err = dbus.Store(resultInterface, changesInterface...)
- if err != nil {
- return false, nil, err
- }
-
- return carries_install_info, changes, nil
-}
-
-type EnableUnitFileChange struct {
- Type string // Type of the change (one of symlink or unlink)
- Filename string // File name of the symlink
- Destination string // Destination of the symlink
-}
-
-// DisableUnitFiles() may be used to disable one or more units in the system (by
-// removing symlinks to them from /etc or /run).
-//
-// It takes a list of unit files to disable (either just file names or full
-// absolute paths if the unit files are residing outside the usual unit
-// search paths), and one boolean: whether the unit was enabled for runtime
-// only (true, /run), or persistently (false, /etc).
-//
-// This call returns an array with the changes made. The changes list
-// consists of structures with three strings: the type of the change (one of
-// symlink or unlink), the file name of the symlink and the destination of the
-// symlink.
-func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
- result := make([][]interface{}, 0)
- err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result)
- if err != nil {
- return nil, err
- }
-
- resultInterface := make([]interface{}, len(result))
- for i := range result {
- resultInterface[i] = result[i]
- }
-
- changes := make([]DisableUnitFileChange, len(result))
- changesInterface := make([]interface{}, len(changes))
- for i := range changes {
- changesInterface[i] = &changes[i]
- }
-
- err = dbus.Store(resultInterface, changesInterface...)
- if err != nil {
- return nil, err
- }
-
- return changes, nil
-}
-
-type DisableUnitFileChange struct {
- Type string // Type of the change (one of symlink or unlink)
- Filename string // File name of the symlink
- Destination string // Destination of the symlink
-}
-
-// Reload instructs systemd to scan for and reload unit files. This is
-// equivalent to a 'systemctl daemon-reload'.
-func (c *Conn) Reload() error {
- return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go
deleted file mode 100644
index 123de37..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dbus
-
-import (
- "fmt"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
- "math/rand"
- "os"
- "path/filepath"
- "reflect"
- "testing"
-)
-
-func setupConn(t *testing.T) *Conn {
- conn, err := New()
- if err != nil {
- t.Fatal(err)
- }
-
- return conn
-}
-
-func setupUnit(target string, conn *Conn, t *testing.T) {
- // Blindly stop the unit in case it is running
- conn.StopUnit(target, "replace")
-
- // Blindly remove the symlink in case it exists
- targetRun := filepath.Join("/run/systemd/system/", target)
- err := os.Remove(targetRun)
-
- // 1. Enable the unit
- abs, err := filepath.Abs("../fixtures/" + target)
- if err != nil {
- t.Fatal(err)
- }
-
- fixture := []string{abs}
-
- install, changes, err := conn.EnableUnitFiles(fixture, true, true)
- if err != nil {
- t.Fatal(err)
- }
-
- if install != false {
- t.Fatal("Install was true")
- }
-
- if len(changes) < 1 {
- t.Fatalf("Expected one change, got %v", changes)
- }
-
- if changes[0].Filename != targetRun {
- t.Fatal("Unexpected target filename")
- }
-}
-
-// Ensure that basic unit starting and stopping works.
-func TestStartStopUnit(t *testing.T) {
- target := "start-stop.service"
- conn := setupConn(t)
-
- setupUnit(target, conn, t)
-
- // 2. Start the unit
- job, err := conn.StartUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- if job != "done" {
- t.Fatal("Job is not done, %v", job)
- }
-
- units, err := conn.ListUnits()
-
- var unit *UnitStatus
- for _, u := range units {
- if u.Name == target {
- unit = &u
- }
- }
-
- if unit == nil {
- t.Fatalf("Test unit not found in list")
- }
-
- if unit.ActiveState != "active" {
- t.Fatalf("Test unit not active")
- }
-
- // 3. Stop the unit
- job, err = conn.StopUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- units, err = conn.ListUnits()
-
- unit = nil
- for _, u := range units {
- if u.Name == target {
- unit = &u
- }
- }
-
- if unit != nil {
- t.Fatalf("Test unit found in list, should be stopped")
- }
-}
-
-// Enables a unit and then immediately tears it down
-func TestEnableDisableUnit(t *testing.T) {
- target := "enable-disable.service"
- conn := setupConn(t)
-
- setupUnit(target, conn, t)
-
- abs, err := filepath.Abs("../fixtures/" + target)
- if err != nil {
- t.Fatal(err)
- }
-
- path := filepath.Join("/run/systemd/system/", target)
-
- // 2. Disable the unit
- changes, err := conn.DisableUnitFiles([]string{abs}, true)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(changes) != 1 {
- t.Fatalf("Changes should include the path, %v", changes)
- }
- if changes[0].Filename != path {
- t.Fatalf("Change should include correct filename, %+v", changes[0])
- }
- if changes[0].Destination != "" {
- t.Fatalf("Change destination should be empty, %+v", changes[0])
- }
-}
-
-// TestGetUnitProperties reads the `-.mount` which should exist on all systemd
-// systems and ensures that one of its properties is valid.
-func TestGetUnitProperties(t *testing.T) {
- conn := setupConn(t)
-
- unit := "-.mount"
-
- info, err := conn.GetUnitProperties(unit)
- if err != nil {
- t.Fatal(err)
- }
-
- names := info["Wants"].([]string)
-
- if len(names) < 1 {
- t.Fatal("/ is unwanted")
- }
-
- if names[0] != "system.slice" {
- t.Fatal("unexpected wants for /")
- }
-
- prop, err := conn.GetUnitProperty(unit, "Wants")
- if err != nil {
- t.Fatal(err)
- }
-
- if prop.Name != "Wants" {
- t.Fatal("unexpected property name")
- }
-
- val := prop.Value.Value().([]string)
- if !reflect.DeepEqual(val, names) {
- t.Fatal("unexpected property value")
- }
-}
-
-// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a
-// unit with an invalid name. This test should be run with --test.timeout set,
-// as a fail will manifest as GetUnitProperties hanging indefinitely.
-func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) {
- conn := setupConn(t)
-
- unit := "//invalid#$^/"
-
- _, err := conn.GetUnitProperties(unit)
- if err == nil {
- t.Fatal("Expected an error, got nil")
- }
-
- _, err = conn.GetUnitProperty(unit, "Wants")
- if err == nil {
- t.Fatal("Expected an error, got nil")
- }
-}
-
-// TestSetUnitProperties changes a cgroup setting on the `tmp.mount`
-// which should exist on all systemd systems and ensures that the
-// property was set.
-func TestSetUnitProperties(t *testing.T) {
- conn := setupConn(t)
-
- unit := "tmp.mount"
-
- if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil {
- t.Fatal(err)
- }
-
- info, err := conn.GetUnitTypeProperties(unit, "Mount")
- if err != nil {
- t.Fatal(err)
- }
-
- value := info["CPUShares"].(uint64)
- if value != 1023 {
- t.Fatal("CPUShares of unit is not 1023, %s", value)
- }
-}
-
-// Ensure that basic transient unit starting and stopping works.
-func TestStartStopTransientUnit(t *testing.T) {
- conn := setupConn(t)
-
- props := []Property{
- PropExecStart([]string{"/bin/sleep", "400"}, false),
- }
- target := fmt.Sprintf("testing-transient-%d.service", rand.Int())
-
- // Start the unit
- job, err := conn.StartTransientUnit(target, "replace", props...)
- if err != nil {
- t.Fatal(err)
- }
-
- if job != "done" {
- t.Fatal("Job is not done, %v", job)
- }
-
- units, err := conn.ListUnits()
-
- var unit *UnitStatus
- for _, u := range units {
- if u.Name == target {
- unit = &u
- }
- }
-
- if unit == nil {
- t.Fatalf("Test unit not found in list")
- }
-
- if unit.ActiveState != "active" {
- t.Fatalf("Test unit not active")
- }
-
- // 3. Stop the unit
- job, err = conn.StopUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- units, err = conn.ListUnits()
-
- unit = nil
- for _, u := range units {
- if u.Name == target {
- unit = &u
- }
- }
-
- if unit != nil {
- t.Fatalf("Test unit found in list, should be stopped")
- }
-}
-
-func TestConnJobListener(t *testing.T) {
- target := "start-stop.service"
- conn := setupConn(t)
-
- setupUnit(target, conn, t)
-
- jobSize := len(conn.jobListener.jobs)
-
- _, err := conn.StartUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = conn.StopUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- currentJobSize := len(conn.jobListener.jobs)
- if jobSize != currentJobSize {
- t.Fatal("JobListener jobs leaked")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go
deleted file mode 100644
index a3593a9..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dbus
-
-import (
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
-)
-
-// From the systemd docs:
-//
-// The properties array of StartTransientUnit() may take many of the settings
-// that may also be configured in unit files. Not all parameters are currently
-// accepted though, but we plan to cover more properties with future release.
-// Currently you may set the Description, Slice and all dependency types of
-// units, as well as RemainAfterExit, ExecStart for service units,
-// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
-// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
-// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
-// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
-// directly to their counterparts in unit files and as normal D-Bus object
-// properties. The exception here is the PIDs field of scope units which is
-// used for construction of the scope only and specifies the initial PIDs to
-// add to the scope object.
-
-type Property struct {
- Name string
- Value dbus.Variant
-}
-
-type PropertyCollection struct {
- Name string
- Properties []Property
-}
-
-type execStart struct {
- Path string // the binary path to execute
- Args []string // an array with all arguments to pass to the executed command, starting with argument 0
- UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
-}
-
-// PropExecStart sets the ExecStart service property. The first argument is a
-// slice with the binary path to execute followed by the arguments to pass to
-// the executed command. See
-// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
-func PropExecStart(command []string, uncleanIsFailure bool) Property {
- execStarts := []execStart{
- execStart{
- Path: command[0],
- Args: command,
- UncleanIsFailure: uncleanIsFailure,
- },
- }
-
- return Property{
- Name: "ExecStart",
- Value: dbus.MakeVariant(execStarts),
- }
-}
-
-// PropRemainAfterExit sets the RemainAfterExit service property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
-func PropRemainAfterExit(b bool) Property {
- return Property{
- Name: "RemainAfterExit",
- Value: dbus.MakeVariant(b),
- }
-}
-
-// PropDescription sets the Description unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
-func PropDescription(desc string) Property {
- return Property{
- Name: "Description",
- Value: dbus.MakeVariant(desc),
- }
-}
-
-func propDependency(name string, units []string) Property {
- return Property{
- Name: name,
- Value: dbus.MakeVariant(units),
- }
-}
-
-// PropRequires sets the Requires unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
-func PropRequires(units ...string) Property {
- return propDependency("Requires", units)
-}
-
-// PropRequiresOverridable sets the RequiresOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
-func PropRequiresOverridable(units ...string) Property {
- return propDependency("RequiresOverridable", units)
-}
-
-// PropRequisite sets the Requisite unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
-func PropRequisite(units ...string) Property {
- return propDependency("Requisite", units)
-}
-
-// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
-func PropRequisiteOverridable(units ...string) Property {
- return propDependency("RequisiteOverridable", units)
-}
-
-// PropWants sets the Wants unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
-func PropWants(units ...string) Property {
- return propDependency("Wants", units)
-}
-
-// PropBindsTo sets the BindsTo unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
-func PropBindsTo(units ...string) Property {
- return propDependency("BindsTo", units)
-}
-
-// PropRequiredBy sets the RequiredBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
-func PropRequiredBy(units ...string) Property {
- return propDependency("RequiredBy", units)
-}
-
-// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
-func PropRequiredByOverridable(units ...string) Property {
- return propDependency("RequiredByOverridable", units)
-}
-
-// PropWantedBy sets the WantedBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
-func PropWantedBy(units ...string) Property {
- return propDependency("WantedBy", units)
-}
-
-// PropBoundBy sets the BoundBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
-func PropBoundBy(units ...string) Property {
- return propDependency("BoundBy", units)
-}
-
-// PropConflicts sets the Conflicts unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
-func PropConflicts(units ...string) Property {
- return propDependency("Conflicts", units)
-}
-
-// PropConflictedBy sets the ConflictedBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
-func PropConflictedBy(units ...string) Property {
- return propDependency("ConflictedBy", units)
-}
-
-// PropBefore sets the Before unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
-func PropBefore(units ...string) Property {
- return propDependency("Before", units)
-}
-
-// PropAfter sets the After unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
-func PropAfter(units ...string) Property {
- return propDependency("After", units)
-}
-
-// PropOnFailure sets the OnFailure unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
-func PropOnFailure(units ...string) Property {
- return propDependency("OnFailure", units)
-}
-
-// PropTriggers sets the Triggers unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
-func PropTriggers(units ...string) Property {
- return propDependency("Triggers", units)
-}
-
-// PropTriggeredBy sets the TriggeredBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
-func PropTriggeredBy(units ...string) Property {
- return propDependency("TriggeredBy", units)
-}
-
-// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
-func PropPropagatesReloadTo(units ...string) Property {
- return propDependency("PropagatesReloadTo", units)
-}
-
-// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
-func PropRequiresMountsFor(units ...string) Property {
- return propDependency("RequiresMountsFor", units)
-}
-
-// PropSlice sets the Slice unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
-func PropSlice(slice string) Property {
- return Property{
- Name: "Slice",
- Value: dbus.MakeVariant(slice),
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go
deleted file mode 100644
index 88378b2..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package dbus
-
-type set struct {
- data map[string]bool
-}
-
-func (s *set) Add(value string) {
- s.data[value] = true
-}
-
-func (s *set) Remove(value string) {
- delete(s.data, value)
-}
-
-func (s *set) Contains(value string) (exists bool) {
- _, exists = s.data[value]
- return
-}
-
-func (s *set) Length() int {
-	return len(s.data)
-}
-
-func newSet() *set {
-	return &set{make(map[string]bool)}
-}
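
For reference, a minimal sketch of how this unexported set is exercised from inside the dbus package. The helper name exampleSetUsage is hypothetical; only the Add, Remove, Contains and Length methods defined in the file above are assumed.

```Go
package dbus

// exampleSetUsage is an illustrative helper (not part of the library): it
// shows the Add/Remove/Contains/Length surface of the unexported set type.
func exampleSetUsage() bool {
	s := newSet()
	s.Add("foo.service")
	s.Add("bar.service")
	s.Remove("bar.service")
	// After removing "bar.service", only "foo.service" remains.
	return s.Contains("foo.service") && s.Length() == 1
}
```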
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go
deleted file mode 100644
index d8d174d..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package dbus
-
-import (
- "testing"
-)
-
-// TestBasicSetActions asserts that Add & Remove behavior is correct
-func TestBasicSetActions(t *testing.T) {
- s := newSet()
-
- if s.Contains("foo") {
- t.Fatal("set should not contain 'foo'")
- }
-
- s.Add("foo")
-
- if !s.Contains("foo") {
- t.Fatal("set should contain 'foo'")
- }
-
- s.Remove("foo")
-
- if s.Contains("foo") {
- t.Fatal("set should not contain 'foo'")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go
deleted file mode 100644
index e8b04af..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-Copyright 2013 CoreOS Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package dbus
-
-import (
- "errors"
- "time"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
-)
-
-const (
- cleanIgnoreInterval = int64(10 * time.Second)
- ignoreInterval = int64(30 * time.Millisecond)
-)
-
-// Subscribe sets up this connection to subscribe to all systemd dbus events.
-// This is required before calling SubscribeUnits. When the connection closes
-// systemd will automatically stop sending signals so there is no need to
-// explicitly call Unsubscribe().
-func (c *Conn) Subscribe() error {
- c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
- "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
- c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
- "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
-
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
- if err != nil {
- c.sysconn.Close()
- return err
- }
-
- return nil
-}
-
-// Unsubscribe this connection from systemd dbus events.
-func (c *Conn) Unsubscribe() error {
- err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
- if err != nil {
- c.sysconn.Close()
- return err
- }
-
- return nil
-}
-
-func (c *Conn) initSubscription() {
- c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
-}
-
-func (c *Conn) initDispatch() {
- ch := make(chan *dbus.Signal, signalBuffer)
-
- c.sysconn.Signal(ch)
-
- go func() {
- for {
- signal := <-ch
- switch signal.Name {
- case "org.freedesktop.systemd1.Manager.JobRemoved":
- c.jobComplete(signal)
-
- unitName := signal.Body[2].(string)
- var unitPath dbus.ObjectPath
- c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
- if unitPath != dbus.ObjectPath("") {
- c.sendSubStateUpdate(unitPath)
- }
- case "org.freedesktop.systemd1.Manager.UnitNew":
- c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath))
- case "org.freedesktop.DBus.Properties.PropertiesChanged":
- if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
- // we only care about SubState updates, which are a Unit property
- c.sendSubStateUpdate(signal.Path)
- }
- }
- }
- }()
-}
-
-// Returns two unbuffered channels which will receive all changed units every
-// interval. Deleted units are sent as nil.
-func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
- return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
-}
-
-// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
-// size of the channels, the comparison function for detecting changes and a filter
-// function for cutting down on the noise that your channel receives.
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
- old := make(map[string]*UnitStatus)
- statusChan := make(chan map[string]*UnitStatus, buffer)
- errChan := make(chan error, buffer)
-
- go func() {
- for {
- timerChan := time.After(interval)
-
- units, err := c.ListUnits()
- if err == nil {
- cur := make(map[string]*UnitStatus)
- for i := range units {
- if filterUnit != nil && filterUnit(units[i].Name) {
- continue
- }
- cur[units[i].Name] = &units[i]
- }
-
- // add all new or changed units
- changed := make(map[string]*UnitStatus)
- for n, u := range cur {
- if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
- changed[n] = u
- }
- delete(old, n)
- }
-
- // add all deleted units
- for oldN := range old {
- changed[oldN] = nil
- }
-
- old = cur
-
- if len(changed) != 0 {
- statusChan <- changed
- }
- } else {
- errChan <- err
- }
-
- <-timerChan
- }
- }()
-
- return statusChan, errChan
-}
-
-type SubStateUpdate struct {
- UnitName string
- SubState string
-}
-
-// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
-// Although this writes to updateCh on every state change, the reported state
-// may be more recent than the change that generated it (due to an unavoidable
-// race in the systemd dbus interface). That is, this method provides a good
-// way to keep a current view of all units' states, but is not guaranteed to
-// show every state transition they go through. Furthermore, state changes
-// will only be written to the channel with non-blocking writes. If updateCh
-// is full, it attempts to write an error to errCh; if errCh is full, the error
-// passes silently.
-func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
- c.subscriber.Lock()
- defer c.subscriber.Unlock()
- c.subscriber.updateCh = updateCh
- c.subscriber.errCh = errCh
-}
-
-func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
- c.subscriber.Lock()
- defer c.subscriber.Unlock()
- if c.subscriber.updateCh == nil {
- return
- }
-
- if c.shouldIgnore(path) {
- return
- }
-
- info, err := c.GetUnitProperties(string(path))
- if err != nil {
- select {
- case c.subscriber.errCh <- err:
- default:
- }
- }
-
- name := info["Id"].(string)
- substate := info["SubState"].(string)
-
- update := &SubStateUpdate{name, substate}
- select {
- case c.subscriber.updateCh <- update:
- default:
- select {
- case c.subscriber.errCh <- errors.New("update channel full!"):
- default:
- }
- }
-
- c.updateIgnore(path, info)
-}
-
-// The ignore functions work around a wart in the systemd dbus interface.
-// Requesting the properties of an unloaded unit will cause systemd to send a
-// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
-// properties on UnitNew (as that's the only indication of a new unit coming up
-// for the first time), we would enter an infinite loop if we did not attempt
-// to detect and ignore these spurious signals. The signals themselves are
-// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
-// unloaded unit's signals for a short time after requesting its properties.
-// This means that we will miss e.g. a transient unit being restarted
-// *immediately* upon failure and also a transient unit being started
-// immediately after requesting its status (with systemctl status, for example,
-// because this causes a UnitNew signal to be sent which then causes us to fetch
-// the properties).
-
-func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
- t, ok := c.subscriber.ignore[path]
- return ok && t >= time.Now().UnixNano()
-}
-
-func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
- c.cleanIgnore()
-
- // unit is unloaded - it will trigger bad systemd dbus behavior
- if info["LoadState"].(string) == "not-found" {
- c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
- }
-}
-
-// without this, ignore would grow unboundedly over time
-func (c *Conn) cleanIgnore() {
- now := time.Now().UnixNano()
- if c.subscriber.cleanIgnore < now {
- c.subscriber.cleanIgnore = now + cleanIgnoreInterval
-
- for p, t := range c.subscriber.ignore {
- if t < now {
- delete(c.subscriber.ignore, p)
- }
- }
- }
-}
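
For context, a hedged sketch of how the subscription API above is typically consumed from an external program. It assumes the upstream import path github.com/coreos/go-systemd/dbus (rather than the vendored Godeps path) and uses only identifiers that appear in this file and its tests: New, Subscribe, SubscribeUnits, and the Name and ActiveState fields of UnitStatus.

```Go
package main

import (
	"log"
	"time"

	// Assumed upstream import path of the vendored package above.
	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	// Per the doc comment above, Subscribe must be called before
	// SubscribeUnits so that systemd starts emitting the needed signals.
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	statusCh, errCh := conn.SubscribeUnits(time.Second)
	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				if status == nil {
					// Deleted units are sent as nil.
					log.Printf("%s removed", name)
					continue
				}
				log.Printf("%s: %s", name, status.ActiveState)
			}
		case err := <-errCh:
			log.Printf("subscription error: %v", err)
		}
	}
}
```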
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go
deleted file mode 100644
index 2625786..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package dbus
-
-import (
- "time"
-)
-
-// SubscriptionSet returns a subscription set which is like conn.Subscribe but
-// can filter to only return events for a set of units.
-type SubscriptionSet struct {
- *set
- conn *Conn
-}
-
-
-func (s *SubscriptionSet) filter(unit string) bool {
- return !s.Contains(unit)
-}
-
-// Subscribe starts listening for dbus events for all of the units in the set.
-// Returns channels identical to conn.SubscribeUnits.
-func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
- // TODO: Make fully evented by using systemd 209 with properties changed values
- return s.conn.SubscribeUnitsCustom(time.Second, 0,
- func(u1, u2 *UnitStatus) bool { return *u1 != *u2 },
- func(unit string) bool { return s.filter(unit) },
- )
-}
-
-// NewSubscriptionSet returns a new subscription set.
-func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
- return &SubscriptionSet{newSet(), conn}
-}
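
Similarly, a minimal sketch (inside the dbus package, mirroring the test file below) of narrowing events to a single unit with NewSubscriptionSet. The helper name watchUnit and the timeout handling are illustrative additions; it assumes conn.Subscribe() has already been called, as in the test.

```Go
package dbus

import (
	"log"
	"time"
)

// watchUnit is an illustrative helper (not part of the library): it watches
// a single unit by adding it to a SubscriptionSet, so the set's filter drops
// events for every other unit.
func watchUnit(conn *Conn, unit string, timeout time.Duration) {
	subSet := conn.NewSubscriptionSet()
	evChan, errChan := subSet.Subscribe()
	subSet.Add(unit)

	deadline := time.After(timeout)
	for {
		select {
		case changes := <-evChan:
			if status, ok := changes[unit]; ok && status != nil {
				log.Printf("%s is now %s", unit, status.ActiveState)
			}
		case err := <-errChan:
			log.Printf("subscription error: %v", err)
		case <-deadline:
			return
		}
	}
}
```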
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go
deleted file mode 100644
index db60085..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package dbus
-
-import (
- "testing"
- "time"
-)
-
-// TestSubscriptionSetUnit exercises the basics of subscribing to a particular unit via a SubscriptionSet.
-func TestSubscriptionSetUnit(t *testing.T) {
- target := "subscribe-events-set.service"
-
- conn, err := New()
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = conn.Subscribe()
- if err != nil {
- t.Fatal(err)
- }
-
- subSet := conn.NewSubscriptionSet()
- evChan, errChan := subSet.Subscribe()
-
- subSet.Add(target)
- setupUnit(target, conn, t)
-
- job, err := conn.StartUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- if job != "done" {
- t.Fatal("Couldn't start", target)
- }
-
- timeout := make(chan bool, 1)
- go func() {
- time.Sleep(3 * time.Second)
- close(timeout)
- }()
-
- for {
- select {
- case changes := <-evChan:
- tCh, ok := changes[target]
-
- if !ok {
-			t.Fatalf("Unexpected event %v", changes)
- }
-
- if tCh.ActiveState == "active" && tCh.Name == target {
- goto success
- }
- case err = <-errChan:
- t.Fatal(err)
- case <-timeout:
- t.Fatal("Reached timeout")
- }
- }
-
-success:
- return
-}
-
-
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go
deleted file mode 100644
index 6f4d0b3..0000000
--- a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package dbus
-
-import (
- "testing"
- "time"
-)
-
-// TestSubscribe exercises the basics of subscription
-func TestSubscribe(t *testing.T) {
- conn, err := New()
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = conn.Subscribe()
- if err != nil {
- t.Fatal(err)
- }
-
- err = conn.Unsubscribe()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestSubscribeUnit exercises the basics of subscription of a particular unit.
-func TestSubscribeUnit(t *testing.T) {
- target := "subscribe-events.service"
-
- conn, err := New()
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = conn.Subscribe()
- if err != nil {
- t.Fatal(err)
- }
-
- err = conn.Unsubscribe()
- if err != nil {
- t.Fatal(err)
- }
-
- evChan, errChan := conn.SubscribeUnits(time.Second)
-
- setupUnit(target, conn, t)
-
- job, err := conn.StartUnit(target, "replace")
- if err != nil {
- t.Fatal(err)
- }
-
- if job != "done" {
- t.Fatal("Couldn't start", target)
- }
-
- timeout := make(chan bool, 1)
- go func() {
- time.Sleep(3 * time.Second)
- close(timeout)
- }()
-
- for {
- select {
- case changes := <-evChan:
- tCh, ok := changes[target]
-
- // Just continue until we see our event.
- if !ok {
- continue
- }
-
- if tCh.ActiveState == "active" && tCh.Name == target {
- goto success
- }
- case err = <-errChan:
- t.Fatal(err)
- case <-timeout:
- t.Fatal("Reached timeout")
- }
- }
-
-success:
- return
-}
-
-
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE b/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE
deleted file mode 100644
index a68e67f..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE
+++ /dev/null
@@ -1,188 +0,0 @@
-
-Copyright (c) 2011-2014 - Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE.libyaml b/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE.libyaml
deleted file mode 100644
index 8da58fb..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE.libyaml
+++ /dev/null
@@ -1,31 +0,0 @@
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original copyright and license:
-
- apic.go
- emitterc.go
- parserc.go
- readerc.go
- scannerc.go
- writerc.go
- yamlh.go
- yamlprivateh.go
-
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/README.md b/Godeps/_workspace/src/github.com/coreos/yaml/README.md
deleted file mode 100644
index 4427005..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Note: This is a fork of https://github.com/go-yaml/yaml. The following README
-doesn't necessarily apply to this fork.
-
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v1*.
-
-To install it, run:
-
- go get gopkg.in/yaml.v1
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)
-
-API stability
--------------
-
-The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
- "fmt"
- "log"
-
- "gopkg.in/yaml.v1"
-)
-
-var data = `
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-`
-
-type T struct {
- A string
- B struct{C int; D []int ",flow"}
-}
-
-func main() {
- t := T{}
-
- err := yaml.Unmarshal([]byte(data), &t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t:\n%v\n\n", t)
-
- d, err := yaml.Marshal(&t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t dump:\n%s\n\n", string(d))
-
- m := make(map[interface{}]interface{})
-
- err = yaml.Unmarshal([]byte(data), &m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m:\n%v\n\n", m)
-
- d, err = yaml.Marshal(&m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
- c: 2
- d:
- - 3
- - 4
-```
-
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/apic.go b/Godeps/_workspace/src/github.com/coreos/yaml/apic.go
deleted file mode 100644
index 95ec014..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/apic.go
+++ /dev/null
@@ -1,742 +0,0 @@
-package yaml
-
-import (
- "io"
- "os"
-)
-
-func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
- //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
-
- // Check if we can move the queue at the beginning of the buffer.
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
- if parser.tokens_head != len(parser.tokens) {
- copy(parser.tokens, parser.tokens[parser.tokens_head:])
- }
- parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
- parser.tokens_head = 0
- }
- parser.tokens = append(parser.tokens, *token)
- if pos < 0 {
- return
- }
- copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
- parser.tokens[parser.tokens_head+pos] = *token
-}
-
-// Create a new parser object.
-func yaml_parser_initialize(parser *yaml_parser_t) bool {
- *parser = yaml_parser_t{
- raw_buffer: make([]byte, 0, input_raw_buffer_size),
- buffer: make([]byte, 0, input_buffer_size),
- }
- return true
-}
-
-// Destroy a parser object.
-func yaml_parser_delete(parser *yaml_parser_t) {
- *parser = yaml_parser_t{}
-}
-
-// String read handler.
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- if parser.input_pos == len(parser.input) {
- return 0, io.EOF
- }
- n = copy(buffer, parser.input[parser.input_pos:])
- parser.input_pos += n
- return n, nil
-}
-
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- return parser.input_file.Read(buffer)
-}
-
-// Set a string input.
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_string_read_handler
- parser.input = input
- parser.input_pos = 0
-}
-
-// Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_file_read_handler
- parser.input_file = file
-}
-
-// Set the source encoding.
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
- if parser.encoding != yaml_ANY_ENCODING {
- panic("must set the encoding only once")
- }
- parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
- *emitter = yaml_emitter_t{
- buffer: make([]byte, output_buffer_size),
- raw_buffer: make([]byte, 0, output_raw_buffer_size),
- states: make([]yaml_emitter_state_t, 0, initial_stack_size),
- events: make([]yaml_event_t, 0, initial_queue_size),
- }
- return true
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
- *emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
- return nil
-}
-
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- _, err := emitter.output_file.Write(buffer)
- return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_string_write_handler
- emitter.output_buffer = output_buffer
-}
-
-// Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_file_write_handler
- emitter.output_file = file
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
- if emitter.encoding != yaml_ANY_ENCODING {
- panic("must set the output encoding only once")
- }
- emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
- emitter.canonical = canonical
-}
-
-// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
- if indent < 2 || indent > 9 {
- indent = 2
- }
- emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
- if width < 0 {
- width = -1
- }
- emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
- emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
- emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-// assert(token); // Non-NULL token object expected.
-//
-// switch (token.type)
-// {
-// case YAML_TAG_DIRECTIVE_TOKEN:
-// yaml_free(token.data.tag_directive.handle);
-// yaml_free(token.data.tag_directive.prefix);
-// break;
-//
-// case YAML_ALIAS_TOKEN:
-// yaml_free(token.data.alias.value);
-// break;
-//
-// case YAML_ANCHOR_TOKEN:
-// yaml_free(token.data.anchor.value);
-// break;
-//
-// case YAML_TAG_TOKEN:
-// yaml_free(token.data.tag.handle);
-// yaml_free(token.data.tag.suffix);
-// break;
-//
-// case YAML_SCALAR_TOKEN:
-// yaml_free(token.data.scalar.value);
-// break;
-//
-// default:
-// break;
-// }
-//
-// memset(token, 0, sizeof(yaml_token_t));
-//}
-//
-///*
-// * Check if a string is a valid UTF-8 sequence.
-// *
-// * Check 'reader.c' for more details on UTF-8 encoding.
-// */
-//
-//static int
-//yaml_check_utf8(yaml_char_t *start, size_t length)
-//{
-// yaml_char_t *end = start+length;
-// yaml_char_t *pointer = start;
-//
-// while (pointer < end) {
-// unsigned char octet;
-// unsigned int width;
-// unsigned int value;
-// size_t k;
-//
-// octet = pointer[0];
-// width = (octet & 0x80) == 0x00 ? 1 :
-// (octet & 0xE0) == 0xC0 ? 2 :
-// (octet & 0xF0) == 0xE0 ? 3 :
-// (octet & 0xF8) == 0xF0 ? 4 : 0;
-// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
-// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
-// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
-// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
-// if (!width) return 0;
-// if (pointer+width > end) return 0;
-// for (k = 1; k < width; k ++) {
-// octet = pointer[k];
-// if ((octet & 0xC0) != 0x80) return 0;
-// value = (value << 6) + (octet & 0x3F);
-// }
-// if (!((width == 1) ||
-// (width == 2 && value >= 0x80) ||
-// (width == 3 && value >= 0x800) ||
-// (width == 4 && value >= 0x10000))) return 0;
-//
-// pointer += width;
-// }
-//
-// return 1;
-//}
-//
-
-// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- encoding: encoding,
- }
- return true
-}
-
-// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- }
- return true
-}
-
-// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
- tag_directives []yaml_tag_directive_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: implicit,
- }
- return true
-}
-
-// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- implicit: implicit,
- }
- return true
-}
-
-///*
-// * Create ALIAS.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
-//{
-// mark yaml_mark_t = { 0, 0, 0 }
-// anchor_copy *yaml_char_t = NULL
-//
-// assert(event) // Non-NULL event object is expected.
-// assert(anchor) // Non-NULL anchor is expected.
-//
-// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
-//
-// anchor_copy = yaml_strdup(anchor)
-// if (!anchor_copy)
-// return 0
-//
-// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
-//
-// return 1
-//}
-
-// Create SCALAR.
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- anchor: anchor,
- tag: tag,
- value: value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-START.
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-END.
-func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- }
- return true
-}
-
-// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- }
- return true
-}
-
-// Destroy an event object.
-func yaml_event_delete(event *yaml_event_t) {
- *event = yaml_event_t{}
-}
-
-///*
-// * Create a document object.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_initialize(document *yaml_document_t,
-// version_directive *yaml_version_directive_t,
-// tag_directives_start *yaml_tag_directive_t,
-// tag_directives_end *yaml_tag_directive_t,
-// start_implicit int, end_implicit int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// struct {
-// start *yaml_node_t
-// end *yaml_node_t
-// top *yaml_node_t
-// } nodes = { NULL, NULL, NULL }
-// version_directive_copy *yaml_version_directive_t = NULL
-// struct {
-// start *yaml_tag_directive_t
-// end *yaml_tag_directive_t
-// top *yaml_tag_directive_t
-// } tag_directives_copy = { NULL, NULL, NULL }
-// value yaml_tag_directive_t = { NULL, NULL }
-// mark yaml_mark_t = { 0, 0, 0 }
-//
-// assert(document) // Non-NULL document object is expected.
-// assert((tag_directives_start && tag_directives_end) ||
-// (tag_directives_start == tag_directives_end))
-// // Valid tag directives are expected.
-//
-// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-// if (version_directive) {
-// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-// if (!version_directive_copy) goto error
-// version_directive_copy.major = version_directive.major
-// version_directive_copy.minor = version_directive.minor
-// }
-//
-// if (tag_directives_start != tag_directives_end) {
-// tag_directive *yaml_tag_directive_t
-// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-// goto error
-// for (tag_directive = tag_directives_start
-// tag_directive != tag_directives_end; tag_directive ++) {
-// assert(tag_directive.handle)
-// assert(tag_directive.prefix)
-// if (!yaml_check_utf8(tag_directive.handle,
-// strlen((char *)tag_directive.handle)))
-// goto error
-// if (!yaml_check_utf8(tag_directive.prefix,
-// strlen((char *)tag_directive.prefix)))
-// goto error
-// value.handle = yaml_strdup(tag_directive.handle)
-// value.prefix = yaml_strdup(tag_directive.prefix)
-// if (!value.handle || !value.prefix) goto error
-// if (!PUSH(&context, tag_directives_copy, value))
-// goto error
-// value.handle = NULL
-// value.prefix = NULL
-// }
-// }
-//
-// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-// tag_directives_copy.start, tag_directives_copy.top,
-// start_implicit, end_implicit, mark, mark)
-//
-// return 1
-//
-//error:
-// STACK_DEL(&context, nodes)
-// yaml_free(version_directive_copy)
-// while (!STACK_EMPTY(&context, tag_directives_copy)) {
-// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-// }
-// STACK_DEL(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-//
-// return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// tag_directive *yaml_tag_directive_t
-//
-// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// while (!STACK_EMPTY(&context, document.nodes)) {
-// node yaml_node_t = POP(&context, document.nodes)
-// yaml_free(node.tag)
-// switch (node.type) {
-// case YAML_SCALAR_NODE:
-// yaml_free(node.data.scalar.value)
-// break
-// case YAML_SEQUENCE_NODE:
-// STACK_DEL(&context, node.data.sequence.items)
-// break
-// case YAML_MAPPING_NODE:
-// STACK_DEL(&context, node.data.mapping.pairs)
-// break
-// default:
-// assert(0) // Should not happen.
-// }
-// }
-// STACK_DEL(&context, document.nodes)
-//
-// yaml_free(document.version_directive)
-// for (tag_directive = document.tag_directives.start
-// tag_directive != document.tag_directives.end
-// tag_directive++) {
-// yaml_free(tag_directive.handle)
-// yaml_free(tag_directive.prefix)
-// }
-// yaml_free(document.tag_directives.start)
-//
-// memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-// return document.nodes.start + index - 1
-// }
-// return NULL
-//}
-//
-///**
-// * Get the root object.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_root_node(document *yaml_document_t)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (document.nodes.top != document.nodes.start) {
-// return document.nodes.start
-// }
-// return NULL
-//}
-//
-///*
-// * Add a scalar node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_scalar(document *yaml_document_t,
-// tag *yaml_char_t, value *yaml_char_t, length int,
-// style yaml_scalar_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// value_copy *yaml_char_t = NULL
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-// assert(value) // Non-NULL value is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (length < 0) {
-// length = strlen((char *)value)
-// }
-//
-// if (!yaml_check_utf8(value, length)) goto error
-// value_copy = yaml_malloc(length+1)
-// if (!value_copy) goto error
-// memcpy(value_copy, value, length)
-// value_copy[length] = '\0'
-//
-// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// yaml_free(tag_copy)
-// yaml_free(value_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a sequence node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_sequence(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_sequence_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_item_t
-// end *yaml_node_item_t
-// top *yaml_node_item_t
-// } items = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
-//
-// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, items)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a mapping node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_mapping(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_mapping_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_pair_t
-// end *yaml_node_pair_t
-// top *yaml_node_pair_t
-// } pairs = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
-//
-// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, pairs)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Append an item to a sequence node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_sequence_item(document *yaml_document_t,
-// sequence int, item int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// assert(document) // Non-NULL document is required.
-// assert(sequence > 0
-// && document.nodes.start + sequence <= document.nodes.top)
-// // Valid sequence id is required.
-// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
-// // A sequence node is required.
-// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
-// // Valid item id is required.
-//
-// if (!PUSH(&context,
-// document.nodes.start[sequence-1].data.sequence.items, item))
-// return 0
-//
-// return 1
-//}
-//
-///*
-// * Append a pair of a key and a value to a mapping node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_mapping_pair(document *yaml_document_t,
-// mapping int, key int, value int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// pair yaml_node_pair_t
-//
-// assert(document) // Non-NULL document is required.
-// assert(mapping > 0
-// && document.nodes.start + mapping <= document.nodes.top)
-// // Valid mapping id is required.
-// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
-// // A mapping node is required.
-// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
-// // Valid key id is required.
-// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
-// // Valid value id is required.
-//
-// pair.key = key
-// pair.value = value
-//
-// if (!PUSH(&context,
-// document.nodes.start[mapping-1].data.mapping.pairs, pair))
-// return 0
-//
-// return 1
-//}
-//
-//
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/decode.go b/Godeps/_workspace/src/github.com/coreos/yaml/decode.go
deleted file mode 100644
index e219d4b..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/decode.go
+++ /dev/null
@@ -1,571 +0,0 @@
-package yaml
-
-import (
- "encoding/base64"
- "fmt"
- "reflect"
- "strconv"
- "time"
-)
-
-const (
- documentNode = 1 << iota
- mappingNode
- sequenceNode
- scalarNode
- aliasNode
-)
-
-type node struct {
- kind int
- line, column int
- tag string
- value string
- implicit bool
- children []*node
- anchors map[string]*node
-}
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
- parser yaml_parser_t
- event yaml_event_t
- doc *node
- transform transformString
-}
-
-func newParser(b []byte, t transformString) *parser {
- p := parser{transform: t}
-
- if !yaml_parser_initialize(&p.parser) {
-		panic("Failed to initialize YAML parser")
- }
-
- if len(b) == 0 {
- b = []byte{'\n'}
- }
-
- yaml_parser_set_input_string(&p.parser, b)
-
- p.skip()
- if p.event.typ != yaml_STREAM_START_EVENT {
- panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return &p
-}
-
-func (p *parser) destroy() {
- if p.event.typ != yaml_NO_EVENT {
- yaml_event_delete(&p.event)
- }
- yaml_parser_delete(&p.parser)
-}
-
-func (p *parser) skip() {
- if p.event.typ != yaml_NO_EVENT {
- if p.event.typ == yaml_STREAM_END_EVENT {
- fail("Attempted to go past the end of stream. Corrupted value?")
- }
- yaml_event_delete(&p.event)
- }
- if !yaml_parser_parse(&p.parser, &p.event) {
- p.fail()
- }
-}
-
-func (p *parser) fail() {
- var where string
- var line int
- if p.parser.problem_mark.line != 0 {
- line = p.parser.problem_mark.line
- } else if p.parser.context_mark.line != 0 {
- line = p.parser.context_mark.line
- }
- if line != 0 {
- where = "line " + strconv.Itoa(line) + ": "
- }
- var msg string
- if len(p.parser.problem) > 0 {
- msg = p.parser.problem
- } else {
- msg = "Unknown problem parsing YAML content"
- }
- fail(where + msg)
-}
-
-func (p *parser) anchor(n *node, anchor []byte) {
- if anchor != nil {
- p.doc.anchors[string(anchor)] = n
- }
-}
-
-func (p *parser) parse() *node {
- switch p.event.typ {
- case yaml_SCALAR_EVENT:
- return p.scalar()
- case yaml_ALIAS_EVENT:
- return p.alias()
- case yaml_MAPPING_START_EVENT:
- return p.mapping()
- case yaml_SEQUENCE_START_EVENT:
- return p.sequence()
- case yaml_DOCUMENT_START_EVENT:
- return p.document()
- case yaml_STREAM_END_EVENT:
- // Happens when attempting to decode an empty buffer.
- return nil
- default:
- panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
- }
- panic("unreachable")
-}
-
-func (p *parser) node(kind int) *node {
- return &node{
- kind: kind,
- line: p.event.start_mark.line,
- column: p.event.start_mark.column,
- }
-}
-
-func (p *parser) document() *node {
- n := p.node(documentNode)
- n.anchors = make(map[string]*node)
- p.doc = n
- p.skip()
- n.children = append(n.children, p.parse())
- if p.event.typ != yaml_DOCUMENT_END_EVENT {
- panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return n
-}
-
-func (p *parser) alias() *node {
- n := p.node(aliasNode)
- n.value = string(p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) scalar() *node {
- n := p.node(scalarNode)
- n.value = string(p.event.value)
- n.tag = string(p.event.tag)
- n.implicit = p.event.implicit
- p.anchor(n, p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) sequence() *node {
- n := p.node(sequenceNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_SEQUENCE_END_EVENT {
- n.children = append(n.children, p.parse())
- }
- p.skip()
- return n
-}
-
-func (p *parser) mapping() *node {
- n := p.node(mappingNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_MAPPING_END_EVENT {
- key := p.parse()
- key.value = p.transform(key.value)
- value := p.parse()
- n.children = append(n.children, key, value)
- }
- p.skip()
- return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
- doc *node
- aliases map[string]bool
-}
-
-func newDecoder() *decoder {
- d := &decoder{}
- d.aliases = make(map[string]bool)
- return d
-}
-
-// d.setter deals with setters and pointer dereferencing and initialization.
-//
-// It's a slightly convoluted case to handle properly:
-//
-// - nil pointers should be initialized, unless being set to nil
-// - we don't know at this point yet what's the value to SetYAML() with.
-// - we can't separate pointer deref/init and setter checking, because
-// a setter may be found while going down a pointer chain.
-//
-// Thus, here is how it takes care of it:
-//
-// - out is provided as a pointer, so that it can be replaced.
-// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null
-// - when a setter is found, *out=interface{}, and a set() function is
-// returned to call SetYAML() with the value of *out once it's defined.
-//
-func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
- if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
- setter, _ := (*out).Addr().Interface().(Setter)
- if setter != nil {
- var arg interface{}
- *out = reflect.ValueOf(&arg).Elem()
- return func() {
- *good = setter.SetYAML(shortTag(tag), arg)
- }
- }
- }
- again := true
- for again {
- again = false
- setter, _ := (*out).Interface().(Setter)
- if tag != yaml_NULL_TAG || setter != nil {
- if pv := (*out); pv.Kind() == reflect.Ptr {
- if pv.IsNil() {
- *out = reflect.New(pv.Type().Elem()).Elem()
- pv.Set((*out).Addr())
- } else {
- *out = pv.Elem()
- }
- setter, _ = pv.Interface().(Setter)
- again = true
- }
- }
- if setter != nil {
- var arg interface{}
- *out = reflect.ValueOf(&arg).Elem()
- return func() {
- *good = setter.SetYAML(shortTag(tag), arg)
- }
- }
- }
- return nil
-}
-
-func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
- switch n.kind {
- case documentNode:
- good = d.document(n, out)
- case scalarNode:
- good = d.scalar(n, out)
- case aliasNode:
- good = d.alias(n, out)
- case mappingNode:
- good = d.mapping(n, out)
- case sequenceNode:
- good = d.sequence(n, out)
- default:
- panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind))
- }
- return
-}
-
-func (d *decoder) document(n *node, out reflect.Value) (good bool) {
- if len(n.children) == 1 {
- d.doc = n
- d.unmarshal(n.children[0], out)
- return true
- }
- return false
-}
-
-func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
- an, ok := d.doc.anchors[n.value]
- if !ok {
- fail("Unknown anchor '" + n.value + "' referenced")
- }
- if d.aliases[n.value] {
- fail("Anchor '" + n.value + "' value contains itself")
- }
- d.aliases[n.value] = true
- good = d.unmarshal(an, out)
- delete(d.aliases, n.value)
- return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
- for _, k := range out.MapKeys() {
- out.SetMapIndex(k, zeroValue)
- }
-}
-
-var durationType = reflect.TypeOf(time.Duration(0))
-
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
- var tag string
- var resolved interface{}
- if n.tag == "" && !n.implicit {
- tag = yaml_STR_TAG
- resolved = n.value
- } else {
- tag, resolved = resolve(n.tag, n.value)
- if tag == yaml_BINARY_TAG {
- data, err := base64.StdEncoding.DecodeString(resolved.(string))
- if err != nil {
- fail("!!binary value contains invalid base64 data")
- }
- resolved = string(data)
- }
- }
- if set := d.setter(tag, &out, &good); set != nil {
- defer set()
- }
- if resolved == nil {
- if out.Kind() == reflect.Map && !out.CanAddr() {
- resetMap(out)
- } else {
- out.Set(reflect.Zero(out.Type()))
- }
- good = true
- return
- }
- switch out.Kind() {
- case reflect.String:
- if tag == yaml_BINARY_TAG {
- out.SetString(resolved.(string))
- good = true
- } else if resolved != nil {
- out.SetString(n.value)
- good = true
- }
- case reflect.Interface:
- if resolved == nil {
- out.Set(reflect.Zero(out.Type()))
- } else {
- out.Set(reflect.ValueOf(resolved))
- }
- good = true
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- switch resolved := resolved.(type) {
- case int:
- if !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case int64:
- if !out.OverflowInt(resolved) {
- out.SetInt(resolved)
- good = true
- }
- case float64:
- if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case string:
- if out.Type() == durationType {
- d, err := time.ParseDuration(resolved)
- if err == nil {
- out.SetInt(int64(d))
- good = true
- }
- }
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- switch resolved := resolved.(type) {
- case int:
- if resolved >= 0 {
- out.SetUint(uint64(resolved))
- good = true
- }
- case int64:
- if resolved >= 0 {
- out.SetUint(uint64(resolved))
- good = true
- }
- case float64:
- if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- }
- case reflect.Bool:
- switch resolved := resolved.(type) {
- case bool:
- out.SetBool(resolved)
- good = true
- }
- case reflect.Float32, reflect.Float64:
- switch resolved := resolved.(type) {
- case int:
- out.SetFloat(float64(resolved))
- good = true
- case int64:
- out.SetFloat(float64(resolved))
- good = true
- case float64:
- out.SetFloat(resolved)
- good = true
- }
- case reflect.Ptr:
- if out.Type().Elem() == reflect.TypeOf(resolved) {
- elem := reflect.New(out.Type().Elem())
- elem.Elem().Set(reflect.ValueOf(resolved))
- out.Set(elem)
- good = true
- }
- }
- return good
-}
-
-func settableValueOf(i interface{}) reflect.Value {
- v := reflect.ValueOf(i)
- sv := reflect.New(v.Type()).Elem()
- sv.Set(v)
- return sv
-}
-
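-// sequence decodes a sequence node into a slice, falling back to a generic
-// []interface{} when out is an untyped interface.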
-func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
- if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil {
- defer set()
- }
- var iface reflect.Value
- if out.Kind() == reflect.Interface {
- // No type hints. Will have to use a generic sequence.
- iface = out
- out = settableValueOf(make([]interface{}, 0))
- }
-
- if out.Kind() != reflect.Slice {
- return false
- }
- et := out.Type().Elem()
-
- l := len(n.children)
- for i := 0; i < l; i++ {
- e := reflect.New(et).Elem()
- if ok := d.unmarshal(n.children[i], e); ok {
- out.Set(reflect.Append(out, e))
- }
- }
- if iface.IsValid() {
- iface.Set(out)
- }
- return true
-}
-
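-// mapping decodes a mapping node into a map or struct, expanding any "<<" merge keys.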
-func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
- if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil {
- defer set()
- }
- if out.Kind() == reflect.Struct {
- return d.mappingStruct(n, out)
- }
-
- if out.Kind() == reflect.Interface {
- // No type hints. Will have to use a generic map.
- iface := out
- out = settableValueOf(make(map[interface{}]interface{}))
- iface.Set(out)
- }
-
- if out.Kind() != reflect.Map {
- return false
- }
- outt := out.Type()
- kt := outt.Key()
- et := outt.Elem()
-
- if out.IsNil() {
- out.Set(reflect.MakeMap(outt))
- }
- l := len(n.children)
- for i := 0; i < l; i += 2 {
- if isMerge(n.children[i]) {
- d.merge(n.children[i+1], out)
- continue
- }
- k := reflect.New(kt).Elem()
- if d.unmarshal(n.children[i], k) {
- kkind := k.Kind()
- if kkind == reflect.Interface {
- kkind = k.Elem().Kind()
- }
- if kkind == reflect.Map || kkind == reflect.Slice {
- fail(fmt.Sprintf("invalid map key: %#v", k.Interface()))
- }
- e := reflect.New(et).Elem()
- if d.unmarshal(n.children[i+1], e) {
- out.SetMapIndex(k, e)
- }
- }
- }
- return true
-}
-
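-// mappingStruct decodes a mapping node into the fields of a struct, including inlined fields.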
-func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
- sinfo, err := getStructInfo(out.Type())
- if err != nil {
- panic(err)
- }
- name := settableValueOf("")
- l := len(n.children)
- for i := 0; i < l; i += 2 {
- ni := n.children[i]
- if isMerge(ni) {
- d.merge(n.children[i+1], out)
- continue
- }
- if !d.unmarshal(ni, name) {
- continue
- }
- if info, ok := sinfo.FieldsMap[name.String()]; ok {
- var field reflect.Value
- if info.Inline == nil {
- field = out.Field(info.Num)
- } else {
- field = out.FieldByIndex(info.Inline)
- }
- d.unmarshal(n.children[i+1], field)
- }
- }
- return true
-}
-
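-// merge expands a "<<" merge value: a mapping, an alias to one, or a sequence of
-// either, with earlier entries taking precedence.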
-func (d *decoder) merge(n *node, out reflect.Value) {
- const wantMap = "map merge requires map or sequence of maps as the value"
- switch n.kind {
- case mappingNode:
- d.unmarshal(n, out)
- case aliasNode:
- an, ok := d.doc.anchors[n.value]
- if ok && an.kind != mappingNode {
- fail(wantMap)
- }
- d.unmarshal(n, out)
- case sequenceNode:
- // Step backwards as earlier nodes take precedence.
- for i := len(n.children) - 1; i >= 0; i-- {
- ni := n.children[i]
- if ni.kind == aliasNode {
- an, ok := d.doc.anchors[ni.value]
- if ok && an.kind != mappingNode {
- fail(wantMap)
- }
- } else if ni.kind != mappingNode {
- fail(wantMap)
- }
- d.unmarshal(ni, out)
- }
- default:
- fail(wantMap)
- }
-}
-
-func isMerge(n *node) bool {
-	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/decode_test.go b/Godeps/_workspace/src/github.com/coreos/yaml/decode_test.go
deleted file mode 100644
index 349bee7..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/decode_test.go
+++ /dev/null
@@ -1,720 +0,0 @@
-package yaml_test
-
-import (
- "github.com/coreos/yaml"
- . "gopkg.in/check.v1"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-var unmarshalIntTest = 123
-
-var unmarshalTests = []struct {
- data string
- value interface{}
-}{
- {
- "",
- &struct{}{},
- }, {
- "{}", &struct{}{},
- }, {
- "v: hi",
- map[string]string{"v": "hi"},
- }, {
- "v: hi", map[string]interface{}{"v": "hi"},
- }, {
- "v: true",
- map[string]string{"v": "true"},
- }, {
- "v: true",
- map[string]interface{}{"v": true},
- }, {
- "v: 10",
- map[string]interface{}{"v": 10},
- }, {
- "v: 0b10",
- map[string]interface{}{"v": 2},
- }, {
- "v: 0xA",
- map[string]interface{}{"v": 10},
- }, {
- "v: 4294967296",
- map[string]int64{"v": 4294967296},
- }, {
- "v: 0.1",
- map[string]interface{}{"v": 0.1},
- }, {
- "v: .1",
- map[string]interface{}{"v": 0.1},
- }, {
- "v: .Inf",
- map[string]interface{}{"v": math.Inf(+1)},
- }, {
- "v: -.Inf",
- map[string]interface{}{"v": math.Inf(-1)},
- }, {
- "v: -10",
- map[string]interface{}{"v": -10},
- }, {
- "v: -.1",
- map[string]interface{}{"v": -0.1},
- },
-
- // Simple values.
- {
- "123",
- &unmarshalIntTest,
- },
-
- // Floats from spec
- {
- "canonical: 6.8523e+5",
- map[string]interface{}{"canonical": 6.8523e+5},
- }, {
- "expo: 685.230_15e+03",
- map[string]interface{}{"expo": 685.23015e+03},
- }, {
- "fixed: 685_230.15",
- map[string]interface{}{"fixed": 685230.15},
- }, {
- "neginf: -.inf",
- map[string]interface{}{"neginf": math.Inf(-1)},
- }, {
- "fixed: 685_230.15",
- map[string]float64{"fixed": 685230.15},
- },
- //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
- //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
-
- // Bools from spec
- {
- "canonical: y",
- map[string]interface{}{"canonical": true},
- }, {
- "answer: NO",
- map[string]interface{}{"answer": false},
- }, {
- "logical: True",
- map[string]interface{}{"logical": true},
- }, {
- "option: on",
- map[string]interface{}{"option": true},
- }, {
- "option: on",
- map[string]bool{"option": true},
- },
- // Ints from spec
- {
- "canonical: 685230",
- map[string]interface{}{"canonical": 685230},
- }, {
- "decimal: +685_230",
- map[string]interface{}{"decimal": 685230},
- }, {
- "octal: 02472256",
- map[string]interface{}{"octal": 685230},
- }, {
- "hexa: 0x_0A_74_AE",
- map[string]interface{}{"hexa": 685230},
- }, {
- "bin: 0b1010_0111_0100_1010_1110",
- map[string]interface{}{"bin": 685230},
- }, {
- "bin: -0b101010",
- map[string]interface{}{"bin": -42},
- }, {
- "decimal: +685_230",
- map[string]int{"decimal": 685230},
- },
-
- //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
-
- // Nulls from spec
- {
- "empty:",
- map[string]interface{}{"empty": nil},
- }, {
- "canonical: ~",
- map[string]interface{}{"canonical": nil},
- }, {
- "english: null",
- map[string]interface{}{"english": nil},
- }, {
- "~: null key",
- map[interface{}]string{nil: "null key"},
- }, {
- "empty:",
- map[string]*bool{"empty": nil},
- },
-
- // Flow sequence
- {
- "seq: [A,B]",
- map[string]interface{}{"seq": []interface{}{"A", "B"}},
- }, {
- "seq: [A,B,C,]",
- map[string][]string{"seq": []string{"A", "B", "C"}},
- }, {
- "seq: [A,1,C]",
- map[string][]string{"seq": []string{"A", "1", "C"}},
- }, {
- "seq: [A,1,C]",
- map[string][]int{"seq": []int{1}},
- }, {
- "seq: [A,1,C]",
- map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
- },
- // Block sequence
- {
- "seq:\n - A\n - B",
- map[string]interface{}{"seq": []interface{}{"A", "B"}},
- }, {
- "seq:\n - A\n - B\n - C",
- map[string][]string{"seq": []string{"A", "B", "C"}},
- }, {
- "seq:\n - A\n - 1\n - C",
- map[string][]string{"seq": []string{"A", "1", "C"}},
- }, {
- "seq:\n - A\n - 1\n - C",
- map[string][]int{"seq": []int{1}},
- }, {
- "seq:\n - A\n - 1\n - C",
- map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
- },
-
- // Literal block scalar
- {
- "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
- map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
- },
-
- // Folded block scalar
- {
- "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
- map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
- },
-
- // Map inside interface with no type hints.
- {
- "a: {b: c}",
- map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
- },
-
- // Structs and type conversions.
- {
- "hello: world",
- &struct{ Hello string }{"world"},
- }, {
- "a: {b: c}",
- &struct{ A struct{ B string } }{struct{ B string }{"c"}},
- }, {
- "a: {b: c}",
- &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
- }, {
- "a: {b: c}",
- &struct{ A map[string]string }{map[string]string{"b": "c"}},
- }, {
- "a: {b: c}",
- &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
- }, {
- "a:",
- &struct{ A map[string]string }{},
- }, {
- "a: 1",
- &struct{ A int }{1},
- }, {
- "a: 1",
- &struct{ A float64 }{1},
- }, {
- "a: 1.0",
- &struct{ A int }{1},
- }, {
- "a: 1.0",
- &struct{ A uint }{1},
- }, {
- "a: [1, 2]",
- &struct{ A []int }{[]int{1, 2}},
- }, {
- "a: 1",
- &struct{ B int }{0},
- }, {
- "a: 1",
- &struct {
- B int "a"
- }{1},
- }, {
- "a: y",
- &struct{ A bool }{true},
- },
-
- // Some cross type conversions
- {
- "v: 42",
- map[string]uint{"v": 42},
- }, {
- "v: -42",
- map[string]uint{},
- }, {
- "v: 4294967296",
- map[string]uint64{"v": 4294967296},
- }, {
- "v: -4294967296",
- map[string]uint64{},
- },
-
- // Overflow cases.
- {
- "v: 4294967297",
- map[string]int32{},
- }, {
- "v: 128",
- map[string]int8{},
- },
-
- // Quoted values.
- {
- "'1': '\"2\"'",
- map[interface{}]interface{}{"1": "\"2\""},
- }, {
- "v:\n- A\n- 'B\n\n C'\n",
- map[string][]string{"v": []string{"A", "B\nC"}},
- },
-
- // Explicit tags.
- {
- "v: !!float '1.1'",
- map[string]interface{}{"v": 1.1},
- }, {
- "v: !!null ''",
- map[string]interface{}{"v": nil},
- }, {
- "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
- map[string]interface{}{"v": 1},
- },
-
- // Anchors and aliases.
- {
- "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
- &struct{ A, B, C, D int }{1, 2, 1, 2},
- }, {
- "a: &a {c: 1}\nb: *a",
- &struct {
- A, B struct {
- C int
- }
- }{struct{ C int }{1}, struct{ C int }{1}},
- }, {
- "a: &a [1, 2]\nb: *a",
- &struct{ B []int }{[]int{1, 2}},
- },
-
- // Bug #1133337
- {
- "foo: ''",
- map[string]*string{"foo": new(string)},
- }, {
- "foo: null",
- map[string]string{"foo": ""},
- }, {
- "foo: null",
- map[string]interface{}{"foo": nil},
- },
-
- // Ignored field
- {
- "a: 1\nb: 2\n",
- &struct {
- A int
- B int "-"
- }{1, 0},
- },
-
- // Bug #1191981
- {
- "" +
- "%YAML 1.1\n" +
- "--- !!str\n" +
- `"Generic line break (no glyph)\n\` + "\n" +
- ` Generic line break (glyphed)\n\` + "\n" +
- ` Line separator\u2028\` + "\n" +
- ` Paragraph separator\u2029"` + "\n",
- "" +
- "Generic line break (no glyph)\n" +
- "Generic line break (glyphed)\n" +
- "Line separator\u2028Paragraph separator\u2029",
- },
-
- // Struct inlining
- {
- "a: 1\nb: 2\nc: 3\n",
- &struct {
- A int
- C inlineB `yaml:",inline"`
- }{1, inlineB{2, inlineC{3}}},
- },
-
- // bug 1243827
- {
- "a: -b_c",
- map[string]interface{}{"a": "-b_c"},
- },
- {
- "a: +b_c",
- map[string]interface{}{"a": "+b_c"},
- },
- {
- "a: 50cent_of_dollar",
- map[string]interface{}{"a": "50cent_of_dollar"},
- },
-
- // Duration
- {
- "a: 3s",
- map[string]time.Duration{"a": 3 * time.Second},
- },
-
- // Issue #24.
- {
- "a: ",
- map[string]string{"a": ""},
- },
-
- // Base 60 floats are obsolete and unsupported.
- {
- "a: 1:1\n",
- map[string]string{"a": "1:1"},
- },
-
- // Binary data.
- {
- "a: !!binary gIGC\n",
- map[string]string{"a": "\x80\x81\x82"},
- }, {
- "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
- map[string]string{"a": strings.Repeat("\x90", 54)},
- }, {
- "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
- map[string]string{"a": strings.Repeat("\x00", 52)},
- },
-}
-
-type inlineB struct {
- B int
- inlineC `yaml:",inline"`
-}
-
-type inlineC struct {
- C int
-}
-
-func (s *S) TestUnmarshal(c *C) {
- for i, item := range unmarshalTests {
- t := reflect.ValueOf(item.value).Type()
- var value interface{}
- switch t.Kind() {
- case reflect.Map:
- value = reflect.MakeMap(t).Interface()
- case reflect.String:
- t := reflect.ValueOf(item.value).Type()
- v := reflect.New(t)
- value = v.Interface()
- default:
- pt := reflect.ValueOf(item.value).Type()
- pv := reflect.New(pt.Elem())
- value = pv.Interface()
- }
- err := yaml.Unmarshal([]byte(item.data), value)
- c.Assert(err, IsNil, Commentf("Item #%d", i))
- if t.Kind() == reflect.String {
- c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
- } else {
- c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i))
- }
- }
-}
-
-func (s *S) TestUnmarshalNaN(c *C) {
- value := map[string]interface{}{}
- err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
- c.Assert(err, IsNil)
- c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
-}
-
-var unmarshalErrorTests = []struct {
- data, error string
-}{
- {"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"},
- {"v: [A,", "YAML error: line 1: did not find expected node content"},
- {"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
- {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
- {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
- {"value: -", "YAML error: block sequence entries are not allowed in this context"},
- {"a: !!binary ==", "YAML error: !!binary value contains invalid base64 data"},
- {"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`},
- {"{{.}}", `YAML error: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
-}
-
-func (s *S) TestUnmarshalErrors(c *C) {
- for _, item := range unmarshalErrorTests {
- var value interface{}
- err := yaml.Unmarshal([]byte(item.data), &value)
- c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
- }
-}
-
-var setterTests = []struct {
- data, tag string
- value interface{}
-}{
- {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
- {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
- {"_: 10", "!!int", 10},
- {"_: null", "!!null", nil},
- {`_: BAR!`, "!!str", "BAR!"},
- {`_: "BAR!"`, "!!str", "BAR!"},
- {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
-}
-
-var setterResult = map[int]bool{}
-
-type typeWithSetter struct {
- tag string
- value interface{}
-}
-
-func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
- o.tag = tag
- o.value = value
- if i, ok := value.(int); ok {
- if result, ok := setterResult[i]; ok {
- return result
- }
- }
- return true
-}
-
-type setterPointerType struct {
- Field *typeWithSetter "_"
-}
-
-type setterValueType struct {
- Field typeWithSetter "_"
-}
-
-func (s *S) TestUnmarshalWithPointerSetter(c *C) {
- for _, item := range setterTests {
- obj := &setterPointerType{}
- err := yaml.Unmarshal([]byte(item.data), obj)
- c.Assert(err, IsNil)
- c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
- c.Assert(obj.Field.tag, Equals, item.tag)
- c.Assert(obj.Field.value, DeepEquals, item.value)
- }
-}
-
-func (s *S) TestUnmarshalWithValueSetter(c *C) {
- for _, item := range setterTests {
- obj := &setterValueType{}
- err := yaml.Unmarshal([]byte(item.data), obj)
- c.Assert(err, IsNil)
- c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
- c.Assert(obj.Field.tag, Equals, item.tag)
- c.Assert(obj.Field.value, DeepEquals, item.value)
- }
-}
-
-func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
- obj := &typeWithSetter{}
- err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
- c.Assert(err, IsNil)
- c.Assert(obj.tag, Equals, setterTests[0].tag)
- value, ok := obj.value.(map[interface{}]interface{})
- c.Assert(ok, Equals, true)
- c.Assert(value["_"], DeepEquals, setterTests[0].value)
-}
-
-func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
- setterResult[2] = false
- setterResult[4] = false
- defer func() {
- delete(setterResult, 2)
- delete(setterResult, 4)
- }()
-
- m := map[string]*typeWithSetter{}
- data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
- err := yaml.Unmarshal([]byte(data), m)
- c.Assert(err, IsNil)
- c.Assert(m["abc"], NotNil)
- c.Assert(m["def"], IsNil)
- c.Assert(m["ghi"], NotNil)
- c.Assert(m["jkl"], IsNil)
-
- c.Assert(m["abc"].value, Equals, 1)
- c.Assert(m["ghi"].value, Equals, 3)
-}
-
-func (s *S) TestUnmarshalWithTransform(c *C) {
- data := `{a_b: 1, c-d: 2, e-f_g: 3, h_i-j: 4}`
- expect := map[string]int{
- "a_b": 1,
- "c_d": 2,
- "e_f_g": 3,
- "h_i_j": 4,
- }
- m := map[string]int{}
- yaml.UnmarshalMappingKeyTransform = func(i string) string {
- return strings.Replace(i, "-", "_", -1)
- }
- err := yaml.Unmarshal([]byte(data), m)
- c.Assert(err, IsNil)
- c.Assert(m, DeepEquals, expect)
-}
-
-// From http://yaml.org/type/merge.html
-var mergeTests = `
-anchors:
- - &CENTER { "x": 1, "y": 2 }
- - &LEFT { "x": 0, "y": 2 }
- - &BIG { "r": 10 }
- - &SMALL { "r": 1 }
-
-# All the following maps are equal:
-
-plain:
- # Explicit keys
- "x": 1
- "y": 2
- "r": 10
- label: center/big
-
-mergeOne:
- # Merge one map
- << : *CENTER
- "r": 10
- label: center/big
-
-mergeMultiple:
- # Merge multiple maps
- << : [ *CENTER, *BIG ]
- label: center/big
-
-override:
- # Override
- << : [ *BIG, *LEFT, *SMALL ]
- "x": 1
- label: center/big
-
-shortTag:
- # Explicit short merge tag
- !!merge "<<" : [ *CENTER, *BIG ]
- label: center/big
-
-longTag:
- # Explicit merge long tag
- ! "<<" : [ *CENTER, *BIG ]
- label: center/big
-
-inlineMap:
- # Inlined map
- << : {"x": 1, "y": 2, "r": 10}
- label: center/big
-
-inlineSequenceMap:
- # Inlined map in sequence
- << : [ *CENTER, {"r": 10} ]
- label: center/big
-`
-
-func (s *S) TestMerge(c *C) {
- var want = map[interface{}]interface{}{
- "x": 1,
- "y": 2,
- "r": 10,
- "label": "center/big",
- }
-
- var m map[string]interface{}
- err := yaml.Unmarshal([]byte(mergeTests), &m)
- c.Assert(err, IsNil)
- for name, test := range m {
- if name == "anchors" {
- continue
- }
- c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
- }
-}
-
-func (s *S) TestMergeStruct(c *C) {
- type Data struct {
- X, Y, R int
- Label string
- }
- want := Data{1, 2, 10, "center/big"}
-
- var m map[string]Data
- err := yaml.Unmarshal([]byte(mergeTests), &m)
- c.Assert(err, IsNil)
- for name, test := range m {
- if name == "anchors" {
- continue
- }
- c.Assert(test, Equals, want, Commentf("test %q failed", name))
- }
-}
-
-var unmarshalNullTests = []func() interface{}{
- func() interface{} { var v interface{}; v = "v"; return &v },
- func() interface{} { var s = "s"; return &s },
- func() interface{} { var s = "s"; sptr := &s; return &sptr },
- func() interface{} { var i = 1; return &i },
- func() interface{} { var i = 1; iptr := &i; return &iptr },
- func() interface{} { m := map[string]int{"s": 1}; return &m },
- func() interface{} { m := map[string]int{"s": 1}; return m },
-}
-
-func (s *S) TestUnmarshalNull(c *C) {
- for _, test := range unmarshalNullTests {
- item := test()
- zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
- err := yaml.Unmarshal([]byte("null"), item)
- c.Assert(err, IsNil)
- if reflect.TypeOf(item).Kind() == reflect.Map {
- c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
- } else {
- c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
- }
- }
-}
-
-//var data []byte
-//func init() {
-// var err error
-// data, err = ioutil.ReadFile("/tmp/file.yaml")
-// if err != nil {
-// panic(err)
-// }
-//}
-//
-//func (s *S) BenchmarkUnmarshal(c *C) {
-// var err error
-// for i := 0; i < c.N; i++ {
-// var v map[string]interface{}
-// err = yaml.Unmarshal(data, &v)
-// }
-// if err != nil {
-// panic(err)
-// }
-//}
-//
-//func (s *S) BenchmarkMarshal(c *C) {
-// var v map[string]interface{}
-// yaml.Unmarshal(data, &v)
-// c.ResetTimer()
-// for i := 0; i < c.N; i++ {
-// yaml.Marshal(&v)
-// }
-//}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/emitterc.go b/Godeps/_workspace/src/github.com/coreos/yaml/emitterc.go
deleted file mode 100644
index 9b3dc4a..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/emitterc.go
+++ /dev/null
@@ -1,1685 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) {
- return yaml_emitter_flush(emitter)
- }
- return true
-}
-
-// Put a character into the output buffer.
-func put(emitter *yaml_emitter_t, value byte) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.buffer[emitter.buffer_pos] = value
- emitter.buffer_pos++
- emitter.column++
- return true
-}
-
-// Put a line break into the output buffer.
-func put_break(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- switch emitter.line_break {
- case yaml_CR_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\r'
- emitter.buffer_pos += 1
- case yaml_LN_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\n'
- emitter.buffer_pos += 1
- case yaml_CRLN_BREAK:
- emitter.buffer[emitter.buffer_pos+0] = '\r'
- emitter.buffer[emitter.buffer_pos+1] = '\n'
- emitter.buffer_pos += 2
- default:
- panic("unknown line break setting")
- }
- emitter.column = 0
- emitter.line++
- return true
-}
-
-// Copy a character from a string into the buffer.
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- p := emitter.buffer_pos
- w := width(s[*i])
- switch w {
- case 4:
- emitter.buffer[p+3] = s[*i+3]
- fallthrough
- case 3:
- emitter.buffer[p+2] = s[*i+2]
- fallthrough
- case 2:
- emitter.buffer[p+1] = s[*i+1]
- fallthrough
- case 1:
- emitter.buffer[p+0] = s[*i+0]
- default:
- panic("unknown character width")
- }
- emitter.column++
- emitter.buffer_pos += w
- *i += w
- return true
-}
-
-// Write a whole string into the buffer.
-func write_all(emitter *yaml_emitter_t, s []byte) bool {
- for i := 0; i < len(s); {
- if !write(emitter, s, &i) {
- return false
- }
- }
- return true
-}
-
-// Copy a line break character from a string into the buffer.
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if s[*i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- *i++
- } else {
- if !write(emitter, s, i) {
- return false
- }
- emitter.column = 0
- emitter.line++
- }
- return true
-}
-
-// Set an emitter error and return false.
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_EMITTER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Emit an event.
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.events = append(emitter.events, *event)
- for !yaml_emitter_need_more_events(emitter) {
- event := &emitter.events[emitter.events_head]
- if !yaml_emitter_analyze_event(emitter, event) {
- return false
- }
- if !yaml_emitter_state_machine(emitter, event) {
- return false
- }
- yaml_event_delete(event)
- emitter.events_head++
- }
- return true
-}
-
-// Check if we need to accumulate more events before emitting.
-//
-// We accumulate extra
-// - 1 event for DOCUMENT-START
-// - 2 events for SEQUENCE-START
-// - 3 events for MAPPING-START
-//
-func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
- if emitter.events_head == len(emitter.events) {
- return true
- }
- var accumulate int
- switch emitter.events[emitter.events_head].typ {
- case yaml_DOCUMENT_START_EVENT:
- accumulate = 1
- break
- case yaml_SEQUENCE_START_EVENT:
- accumulate = 2
- break
- case yaml_MAPPING_START_EVENT:
- accumulate = 3
- break
- default:
- return false
- }
- if len(emitter.events)-emitter.events_head > accumulate {
- return false
- }
- var level int
- for i := emitter.events_head; i < len(emitter.events); i++ {
- switch emitter.events[i].typ {
- case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
- level++
- case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
- level--
- }
- if level == 0 {
- return false
- }
- }
- return true
-}
-
-// Append a directive to the directives stack.
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
- for i := 0; i < len(emitter.tag_directives); i++ {
- if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
- }
- }
-
- // [Go] Do we actually need to copy this given garbage collection
- // and the lack of deallocating destructors?
- tag_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(tag_copy.handle, value.handle)
- copy(tag_copy.prefix, value.prefix)
- emitter.tag_directives = append(emitter.tag_directives, tag_copy)
- return true
-}
-
-// Increase the indentation level.
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
- emitter.indents = append(emitter.indents, emitter.indent)
- if emitter.indent < 0 {
- if flow {
- emitter.indent = emitter.best_indent
- } else {
- emitter.indent = 0
- }
- } else if !indentless {
- emitter.indent += emitter.best_indent
- }
- return true
-}
-
-// State dispatcher.
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- switch emitter.state {
- default:
- case yaml_EMIT_STREAM_START_STATE:
- return yaml_emitter_emit_stream_start(emitter, event)
-
- case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, true)
-
- case yaml_EMIT_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, false)
-
- case yaml_EMIT_DOCUMENT_CONTENT_STATE:
- return yaml_emitter_emit_document_content(emitter, event)
-
- case yaml_EMIT_DOCUMENT_END_STATE:
- return yaml_emitter_emit_document_end(emitter, event)
-
- case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
-
- case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
-
- case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, true)
-
- case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, false)
-
- case yaml_EMIT_END_STATE:
- return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
- }
- panic("invalid emitter state")
-}
-
-// Expect STREAM-START.
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_STREAM_START_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
- }
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = event.encoding
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = yaml_UTF8_ENCODING
- }
- }
- if emitter.best_indent < 2 || emitter.best_indent > 9 {
- emitter.best_indent = 2
- }
- if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
- emitter.best_width = 80
- }
- if emitter.best_width < 0 {
- emitter.best_width = 1<<31 - 1
- }
- if emitter.line_break == yaml_ANY_BREAK {
- emitter.line_break = yaml_LN_BREAK
- }
-
- emitter.indent = -1
- emitter.line = 0
- emitter.column = 0
- emitter.whitespace = true
- emitter.indention = true
-
- if emitter.encoding != yaml_UTF8_ENCODING {
- if !yaml_emitter_write_bom(emitter) {
- return false
- }
- }
- emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
- return true
-}
-
-// Expect DOCUMENT-START or STREAM-END.
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-
- if event.typ == yaml_DOCUMENT_START_EVENT {
-
- if event.version_directive != nil {
- if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
- return false
- }
- }
-
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
- return false
- }
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
- return false
- }
- }
-
- for i := 0; i < len(default_tag_directives); i++ {
- tag_directive := &default_tag_directives[i]
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
- return false
- }
- }
-
- implicit := event.implicit
- if !first || emitter.canonical {
- implicit = false
- }
-
- if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if event.version_directive != nil {
- implicit = false
- if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if len(event.tag_directives) > 0 {
- implicit = false
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- if yaml_emitter_check_empty_document(emitter) {
- implicit = false
- }
- if !implicit {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
- return false
- }
- if emitter.canonical {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
- return true
- }
-
- if event.typ == yaml_STREAM_END_EVENT {
- if emitter.open_ended {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_END_STATE
- return true
- }
-
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
-}
-
-// Expect the root node.
-func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
- return yaml_emitter_emit_node(emitter, event, true, false, false, false)
-}
-
-// Expect DOCUMENT-END.
-func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_DOCUMENT_END_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !event.implicit {
- // [Go] Allocate the slice elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_DOCUMENT_START_STATE
- emitter.tag_directives = emitter.tag_directives[:0]
- return true
-}
-
-// Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
-
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
-
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a flow value node.
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block item node.
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
- return false
- }
- }
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a block key node.
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, false) {
- return false
- }
- }
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block value node.
-func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a node.
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
- root bool, sequence bool, mapping bool, simple_key bool) bool {
-
- emitter.root_context = root
- emitter.sequence_context = sequence
- emitter.mapping_context = mapping
- emitter.simple_key_context = simple_key
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- return yaml_emitter_emit_alias(emitter, event)
- case yaml_SCALAR_EVENT:
- return yaml_emitter_emit_scalar(emitter, event)
- case yaml_SEQUENCE_START_EVENT:
- return yaml_emitter_emit_sequence_start(emitter, event)
- case yaml_MAPPING_START_EVENT:
- return yaml_emitter_emit_mapping_start(emitter, event)
- default:
- return yaml_emitter_set_emitter_error(emitter,
- "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
- }
- return false
-}
-
-// Expect ALIAS.
-func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SCALAR.
-func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_select_scalar_style(emitter, event) {
- return false
- }
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- if !yaml_emitter_process_scalar(emitter) {
- return false
- }
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
- yaml_emitter_check_empty_sequence(emitter) {
- emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
- }
- return true
-}
-
-// Expect MAPPING-START.
-func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
- yaml_emitter_check_empty_mapping(emitter) {
- emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
- }
- return true
-}
-
-// Check if the document content is an empty scalar.
-func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
- return false // [Go] Huh?
-}
-
-// Check if the next events represent an empty sequence.
-func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
-}
-
-// Check if the next events represent an empty mapping.
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
-}
-
-// Check if the next node can be expressed as a simple key.
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
- length := 0
- switch emitter.events[emitter.events_head].typ {
- case yaml_ALIAS_EVENT:
- length += len(emitter.anchor_data.anchor)
- case yaml_SCALAR_EVENT:
- if emitter.scalar_data.multiline {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix) +
- len(emitter.scalar_data.value)
- case yaml_SEQUENCE_START_EVENT:
- if !yaml_emitter_check_empty_sequence(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- case yaml_MAPPING_START_EVENT:
- if !yaml_emitter_check_empty_mapping(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- default:
- return false
- }
- return length <= 128
-}
-
-// Determine an acceptable scalar style.
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
- if no_tag && !event.implicit && !event.quoted_implicit {
- return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
- }
-
- style := event.scalar_style()
- if style == yaml_ANY_SCALAR_STYLE {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- if emitter.canonical {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- if emitter.simple_key_context && emitter.scalar_data.multiline {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
-
- if style == yaml_PLAIN_SCALAR_STYLE {
- if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
- emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if no_tag && !event.implicit {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
- if !emitter.scalar_data.single_quoted_allowed {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
- if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
-
- if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
- emitter.tag_data.handle = []byte{'!'}
- }
- emitter.scalar_data.style = style
- return true
-}
-
-// Write an anchor.
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
- if emitter.anchor_data.anchor == nil {
- return true
- }
- c := []byte{'&'}
- if emitter.anchor_data.alias {
- c[0] = '*'
- }
- if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
- return false
- }
- return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
- if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
- return true
- }
- if len(emitter.tag_data.handle) > 0 {
- if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
- return false
- }
- if len(emitter.tag_data.suffix) > 0 {
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- }
- } else {
- // [Go] Allocate these slices elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
- return false
- }
- }
- return true
-}
-
-// Write a scalar.
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
- switch emitter.scalar_data.style {
- case yaml_PLAIN_SCALAR_STYLE:
- return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_SINGLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_LITERAL_SCALAR_STYLE:
- return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
-
- case yaml_FOLDED_SCALAR_STYLE:
- return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
- }
- panic("unknown scalar style")
-}
-
-// Check if a %YAML directive is valid.
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
- if version_directive.major != 1 || version_directive.minor != 1 {
- return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
- }
- return true
-}
-
-// Check if a %TAG directive is valid.
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
- handle := tag_directive.handle
- prefix := tag_directive.prefix
- if len(handle) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
- }
- if handle[0] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
- }
- if handle[len(handle)-1] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
- }
- for i := 1; i < len(handle)-1; i += width(handle[i]) {
- if !is_alpha(handle, i) {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
- }
- }
- if len(prefix) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
- }
- return true
-}
-
-// Check if an anchor is valid.
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
- if len(anchor) == 0 {
- problem := "anchor value must not be empty"
- if alias {
- problem = "alias value must not be empty"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- for i := 0; i < len(anchor); i += width(anchor[i]) {
- if !is_alpha(anchor, i) {
- problem := "anchor value must contain alphanumerical characters only"
- if alias {
- problem = "alias value must contain alphanumerical characters only"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- }
- emitter.anchor_data.anchor = anchor
- emitter.anchor_data.alias = alias
- return true
-}
-
-// Check if a tag is valid.
-func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
- if len(tag) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
- }
- for i := 0; i < len(emitter.tag_directives); i++ {
- tag_directive := &emitter.tag_directives[i]
- if bytes.HasPrefix(tag, tag_directive.prefix) {
- emitter.tag_data.handle = tag_directive.handle
- emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
- return true
- }
- }
- emitter.tag_data.suffix = tag
- return true
-}
-
-// Check if a scalar is valid.
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
- var (
- block_indicators = false
- flow_indicators = false
- line_breaks = false
- special_characters = false
-
- leading_space = false
- leading_break = false
- trailing_space = false
- trailing_break = false
- break_space = false
- space_break = false
-
- preceeded_by_whitespace = false
- followed_by_whitespace = false
- previous_space = false
- previous_break = false
- )
-
- emitter.scalar_data.value = value
-
- if len(value) == 0 {
- emitter.scalar_data.multiline = false
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = false
- return true
- }
-
- if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
- block_indicators = true
- flow_indicators = true
- }
-
- preceeded_by_whitespace = true
- for i, w := 0, 0; i < len(value); i += w {
-		w = width(value[i])
- followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
-
- if i == 0 {
- switch value[i] {
- case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
- flow_indicators = true
- block_indicators = true
- case '?', ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '-':
- if followed_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- } else {
- switch value[i] {
- case ',', '?', '[', ']', '{', '}':
- flow_indicators = true
- case ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '#':
- if preceeded_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- }
-
- if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
- special_characters = true
- }
- if is_space(value, i) {
- if i == 0 {
- leading_space = true
- }
- if i+width(value[i]) == len(value) {
- trailing_space = true
- }
- if previous_break {
- break_space = true
- }
- previous_space = true
- previous_break = false
- } else if is_break(value, i) {
- line_breaks = true
- if i == 0 {
- leading_break = true
- }
- if i+width(value[i]) == len(value) {
- trailing_break = true
- }
- if previous_space {
- space_break = true
- }
- previous_space = false
- previous_break = true
- } else {
- previous_space = false
- previous_break = false
- }
-
- // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
- preceeded_by_whitespace = is_blankz(value, i)
- }
-
- emitter.scalar_data.multiline = line_breaks
- emitter.scalar_data.flow_plain_allowed = true
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = true
-
- if leading_space || leading_break || trailing_space || trailing_break {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if trailing_space {
- emitter.scalar_data.block_allowed = false
- }
- if break_space {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- }
- if space_break || special_characters {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- emitter.scalar_data.block_allowed = false
- }
- if line_breaks {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if flow_indicators {
- emitter.scalar_data.flow_plain_allowed = false
- }
- if block_indicators {
- emitter.scalar_data.block_plain_allowed = false
- }
- return true
-}
-
-// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- emitter.anchor_data.anchor = nil
- emitter.tag_data.handle = nil
- emitter.tag_data.suffix = nil
- emitter.scalar_data.value = nil
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
- return false
- }
-
- case yaml_SCALAR_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- if !yaml_emitter_analyze_scalar(emitter, event.value) {
- return false
- }
-
- case yaml_SEQUENCE_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
-
- case yaml_MAPPING_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- }
- return true
-}
-
-// Write the BOM character.
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
- if !flush(emitter) {
- return false
- }
- pos := emitter.buffer_pos
- emitter.buffer[pos+0] = '\xEF'
- emitter.buffer[pos+1] = '\xBB'
- emitter.buffer[pos+2] = '\xBF'
- emitter.buffer_pos += 3
- return true
-}
-
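-// Write the indentation, breaking the line first unless the output is already at a fresh, indented position.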
-func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
- indent := emitter.indent
- if indent < 0 {
- indent = 0
- }
- if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
- if !put_break(emitter) {
- return false
- }
- }
- for emitter.column < indent {
- if !put(emitter, ' ') {
- return false
- }
- }
- emitter.whitespace = true
- emitter.indention = true
- return true
-}
-
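-// Write an indicator token (for example '-', ':', '[', or '---'), adding a leading space when one is required.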
-func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, indicator) {
- return false
- }
- emitter.whitespace = is_whitespace
- emitter.indention = (emitter.indention && is_indention)
- emitter.open_ended = false
- return true
-}
-
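-// Write an anchor or alias name after its '&' or '*' indicator.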
-func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
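-// Write a tag handle, preceded by a space when the output is not already at whitespace.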
-func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- for i := 0; i < len(value); {
- var must_write bool
- switch value[i] {
- case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
- must_write = true
- default:
- must_write = is_alpha(value, i)
- }
- if must_write {
- if !write(emitter, value, &i) {
- return false
- }
- } else {
- w := width(value[i])
- for k := 0; k < w; k++ {
- octet := value[i]
- i++
- if !put(emitter, '%') {
- return false
- }
-
- c := octet >> 4
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
-
- c = octet & 0x0f
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
- }
- }
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
-
- emitter.whitespace = false
- emitter.indention = false
- if emitter.root_context {
- emitter.open_ended = true
- }
-
- return true
-}
-
-func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
- return false
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if value[i] == '\'' {
- if !put(emitter, '\'') {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- spaces := false
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
- return false
- }
-
- for i := 0; i < len(value); {
- if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
- is_bom(value, i) || is_break(value, i) ||
- value[i] == '"' || value[i] == '\\' {
-
- octet := value[i]
-
- var w int
- var v rune
- switch {
- case octet&0x80 == 0x00:
- w, v = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, v = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, v = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, v = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = value[i+k]
- v = (v << 6) + (rune(octet) & 0x3F)
- }
- i += w
-
- if !put(emitter, '\\') {
- return false
- }
-
- var ok bool
- switch v {
- case 0x00:
- ok = put(emitter, '0')
- case 0x07:
- ok = put(emitter, 'a')
- case 0x08:
- ok = put(emitter, 'b')
- case 0x09:
- ok = put(emitter, 't')
- case 0x0A:
- ok = put(emitter, 'n')
- case 0x0b:
- ok = put(emitter, 'v')
- case 0x0c:
- ok = put(emitter, 'f')
- case 0x0d:
- ok = put(emitter, 'r')
- case 0x1b:
- ok = put(emitter, 'e')
- case 0x22:
- ok = put(emitter, '"')
- case 0x5c:
- ok = put(emitter, '\\')
- case 0x85:
- ok = put(emitter, 'N')
- case 0xA0:
- ok = put(emitter, '_')
- case 0x2028:
- ok = put(emitter, 'L')
- case 0x2029:
- ok = put(emitter, 'P')
- default:
- if v <= 0xFF {
- ok = put(emitter, 'x')
- w = 2
- } else if v <= 0xFFFF {
- ok = put(emitter, 'u')
- w = 4
- } else {
- ok = put(emitter, 'U')
- w = 8
- }
- for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
- digit := byte((v >> uint(k)) & 0x0F)
- if digit < 10 {
- ok = put(emitter, digit+'0')
- } else {
- ok = put(emitter, digit+'A'-10)
- }
- }
- }
- if !ok {
- return false
- }
- spaces = false
- } else if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if is_space(value, i+1) {
- if !put(emitter, '\\') {
- return false
- }
- }
- i += width(value[i])
- } else if !write(emitter, value, &i) {
- return false
- }
- spaces = true
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- spaces = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
- if is_space(value, 0) || is_break(value, 0) {
- indent_hint := []byte{'0' + byte(emitter.best_indent)}
- if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
- return false
- }
- }
-
- emitter.open_ended = false
-
- var chomp_hint [1]byte
- if len(value) == 0 {
- chomp_hint[0] = '-'
- } else {
- i := len(value) - 1
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if !is_break(value, i) {
- chomp_hint[0] = '-'
- } else if i == 0 {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- } else {
- i--
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if is_break(value, i) {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- }
- }
- }
- if chomp_hint[0] != 0 {
- if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
- return false
- }
- }
- return true
-}
-
-func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
- breaks := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- breaks = false
- }
- }
-
- return true
-}
-
-func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
-
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
-
- breaks := true
- leading_spaces := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !breaks && !leading_spaces && value[i] == '\n' {
- k := 0
- for is_break(value, k) {
- k += width(value[k])
- }
- if !is_blankz(value, k) {
- if !put_break(emitter) {
- return false
- }
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- leading_spaces = is_blank(value, i)
- }
- if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- emitter.indention = false
- breaks = false
- }
- }
- return true
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/encode.go b/Godeps/_workspace/src/github.com/coreos/yaml/encode.go
deleted file mode 100644
index 0b9048d..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/encode.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package yaml
-
-import (
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type encoder struct {
- emitter yaml_emitter_t
- event yaml_event_t
- out []byte
- flow bool
-}
-
-func newEncoder() (e *encoder) {
- e = &encoder{}
- e.must(yaml_emitter_initialize(&e.emitter))
- yaml_emitter_set_output_string(&e.emitter, &e.out)
- e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
- e.emit()
- e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
- e.emit()
- return e
-}
-
-func (e *encoder) finish() {
- e.must(yaml_document_end_event_initialize(&e.event, true))
- e.emit()
- e.emitter.open_ended = false
- e.must(yaml_stream_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) destroy() {
- yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
- // This will internally delete the e.event value.
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
- e.must(false)
- }
-}
-
-func (e *encoder) must(ok bool) {
- if !ok {
- msg := e.emitter.problem
- if msg == "" {
- msg = "Unknown problem generating YAML content"
- }
- fail(msg)
- }
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
- if !in.IsValid() {
- e.nilv()
- return
- }
- var value interface{}
- if getter, ok := in.Interface().(Getter); ok {
- tag, value = getter.GetYAML()
- tag = longTag(tag)
- if value == nil {
- e.nilv()
- return
- }
- in = reflect.ValueOf(value)
- }
- switch in.Kind() {
- case reflect.Interface:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Map:
- e.mapv(tag, in)
- case reflect.Ptr:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Struct:
- e.structv(tag, in)
- case reflect.Slice:
- e.slicev(tag, in)
- case reflect.String:
- e.stringv(tag, in)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if in.Type() == durationType {
- e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
- } else {
- e.intv(tag, in)
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- e.uintv(tag, in)
- case reflect.Float32, reflect.Float64:
- e.floatv(tag, in)
- case reflect.Bool:
- e.boolv(tag, in)
- default:
- panic("Can't marshal type: " + in.Type().String())
- }
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- keys := keyList(in.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- e.marshal("", k)
- e.marshal("", in.MapIndex(k))
- }
- })
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
- sinfo, err := getStructInfo(in.Type())
- if err != nil {
- panic(err)
- }
- e.mappingv(tag, func() {
- for _, info := range sinfo.FieldsList {
- var value reflect.Value
- if info.Inline == nil {
- value = in.Field(info.Num)
- } else {
- value = in.FieldByIndex(info.Inline)
- }
- if info.OmitEmpty && isZero(value) {
- continue
- }
- e.marshal("", reflect.ValueOf(info.Key))
- e.flow = info.Flow
- e.marshal("", value)
- }
- })
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
- implicit := tag == ""
- style := yaml_BLOCK_MAPPING_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_MAPPING_STYLE
- }
- e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- f()
- e.must(yaml_mapping_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
- implicit := tag == ""
- style := yaml_BLOCK_SEQUENCE_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_SEQUENCE_STYLE
- }
- e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- n := in.Len()
- for i := 0; i < n; i++ {
- e.marshal("", in.Index(i))
- }
- e.must(yaml_sequence_end_event_initialize(&e.event))
- e.emit()
-}
-
-// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but these should be marshalled quoted for
-// the time being for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
- // Fast path.
- if s == "" {
- return false
- }
- c := s[0]
- if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
- return false
- }
- // Do the full match.
- return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
-func (e *encoder) stringv(tag string, in reflect.Value) {
- var style yaml_scalar_style_t
- s := in.String()
- rtag, rs := resolve("", s)
- if rtag == yaml_BINARY_TAG {
- if tag == "" || tag == yaml_STR_TAG {
- tag = rtag
- s = rs.(string)
- } else if tag == yaml_BINARY_TAG {
- fail("explicitly tagged !!binary data must be base64-encoded")
- } else {
- fail("cannot marshal invalid UTF-8 data as " + shortTag(tag))
- }
- }
- if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- } else if strings.Contains(s, "\n") {
- style = yaml_LITERAL_SCALAR_STYLE
- } else {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- e.emitScalar(s, "", tag, style)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
- var s string
- if in.Bool() {
- s = "true"
- } else {
- s = "false"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
- s := strconv.FormatInt(in.Int(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
- s := strconv.FormatUint(in.Uint(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
- // FIXME: Handle 64 bits here.
- s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
- switch s {
- case "+Inf":
- s = ".inf"
- case "-Inf":
- s = "-.inf"
- case "NaN":
- s = ".nan"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) nilv() {
- e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
- implicit := tag == ""
- e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
- e.emit()
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/encode_test.go b/Godeps/_workspace/src/github.com/coreos/yaml/encode_test.go
deleted file mode 100644
index 2cd0ea7..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/encode_test.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package yaml_test
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/yaml"
- . "gopkg.in/check.v1"
-)
-
-var marshalIntTest = 123
-
-var marshalTests = []struct {
- value interface{}
- data string
-}{
- {
- nil,
- "null\n",
- }, {
- &struct{}{},
- "{}\n",
- }, {
- map[string]string{"v": "hi"},
- "v: hi\n",
- }, {
- map[string]interface{}{"v": "hi"},
- "v: hi\n",
- }, {
- map[string]string{"v": "true"},
- "v: \"true\"\n",
- }, {
- map[string]string{"v": "false"},
- "v: \"false\"\n",
- }, {
- map[string]interface{}{"v": true},
- "v: true\n",
- }, {
- map[string]interface{}{"v": false},
- "v: false\n",
- }, {
- map[string]interface{}{"v": 10},
- "v: 10\n",
- }, {
- map[string]interface{}{"v": -10},
- "v: -10\n",
- }, {
- map[string]uint{"v": 42},
- "v: 42\n",
- }, {
- map[string]interface{}{"v": int64(4294967296)},
- "v: 4294967296\n",
- }, {
- map[string]int64{"v": int64(4294967296)},
- "v: 4294967296\n",
- }, {
- map[string]uint64{"v": 4294967296},
- "v: 4294967296\n",
- }, {
- map[string]interface{}{"v": "10"},
- "v: \"10\"\n",
- }, {
- map[string]interface{}{"v": 0.1},
- "v: 0.1\n",
- }, {
- map[string]interface{}{"v": float64(0.1)},
- "v: 0.1\n",
- }, {
- map[string]interface{}{"v": -0.1},
- "v: -0.1\n",
- }, {
- map[string]interface{}{"v": math.Inf(+1)},
- "v: .inf\n",
- }, {
- map[string]interface{}{"v": math.Inf(-1)},
- "v: -.inf\n",
- }, {
- map[string]interface{}{"v": math.NaN()},
- "v: .nan\n",
- }, {
- map[string]interface{}{"v": nil},
- "v: null\n",
- }, {
- map[string]interface{}{"v": ""},
- "v: \"\"\n",
- }, {
- map[string][]string{"v": []string{"A", "B"}},
- "v:\n- A\n- B\n",
- }, {
- map[string][]string{"v": []string{"A", "B\nC"}},
- "v:\n- A\n- |-\n B\n C\n",
- }, {
- map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
- "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
- }, {
- map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
- "a:\n b: c\n",
- }, {
- map[string]interface{}{"a": "-"},
- "a: '-'\n",
- },
-
- // Simple values.
- {
- &marshalIntTest,
- "123\n",
- },
-
- // Structures
- {
- &struct{ Hello string }{"world"},
- "hello: world\n",
- }, {
- &struct {
- A struct {
- B string
- }
- }{struct{ B string }{"c"}},
- "a:\n b: c\n",
- }, {
- &struct {
- A *struct {
- B string
- }
- }{&struct{ B string }{"c"}},
- "a:\n b: c\n",
- }, {
- &struct {
- A *struct {
- B string
- }
- }{},
- "a: null\n",
- }, {
- &struct{ A int }{1},
- "a: 1\n",
- }, {
- &struct{ A []int }{[]int{1, 2}},
- "a:\n- 1\n- 2\n",
- }, {
- &struct {
- B int "a"
- }{1},
- "a: 1\n",
- }, {
- &struct{ A bool }{true},
- "a: true\n",
- },
-
- // Conditional flag
- {
- &struct {
- A int "a,omitempty"
- B int "b,omitempty"
- }{1, 0},
- "a: 1\n",
- }, {
- &struct {
- A int "a,omitempty"
- B int "b,omitempty"
- }{0, 0},
- "{}\n",
- }, {
- &struct {
- A *struct{ X int } "a,omitempty"
- B int "b,omitempty"
- }{nil, 0},
- "{}\n",
- },
-
- // Flow flag
- {
- &struct {
- A []int "a,flow"
- }{[]int{1, 2}},
- "a: [1, 2]\n",
- }, {
- &struct {
- A map[string]string "a,flow"
- }{map[string]string{"b": "c", "d": "e"}},
- "a: {b: c, d: e}\n",
- }, {
- &struct {
- A struct {
- B, D string
- } "a,flow"
- }{struct{ B, D string }{"c", "e"}},
- "a: {b: c, d: e}\n",
- },
-
- // Unexported field
- {
- &struct {
- u int
- A int
- }{0, 1},
- "a: 1\n",
- },
-
- // Ignored field
- {
- &struct {
- A int
- B int "-"
- }{1, 2},
- "a: 1\n",
- },
-
- // Struct inlining
- {
- &struct {
- A int
- C inlineB `yaml:",inline"`
- }{1, inlineB{2, inlineC{3}}},
- "a: 1\nb: 2\nc: 3\n",
- },
-
- // Duration
- {
- map[string]time.Duration{"a": 3 * time.Second},
- "a: 3s\n",
- },
-
- // Issue #24: bug in map merging logic.
- {
- map[string]string{"a": ""},
- "a: \n",
- },
-
- // Issue #34: marshal unsupported base 60 floats quoted for compatibility
- // with old YAML 1.1 parsers.
- {
- map[string]string{"a": "1:1"},
- "a: \"1:1\"\n",
- },
-
- // Binary data.
- {
- map[string]string{"a": "\x00"},
- "a: \"\\0\"\n",
- }, {
- map[string]string{"a": "\x80\x81\x82"},
- "a: !!binary gIGC\n",
- }, {
- map[string]string{"a": strings.Repeat("\x90", 54)},
- "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
- }, {
- map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}},
- "a: !!binary gIGC\n",
- },
-
- // Escaping of tags.
- {
- map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}},
- "a: ! 1\n",
- },
-}
-
-func (s *S) TestMarshal(c *C) {
- for _, item := range marshalTests {
- data, err := yaml.Marshal(item.value)
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, item.data)
- }
-}
-
-var marshalErrorTests = []struct {
- value interface{}
- error string
- panic string
-}{{
- value: &struct {
- B int
- inlineB ",inline"
- }{1, inlineB{2, inlineC{3}}},
- panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
-}, {
- value: typeWithGetter{"!!binary", "\x80"},
- error: "YAML error: explicitly tagged !!binary data must be base64-encoded",
-}, {
- value: typeWithGetter{"!!float", "\x80"},
- error: `YAML error: cannot marshal invalid UTF-8 data as !!float`,
-}}
-
-func (s *S) TestMarshalErrors(c *C) {
- for _, item := range marshalErrorTests {
- if item.panic != "" {
- c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
- } else {
- _, err := yaml.Marshal(item.value)
- c.Assert(err, ErrorMatches, item.error)
- }
- }
-}
-
-var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"}
-
-var getterTests = []struct {
- data, tag string
- value interface{}
-}{
- {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}},
- {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}},
- {"_: 10\n", "", 10},
- {"_: null\n", "", nil},
- {"_: !foo BAR!\n", "!foo", "BAR!"},
- {"_: !foo 1\n", "!foo", "1"},
- {"_: !foo '\"1\"'\n", "!foo", "\"1\""},
- {"_: !foo 1.1\n", "!foo", 1.1},
- {"_: !foo 1\n", "!foo", 1},
- {"_: !foo 1\n", "!foo", uint(1)},
- {"_: !foo true\n", "!foo", true},
- {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}},
- {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}},
- {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest},
-}
-
-func (s *S) TestMarshalTypeCache(c *C) {
- var data []byte
- var err error
- func() {
- type T struct{ A int }
- data, err = yaml.Marshal(&T{})
- c.Assert(err, IsNil)
- }()
- func() {
- type T struct{ B int }
- data, err = yaml.Marshal(&T{})
- c.Assert(err, IsNil)
- }()
- c.Assert(string(data), Equals, "b: 0\n")
-}
-
-type typeWithGetter struct {
- tag string
- value interface{}
-}
-
-func (o typeWithGetter) GetYAML() (tag string, value interface{}) {
- return o.tag, o.value
-}
-
-type typeWithGetterField struct {
- Field typeWithGetter "_"
-}
-
-func (s *S) TestMashalWithGetter(c *C) {
- for _, item := range getterTests {
- obj := &typeWithGetterField{}
- obj.Field.tag = item.tag
- obj.Field.value = item.value
- data, err := yaml.Marshal(obj)
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, string(item.data))
- }
-}
-
-func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
- obj := &typeWithGetter{}
- obj.tag = ""
- obj.value = map[string]string{"hello": "world!"}
- data, err := yaml.Marshal(obj)
- c.Assert(err, IsNil)
- c.Assert(string(data), Equals, "hello: world!\n")
-}
-
-func (s *S) TestSortedOutput(c *C) {
- order := []interface{}{
- false,
- true,
- 1,
- uint(1),
- 1.0,
- 1.1,
- 1.2,
- 2,
- uint(2),
- 2.0,
- 2.1,
- "",
- ".1",
- ".2",
- ".a",
- "1",
- "2",
- "a!10",
- "a/2",
- "a/10",
- "a~10",
- "ab/1",
- "b/1",
- "b/01",
- "b/2",
- "b/02",
- "b/3",
- "b/03",
- "b1",
- "b01",
- "b3",
- "c2.10",
- "c10.2",
- "d1",
- "d12",
- "d12a",
- }
- m := make(map[interface{}]int)
- for _, k := range order {
- m[k] = 1
- }
- data, err := yaml.Marshal(m)
- c.Assert(err, IsNil)
- out := "\n" + string(data)
- last := 0
- for i, k := range order {
- repr := fmt.Sprint(k)
- if s, ok := k.(string); ok {
- if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
- repr = `"` + repr + `"`
- }
- }
- index := strings.Index(out, "\n"+repr+":")
- if index == -1 {
- c.Fatalf("%#v is not in the output: %#v", k, out)
- }
- if index < last {
- c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
- }
- last = index
- }
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/parserc.go b/Godeps/_workspace/src/github.com/coreos/yaml/parserc.go
deleted file mode 100644
index 0a7037a..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/parserc.go
+++ /dev/null
@@ -1,1096 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// The parser implements the following grammar:
-//
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// implicit_document ::= block_node DOCUMENT-END*
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// | properties (block_content | indentless_block_sequence)?
-// | block_content
-// | indentless_block_sequence
-// block_node ::= ALIAS
-// | properties block_content?
-// | block_content
-// flow_node ::= ALIAS
-// | properties flow_content?
-// | flow_content
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// block_content ::= block_collection | flow_collection | SCALAR
-// flow_content ::= flow_collection | SCALAR
-// block_collection ::= block_sequence | block_mapping
-// flow_collection ::= flow_sequence | flow_mapping
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// block_mapping ::= BLOCK-MAPPING_START
-// ((KEY block_node_or_indentless_sequence?)?
-// (VALUE block_node_or_indentless_sequence?)?)*
-// BLOCK-END
-// flow_sequence ::= FLOW-SEQUENCE-START
-// (flow_sequence_entry FLOW-ENTRY)*
-// flow_sequence_entry?
-// FLOW-SEQUENCE-END
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// flow_mapping ::= FLOW-MAPPING-START
-// (flow_mapping_entry FLOW-ENTRY)*
-// flow_mapping_entry?
-// FLOW-MAPPING-END
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-// Peek the next token in the token queue.
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
- if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
- return &parser.tokens[parser.tokens_head]
- }
- return nil
-}
-
-// Remove the next token from the queue (must be called after peek_token).
-func skip_token(parser *yaml_parser_t) {
- parser.token_available = false
- parser.tokens_parsed++
- parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
- parser.tokens_head++
-}
-
-// Get the next event.
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
- // Erase the event object.
- *event = yaml_event_t{}
-
- // No events after the end of the stream or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
- return true
- }
-
- // Generate the next event.
- return yaml_parser_state_machine(parser, event)
-}
-
-// Set parser error.
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-// State dispatcher.
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
- //trace("yaml_parser_state_machine", "state:", parser.state.String())
-
- switch parser.state {
- case yaml_PARSE_STREAM_START_STATE:
- return yaml_parser_parse_stream_start(parser, event)
-
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, true)
-
- case yaml_PARSE_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, false)
-
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return yaml_parser_parse_document_content(parser, event)
-
- case yaml_PARSE_DOCUMENT_END_STATE:
- return yaml_parser_parse_document_end(parser, event)
-
- case yaml_PARSE_BLOCK_NODE_STATE:
- return yaml_parser_parse_node(parser, event, true, false)
-
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return yaml_parser_parse_node(parser, event, true, true)
-
- case yaml_PARSE_FLOW_NODE_STATE:
- return yaml_parser_parse_node(parser, event, false, false)
-
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, true)
-
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, false)
-
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_indentless_sequence_entry(parser, event)
-
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, true)
-
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, false)
-
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return yaml_parser_parse_block_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, true)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, false)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
-
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, true)
-
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, true)
-
- default:
- panic("invalid parser state")
- }
- return false
-}
-
-// Parse the production:
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_STREAM_START_TOKEN {
- return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
- }
- parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- encoding: token.encoding,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- // Parse extra document end indicators.
- if !implicit {
- for token.typ == yaml_DOCUMENT_END_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
- token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
- token.typ != yaml_DOCUMENT_START_TOKEN &&
- token.typ != yaml_STREAM_END_TOKEN {
- // Parse an implicit document.
- if !yaml_parser_process_directives(parser, nil, nil) {
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- } else if token.typ != yaml_STREAM_END_TOKEN {
- // Parse an explicit document.
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
- start_mark := token.start_mark
- if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
- return false
- }
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_DOCUMENT_START_TOKEN {
- yaml_parser_set_parser_error(parser,
- "did not find expected ", token.start_mark)
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
- end_mark := token.end_mark
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: false,
- }
- skip_token(parser)
-
- } else {
- // Parse the stream end.
- parser.state = yaml_PARSE_END_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- }
-
- return true
-}
-
-// Parse the productions:
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// ***********
-//
-func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
- token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
- token.typ == yaml_DOCUMENT_START_TOKEN ||
- token.typ == yaml_DOCUMENT_END_TOKEN ||
- token.typ == yaml_STREAM_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- return yaml_parser_process_empty_scalar(parser, event,
- token.start_mark)
- }
- return yaml_parser_parse_node(parser, event, true, false)
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *************
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//
-func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- implicit := true
- if token.typ == yaml_DOCUMENT_END_TOKEN {
- end_mark = token.end_mark
- skip_token(parser)
- implicit = false
- }
-
- parser.tag_directives = parser.tag_directives[:0]
-
- parser.state = yaml_PARSE_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- implicit: implicit,
- }
- return true
-}
-
-// Parse the productions:
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// *****
-// | properties (block_content | indentless_block_sequence)?
-// ********** *
-// | block_content | indentless_block_sequence
-// *
-// block_node ::= ALIAS
-// *****
-// | properties block_content?
-// ********** *
-// | block_content
-// *
-// flow_node ::= ALIAS
-// *****
-// | properties flow_content?
-// ********** *
-// | flow_content
-// *
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// *************************
-// block_content ::= block_collection | flow_collection | SCALAR
-// ******
-// flow_content ::= flow_collection | SCALAR
-// ******
-func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
- //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_ALIAS_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- *event = yaml_event_t{
- typ: yaml_ALIAS_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- anchor: token.value,
- }
- skip_token(parser)
- return true
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- var tag_token bool
- var tag_handle, tag_suffix, anchor []byte
- var tag_mark yaml_mark_t
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- start_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- } else if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- start_mark = token.start_mark
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- var tag []byte
- if tag_token {
- if len(tag_handle) == 0 {
- tag = tag_suffix
- tag_suffix = nil
- } else {
- for i := range parser.tag_directives {
- if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
- tag = append([]byte(nil), parser.tag_directives[i].prefix...)
- tag = append(tag, tag_suffix...)
- break
- }
- }
- if len(tag) == 0 {
- yaml_parser_set_parser_error_context(parser,
- "while parsing a node", start_mark,
- "found undefined tag handle", tag_mark)
- return false
- }
- }
- }
-
- implicit := len(tag) == 0
- if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_SCALAR_TOKEN {
- var plain_implicit, quoted_implicit bool
- end_mark = token.end_mark
- if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
- plain_implicit = true
- } else if len(tag) == 0 {
- quoted_implicit = true
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- value: token.value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(token.style),
- }
- skip_token(parser)
- return true
- }
- if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
- // [Go] Some of the events below can be merged as they differ only on style.
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
- }
- return true
- }
- if len(anchor) > 0 || len(tag) > 0 {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- quoted_implicit: false,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
- }
-
- context := "while parsing a flow node"
- if block {
- context = "while parsing a block node"
- }
- yaml_parser_set_parser_error_context(parser, context, start_mark,
- "did not find expected node content", token.start_mark)
- return false
-}
-
-// Parse the productions:
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// ******************** *********** * *********
-//
-func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- } else {
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- }
- if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block collection", context_mark,
- "did not find expected '-' indicator", token.start_mark)
-}
-
-// Parse the productions:
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// *********** *
-func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
- token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- }
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-// *******************
-// ((KEY block_node_or_indentless_sequence?)?
-// *** *
-// (VALUE block_node_or_indentless_sequence?)?)*
-//
-// BLOCK-END
-// *********
-//
-func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_KEY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- } else {
- parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- } else if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block mapping", context_mark,
- "did not find expected key", token.start_mark)
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-//
-// ((KEY block_node_or_indentless_sequence?)?
-//
-// (VALUE block_node_or_indentless_sequence?)?)*
-// ***** *
-// BLOCK-END
-//
-//
-func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence ::= FLOW-SEQUENCE-START
-// *******************
-// (flow_sequence_entry FLOW-ENTRY)*
-// * **********
-// flow_sequence_entry?
-// *
-// FLOW-SEQUENCE-END
-// *****************
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow sequence", context_mark,
- "did not find expected ',' or ']'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- implicit: true,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- skip_token(parser)
- return true
- } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
-}
-
-//
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- mark := token.end_mark
- skip_token(parser)
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// ***** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// flow_mapping ::= FLOW-MAPPING-START
-// ******************
-// (flow_mapping_entry FLOW-ENTRY)*
-// * **********
-// flow_mapping_entry?
-// ******************
-// FLOW-MAPPING-END
-// ****************
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * *** *
-//
-func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow mapping", context_mark,
- "did not find expected ',' or '}'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- } else {
- parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * ***** *
-//
-func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if empty {
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Generate an empty scalar event.
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: mark,
- end_mark: mark,
- value: nil, // Empty
- implicit: true,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
-}
-
-var default_tag_directives = []yaml_tag_directive_t{
- {[]byte("!"), []byte("!")},
- {[]byte("!!"), []byte("tag:yaml.org,2002:")},
-}
-
-// Parse directives.
-func yaml_parser_process_directives(parser *yaml_parser_t,
- version_directive_ref **yaml_version_directive_t,
- tag_directives_ref *[]yaml_tag_directive_t) bool {
-
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
- if version_directive != nil {
- yaml_parser_set_parser_error(parser,
- "found duplicate %YAML directive", token.start_mark)
- return false
- }
- if token.major != 1 || token.minor != 1 {
- yaml_parser_set_parser_error(parser,
- "found incompatible YAML document", token.start_mark)
- return false
- }
- version_directive = &yaml_version_directive_t{
- major: token.major,
- minor: token.minor,
- }
- } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- value := yaml_tag_directive_t{
- handle: token.value,
- prefix: token.prefix,
- }
- if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
- return false
- }
- tag_directives = append(tag_directives, value)
- }
-
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
-
- for i := range default_tag_directives {
- if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
- return false
- }
- }
-
- if version_directive_ref != nil {
- *version_directive_ref = version_directive
- }
- if tag_directives_ref != nil {
- *tag_directives_ref = tag_directives
- }
- return true
-}
-
-// Append a tag directive to the directives stack.
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
- for i := range parser.tag_directives {
- if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
- }
- }
-
- // [Go] I suspect the copy is unnecessary. This was likely done
- // because there was no way to track ownership of the data.
- value_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(value_copy.handle, value.handle)
- copy(value_copy.prefix, value.prefix)
- parser.tag_directives = append(parser.tag_directives, value_copy)
- return true
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/readerc.go b/Godeps/_workspace/src/github.com/coreos/yaml/readerc.go
deleted file mode 100644
index d5fb097..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/readerc.go
+++ /dev/null
@@ -1,391 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// Set the reader error and return 0.
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
- parser.error = yaml_READER_ERROR
- parser.problem = problem
- parser.problem_offset = offset
- parser.problem_value = value
- return false
-}
-
-// Byte order marks.
-const (
- bom_UTF8 = "\xef\xbb\xbf"
- bom_UTF16LE = "\xff\xfe"
- bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
-	// Ensure that we have enough bytes in the raw buffer.
- for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
- if !yaml_parser_update_raw_buffer(parser) {
- return false
- }
- }
-
- // Determine the encoding.
- buf := parser.raw_buffer
- pos := parser.raw_buffer_pos
- avail := len(buf) - pos
- if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
- parser.encoding = yaml_UTF16LE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
- parser.encoding = yaml_UTF16BE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
- parser.encoding = yaml_UTF8_ENCODING
- parser.raw_buffer_pos += 3
- parser.offset += 3
- } else {
- parser.encoding = yaml_UTF8_ENCODING
- }
- return true
-}
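
As a rough standalone illustration of the BOM check above (detectEncoding is a hypothetical helper, not part of this package): the UTF-16 BOMs are two bytes, the UTF-8 BOM is three, and anything else is treated as BOM-less UTF-8.

package main

import (
	"bytes"
	"fmt"
)

// detectEncoding mirrors the BOM check: UTF-16 BOMs are two bytes, the UTF-8
// BOM is three, and any other prefix is treated as UTF-8 without a BOM.
func detectEncoding(input []byte) (encoding string, bomLen int) {
	switch {
	case bytes.HasPrefix(input, []byte{0xFF, 0xFE}):
		return "UTF-16LE", 2
	case bytes.HasPrefix(input, []byte{0xFE, 0xFF}):
		return "UTF-16BE", 2
	case bytes.HasPrefix(input, []byte{0xEF, 0xBB, 0xBF}):
		return "UTF-8", 3
	default:
		return "UTF-8", 0
	}
}

func main() {
	enc, n := detectEncoding([]byte("\xef\xbb\xbfkey: value"))
	fmt.Println(enc, n) // UTF-8 3
}
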
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
- size_read := 0
-
- // Return if the raw buffer is full.
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
- return true
- }
-
- // Return on EOF.
- if parser.eof {
- return true
- }
-
- // Move the remaining bytes in the raw buffer to the beginning.
- if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
- copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
- }
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
- parser.raw_buffer_pos = 0
-
- // Call the read handler to fill the buffer.
- size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
- if err == io.EOF {
- parser.eof = true
- } else if err != nil {
- return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
- }
- return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less than the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
- if parser.read_handler == nil {
- panic("read handler must be set")
- }
-
- // If the EOF flag is set and the raw buffer is empty, do nothing.
- if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
- return true
- }
-
- // Return if the buffer contains enough characters.
- if parser.unread >= length {
- return true
- }
-
- // Determine the input encoding if it is not known yet.
- if parser.encoding == yaml_ANY_ENCODING {
- if !yaml_parser_determine_encoding(parser) {
- return false
- }
- }
-
- // Move the unread characters to the beginning of the buffer.
- buffer_len := len(parser.buffer)
- if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
- copy(parser.buffer, parser.buffer[parser.buffer_pos:])
- buffer_len -= parser.buffer_pos
- parser.buffer_pos = 0
- } else if parser.buffer_pos == buffer_len {
- buffer_len = 0
- parser.buffer_pos = 0
- }
-
- // Open the whole buffer for writing, and cut it before returning.
- parser.buffer = parser.buffer[:cap(parser.buffer)]
-
- // Fill the buffer until it has enough characters.
- first := true
- for parser.unread < length {
-
- // Fill the raw buffer if necessary.
- if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
- if !yaml_parser_update_raw_buffer(parser) {
- parser.buffer = parser.buffer[:buffer_len]
- return false
- }
- }
- first = false
-
- // Decode the raw buffer.
- inner:
- for parser.raw_buffer_pos != len(parser.raw_buffer) {
- var value rune
- var width int
-
- raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
-
- // Decode the next character.
- switch parser.encoding {
- case yaml_UTF8_ENCODING:
- // Decode a UTF-8 character. Check RFC 3629
- // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
- //
- // The following table (taken from the RFC) is used for
- // decoding.
- //
- // Char. number range | UTF-8 octet sequence
- // (hexadecimal) | (binary)
- // --------------------+------------------------------------
- // 0000 0000-0000 007F | 0xxxxxxx
- // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
- // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
- // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- //
- // Additionally, the characters in the range 0xD800-0xDFFF
- // are prohibited as they are reserved for use with UTF-16
- // surrogate pairs.
-
- // Determine the length of the UTF-8 sequence.
- octet := parser.raw_buffer[parser.raw_buffer_pos]
- switch {
- case octet&0x80 == 0x00:
- width = 1
- case octet&0xE0 == 0xC0:
- width = 2
- case octet&0xF0 == 0xE0:
- width = 3
- case octet&0xF8 == 0xF0:
- width = 4
- default:
- // The leading octet is invalid.
- return yaml_parser_set_reader_error(parser,
- "invalid leading UTF-8 octet",
- parser.offset, int(octet))
- }
-
- // Check if the raw buffer contains an incomplete character.
- if width > raw_unread {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-8 octet sequence",
- parser.offset, -1)
- }
- break inner
- }
-
- // Decode the leading octet.
- switch {
- case octet&0x80 == 0x00:
- value = rune(octet & 0x7F)
- case octet&0xE0 == 0xC0:
- value = rune(octet & 0x1F)
- case octet&0xF0 == 0xE0:
- value = rune(octet & 0x0F)
- case octet&0xF8 == 0xF0:
- value = rune(octet & 0x07)
- default:
- value = 0
- }
-
- // Check and decode the trailing octets.
- for k := 1; k < width; k++ {
- octet = parser.raw_buffer[parser.raw_buffer_pos+k]
-
- // Check if the octet is valid.
- if (octet & 0xC0) != 0x80 {
- return yaml_parser_set_reader_error(parser,
- "invalid trailing UTF-8 octet",
- parser.offset+k, int(octet))
- }
-
- // Decode the octet.
- value = (value << 6) + rune(octet&0x3F)
- }
-
- // Check the length of the sequence against the value.
- switch {
- case width == 1:
- case width == 2 && value >= 0x80:
- case width == 3 && value >= 0x800:
- case width == 4 && value >= 0x10000:
- default:
- return yaml_parser_set_reader_error(parser,
- "invalid length of a UTF-8 sequence",
- parser.offset, -1)
- }
-
- // Check the range of the value.
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
- return yaml_parser_set_reader_error(parser,
- "invalid Unicode character",
- parser.offset, int(value))
- }
-
- case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
- var low, high int
- if parser.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- // The UTF-16 encoding is not as simple as one might
- // naively think. Check RFC 2781
- // (http://www.ietf.org/rfc/rfc2781.txt).
- //
- // Normally, two subsequent bytes describe a Unicode
- // character. However a special technique (called a
- // surrogate pair) is used for specifying character
- // values larger than 0xFFFF.
- //
- // A surrogate pair consists of two pseudo-characters:
- // high surrogate area (0xD800-0xDBFF)
- // low surrogate area (0xDC00-0xDFFF)
- //
- // The following formulas are used for decoding
- // and encoding characters using surrogate pairs:
- //
- // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
- // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
- // W1 = 110110yyyyyyyyyy
- // W2 = 110111xxxxxxxxxx
- //
- // where U is the character value, W1 is the high surrogate
- // area, W2 is the low surrogate area.
-
- // Check for incomplete UTF-16 character.
- if raw_unread < 2 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 character",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the character.
- value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
-
- // Check for unexpected low surrogate area.
- if value&0xFC00 == 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "unexpected low surrogate area",
- parser.offset, int(value))
- }
-
- // Check for a high surrogate area.
- if value&0xFC00 == 0xD800 {
- width = 4
-
- // Check for incomplete surrogate pair.
- if raw_unread < 4 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 surrogate pair",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the next character.
- value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
-
- // Check for a low surrogate area.
- if value2&0xFC00 != 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "expected low surrogate area",
- parser.offset+2, int(value2))
- }
-
- // Generate the value of the surrogate pair.
- value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
- } else {
- width = 2
- }
-
- default:
- panic("impossible")
- }
-
- // Check if the character is in the allowed range:
- // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
- // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
- // | [#x10000-#x10FFFF] (32 bit)
- switch {
- case value == 0x09:
- case value == 0x0A:
- case value == 0x0D:
- case value >= 0x20 && value <= 0x7E:
- case value == 0x85:
- case value >= 0xA0 && value <= 0xD7FF:
- case value >= 0xE000 && value <= 0xFFFD:
- case value >= 0x10000 && value <= 0x10FFFF:
- default:
- return yaml_parser_set_reader_error(parser,
- "control characters are not allowed",
- parser.offset, int(value))
- }
-
- // Move the raw pointers.
- parser.raw_buffer_pos += width
- parser.offset += width
-
- // Finally put the character into the buffer.
- if value <= 0x7F {
- // 0000 0000-0000 007F . 0xxxxxxx
- parser.buffer[buffer_len+0] = byte(value)
- } else if value <= 0x7FF {
- // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
- parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
- } else if value <= 0xFFFF {
- // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
- } else {
- // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
- }
- buffer_len += width
-
- parser.unread++
- }
-
- // On EOF, put NUL into the buffer and return.
- if parser.eof {
- parser.buffer[buffer_len] = 0
- buffer_len++
- parser.unread++
- break
- }
- }
- parser.buffer = parser.buffer[:buffer_len]
- return true
-}
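
To make the surrogate-pair formula used by the reader above concrete, here is a small standalone example (illustrative only, not code from this package) that decodes the UTF-16LE bytes of U+1F600 with the same W1/W2 arithmetic and re-encodes the result as UTF-8.

package main

import "fmt"

func main() {
	// UTF-16LE bytes for U+1F600, stored as a surrogate pair: W1=0xD83D, W2=0xDE00.
	raw := []byte{0x3D, 0xD8, 0x00, 0xDE}

	w1 := rune(raw[0]) | rune(raw[1])<<8
	w2 := rune(raw[2]) | rune(raw[3])<<8

	// Same formula as the reader: U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).
	if w1&0xFC00 == 0xD800 && w2&0xFC00 == 0xDC00 {
		value := 0x10000 + (w1&0x3FF)<<10 + (w2 & 0x3FF)
		fmt.Printf("U+%X -> UTF-8 % X\n", value, string(value)) // U+1F600 -> UTF-8 F0 9F 98 80
	}
}
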
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/resolve.go b/Godeps/_workspace/src/github.com/coreos/yaml/resolve.go
deleted file mode 100644
index 06c698a..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/resolve.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package yaml
-
-import (
- "encoding/base64"
- "fmt"
- "math"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// TODO: merge, timestamps, base 60 floats, omap.
-
-type resolveMapItem struct {
- value interface{}
- tag string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
- t := resolveTable
- t[int('+')] = 'S' // Sign
- t[int('-')] = 'S'
- for _, c := range "0123456789" {
- t[int(c)] = 'D' // Digit
- }
- for _, c := range "yYnNtTfFoO~" {
- t[int(c)] = 'M' // In map
- }
- t[int('.')] = '.' // Float (potentially in map)
-
- var resolveMapList = []struct {
- v interface{}
- tag string
- l []string
- }{
- {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
- {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
- {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
- {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
- {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
- {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
- {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
- {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
- {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
- {"<<", yaml_MERGE_TAG, []string{"<<"}},
- }
-
- m := resolveMap
- for _, item := range resolveMapList {
- for _, s := range item.l {
- m[s] = resolveMapItem{item.v, item.tag}
- }
- }
-}
-
-const longTagPrefix = "tag:yaml.org,2002:"
-
-func shortTag(tag string) string {
- // TODO This can easily be made faster and produce less garbage.
- if strings.HasPrefix(tag, longTagPrefix) {
- return "!!" + tag[len(longTagPrefix):]
- }
- return tag
-}
-
-func longTag(tag string) string {
- if strings.HasPrefix(tag, "!!") {
- return longTagPrefix + tag[2:]
- }
- return tag
-}
-
-func resolvableTag(tag string) bool {
- switch tag {
- case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
- return true
- }
- return false
-}
-
-func resolve(tag string, in string) (rtag string, out interface{}) {
- if !resolvableTag(tag) {
- return tag, in
- }
-
- defer func() {
- switch tag {
- case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
- return
- }
- fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)))
- }()
-
- // Any data is accepted as a !!str or !!binary.
- // Otherwise, the prefix is enough of a hint about what it might be.
- hint := byte('N')
- if in != "" {
- hint = resolveTable[in[0]]
- }
- if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
- // Handle things we can lookup in a map.
- if item, ok := resolveMap[in]; ok {
- return item.tag, item.value
- }
-
- // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
- // are purposefully unsupported here. They're still quoted on
-		// the way out for compatibility with other parsers, though.
-
- switch hint {
- case 'M':
- // We've already checked the map above.
-
- case '.':
- // Not in the map, so maybe a normal float.
- floatv, err := strconv.ParseFloat(in, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
-
- case 'D', 'S':
- // Int, float, or timestamp.
- plain := strings.Replace(in, "_", "", -1)
- intv, err := strconv.ParseInt(plain, 0, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, int(intv)
- } else {
- return yaml_INT_TAG, intv
- }
- }
- floatv, err := strconv.ParseFloat(plain, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
- if strings.HasPrefix(plain, "0b") {
- intv, err := strconv.ParseInt(plain[2:], 2, 64)
- if err == nil {
- return yaml_INT_TAG, int(intv)
- }
- } else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt(plain[3:], 2, 64)
- if err == nil {
- return yaml_INT_TAG, -int(intv)
- }
- }
- // XXX Handle timestamps here.
-
- default:
- panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
- }
- }
- if tag == yaml_BINARY_TAG {
- return yaml_BINARY_TAG, in
- }
- if utf8.ValidString(in) {
- return yaml_STR_TAG, in
- }
- return yaml_BINARY_TAG, encodeBase64(in)
-}
-
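
The hint table only narrows down which checks resolve runs; the overall decision is a cascade of map lookups, integer parses, and float parses, with plain strings as the fallback. A much-simplified standalone sketch of that cascade (classify is a hypothetical helper and deliberately looser than resolve, e.g. it is case-insensitive):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// classify guesses the implicit tag of a plain scalar, roughly following the
// resolution order above: map lookups first, then integers, then floats,
// falling back to a plain string.
func classify(s string) string {
	switch strings.ToLower(s) {
	case "", "~", "null":
		return "!!null"
	case "true", "yes", "on", "false", "no", "off":
		return "!!bool"
	case ".inf", "+.inf", "-.inf", ".nan":
		return "!!float"
	}
	plain := strings.Replace(s, "_", "", -1)
	if _, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return "!!int"
	}
	if _, err := strconv.ParseFloat(plain, 64); err == nil {
		return "!!float"
	}
	return "!!str"
}

func main() {
	for _, s := range []string{"yes", "0x1F", "3.14", ".inf", "hello"} {
		fmt.Printf("%-8q -> %s\n", s, classify(s))
	}
}
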
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
- const lineLen = 70
- encLen := base64.StdEncoding.EncodedLen(len(s))
- lines := encLen/lineLen + 1
- buf := make([]byte, encLen*2+lines)
- in := buf[0:encLen]
- out := buf[encLen:]
- base64.StdEncoding.Encode(in, []byte(s))
- k := 0
- for i := 0; i < len(in); i += lineLen {
- j := i + lineLen
- if j > len(in) {
- j = len(in)
- }
- k += copy(out[k:], in[i:j])
- if lines > 1 {
- out[k] = '\n'
- k++
- }
- }
- return string(out[:k])
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/scannerc.go b/Godeps/_workspace/src/github.com/coreos/yaml/scannerc.go
deleted file mode 100644
index fe93b19..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/scannerc.go
+++ /dev/null
@@ -1,2710 +0,0 @@
-package yaml
-
-import (
- "bytes"
- "fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descent parser (or an
-// LL(1) parser, as it is usually called).
-//
-// Actually, there are two aspects of scanning that might be called "clever";
-// the rest is quite straightforward. These are "block collection start" and
-// "simple keys". Both are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-// STREAM-START(encoding) # The stream start.
-// STREAM-END # The stream end.
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
-// DOCUMENT-START # '---'
-// DOCUMENT-END # '...'
-// BLOCK-SEQUENCE-START # Indentation increase denoting a block
-// BLOCK-MAPPING-START # sequence or a block mapping.
-// BLOCK-END # Indentation decrease.
-// FLOW-SEQUENCE-START # '['
-// FLOW-SEQUENCE-END # ']'
-// FLOW-MAPPING-START # '{'
-// FLOW-MAPPING-END # '}'
-// BLOCK-ENTRY # '-'
-// FLOW-ENTRY # ','
-// KEY # '?' or nothing (simple keys).
-// VALUE # ':'
-// ALIAS(anchor) # '*anchor'
-// ANCHOR(anchor) # '&anchor'
-// TAG(handle,suffix) # '!handle!suffix'
-// SCALAR(value,style) # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-// STREAM-START(encoding)
-// STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for directives:
-//
-// VERSION-DIRECTIVE(major,minor)
-// TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-// %YAML 1.1
-// %TAG ! !foo
-// %TAG !yaml! tag:yaml.org,2002:
-// ---
-//
-// The corresponding sequence of tokens:
-//
-// STREAM-START(utf-8)
-// VERSION-DIRECTIVE(1,1)
-// TAG-DIRECTIVE("!","!foo")
-// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-// DOCUMENT-START
-// STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-// DOCUMENT-START
-// DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-// 'a scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// STREAM-END
-//
-// 2. An explicit document:
-//
-// ---
-// 'a scalar'
-// ...
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-END
-// STREAM-END
-//
-// 3. Several documents in a stream:
-//
-// 'a scalar'
-// ---
-// 'another scalar'
-// ---
-// 'yet another scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("another scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("yet another scalar",single-quoted)
-// STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-// ALIAS(anchor)
-// ANCHOR(anchor)
-// TAG(handle,suffix)
-// SCALAR(value,style)
-//
-// The following series of examples illustrate the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-// &A [ *A ]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// ANCHOR("A")
-// FLOW-SEQUENCE-START
-// ALIAS("A")
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A tagged scalar:
-//
-// !!float "3.14" # A good approximation.
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// TAG("!!","float")
-// SCALAR("3.14",double-quoted)
-// STREAM-END
-//
-// 3. Various scalar styles:
-//
-// --- # Implicit empty plain scalars do not produce tokens.
-// --- a plain scalar
-// --- 'a single-quoted scalar'
-// --- "a double-quoted scalar"
-// --- |-
-// a literal scalar
-// --- >-
-// a folded
-// scalar
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// DOCUMENT-START
-// SCALAR("a plain scalar",plain)
-// DOCUMENT-START
-// SCALAR("a single-quoted scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("a double-quoted scalar",double-quoted)
-// DOCUMENT-START
-// SCALAR("a literal scalar",literal)
-// DOCUMENT-START
-// SCALAR("a folded scalar",folded)
-// STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-// FLOW-SEQUENCE-START
-// FLOW-SEQUENCE-END
-// FLOW-MAPPING-START
-// FLOW-MAPPING-END
-// FLOW-ENTRY
-// KEY
-// VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-// [item 1, item 2, item 3]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-SEQUENCE-START
-// SCALAR("item 1",plain)
-// FLOW-ENTRY
-// SCALAR("item 2",plain)
-// FLOW-ENTRY
-// SCALAR("item 3",plain)
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A flow mapping:
-//
-// {
-// a simple key: a value, # Note that the KEY token is produced.
-// ? a complex key: another value,
-// }
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// FLOW-ENTRY
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// FLOW-ENTRY
-// FLOW-MAPPING-END
-// STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-// BLOCK-SEQUENCE-START
-// BLOCK-MAPPING-START
-// BLOCK-END
-// BLOCK-ENTRY
-// KEY
-// VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes the indentation decrease that ends a block
-// collection (cf. the DEDENT token in Python). However, YAML has some syntax
-// peculiarities that make detecting these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-// - item 1
-// - item 2
-// -
-// - item 3.1
-// - item 3.2
-// -
-// key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 3.1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 3.2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Block mappings:
-//
-// a simple key: a value # The KEY token is produced here.
-// ? a complex key
-// : another value
-// a mapping:
-// key 1: value 1
-// key 2: value 2
-// a sequence:
-// - item 1
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// KEY
-// SCALAR("a mapping",plain)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML does not always require starting a new block collection on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-// - - item 1
-// - item 2
-// - key 1: value 1
-// key 2: value 2
-// - ? complex key
-// : complex value
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("complex key")
-// VALUE
-// SCALAR("complex value")
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Collections in a mapping:
-//
-// ? a sequence
-// : - item 1
-// - item 2
-// ? a mapping
-// : key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a mapping",plain)
-// VALUE
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-// key:
-// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key",plain)
-// VALUE
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-//
-
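
The token stream catalogued above is obtained by calling yaml_parser_scan repeatedly until STREAM-END. Because the token-level API is unexported, a driver would have to live inside this package; the sketch below (countTokens is hypothetical) assumes the parser setup and teardown helpers from apic.go (yaml_parser_initialize, yaml_parser_set_input_string, yaml_parser_delete) behave as their names suggest.

package yaml

import "fmt"

// countTokens is an illustrative, in-package sketch: it scans input and counts
// the tokens produced before STREAM-END, reporting a scanner error if any.
func countTokens(input []byte) (int, error) {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		return 0, fmt.Errorf("cannot initialize parser")
	}
	defer yaml_parser_delete(&parser)
	yaml_parser_set_input_string(&parser, input)

	n := 0
	for {
		var token yaml_token_t
		if !yaml_parser_scan(&parser, &token) {
			return n, fmt.Errorf("scanner error: %s", parser.problem)
		}
		n++
		if token.typ == yaml_STREAM_END_TOKEN {
			return n, nil
		}
	}
}
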
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
- // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
- return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-func skip_line(parser *yaml_parser_t) {
- if is_crlf(parser.buffer, parser.buffer_pos) {
- parser.mark.index += 2
- parser.mark.column = 0
- parser.mark.line++
- parser.unread -= 2
- parser.buffer_pos += 2
- } else if is_break(parser.buffer, parser.buffer_pos) {
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
- }
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
- w := width(parser.buffer[parser.buffer_pos])
- if w == 0 {
- panic("invalid character sequence")
- }
- if len(s) == 0 {
- s = make([]byte, 0, 32)
- }
- if w == 1 && len(s)+w <= cap(s) {
- s = s[:len(s)+1]
- s[len(s)-1] = parser.buffer[parser.buffer_pos]
- parser.buffer_pos++
- } else {
- s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
- parser.buffer_pos += w
- }
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- return s
-}
-
-// Copy a line break character to a string buffer and advance pointers.
-func read_line(parser *yaml_parser_t, s []byte) []byte {
- buf := parser.buffer
- pos := parser.buffer_pos
- switch {
- case buf[pos] == '\r' && buf[pos+1] == '\n':
- // CR LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- parser.mark.index++
- parser.unread--
- case buf[pos] == '\r' || buf[pos] == '\n':
- // CR|LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 1
- case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
- // NEL . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
- // LS|PS . LS|PS
- s = append(s, buf[parser.buffer_pos:pos+3]...)
- parser.buffer_pos += 3
- default:
- return s
- }
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- return s
-}
-
-// Get the next token.
-func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Erase the token object.
- *token = yaml_token_t{} // [Go] Is this necessary?
-
- // No tokens after STREAM-END or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
- return true
- }
-
- // Ensure that the tokens queue contains enough tokens.
- if !parser.token_available {
- if !yaml_parser_fetch_more_tokens(parser) {
- return false
- }
- }
-
- // Fetch the next token from the queue.
- *token = parser.tokens[parser.tokens_head]
- parser.tokens_head++
- parser.tokens_parsed++
- parser.token_available = false
-
- if token.typ == yaml_STREAM_END_TOKEN {
- parser.stream_end_produced = true
- }
- return true
-}
-
-// Set the scanner error and return false.
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
- parser.error = yaml_SCANNER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = parser.mark
- return false
-}
-
-func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
- context := "while parsing a tag"
- if directive {
- context = "while parsing a %TAG directive"
- }
- return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
-}
-
-func trace(args ...interface{}) func() {
- pargs := append([]interface{}{"+++"}, args...)
- fmt.Println(pargs...)
- pargs = append([]interface{}{"---"}, args...)
- return func() { fmt.Println(pargs...) }
-}
-
-// Ensure that the tokens queue contains at least one token which can be
-// returned to the Parser.
-func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
- // While we need more tokens to fetch, do it.
- for {
- // Check if we really need to fetch more tokens.
- need_more_tokens := false
-
- if parser.tokens_head == len(parser.tokens) {
- // Queue is empty.
- need_more_tokens = true
- } else {
- // Check if any potential simple key may occupy the head position.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
- if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
- need_more_tokens = true
- break
- }
- }
- }
-
- // We are finished.
- if !need_more_tokens {
- break
- }
- // Fetch the next token.
- if !yaml_parser_fetch_next_token(parser) {
- return false
- }
- }
-
- parser.token_available = true
- return true
-}
-
-// The dispatcher for token fetchers.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
- // Ensure that the buffer is initialized.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check if we just started scanning. Fetch STREAM-START then.
- if !parser.stream_start_produced {
- return yaml_parser_fetch_stream_start(parser)
- }
-
- // Eat whitespaces and comments until we reach the next token.
- if !yaml_parser_scan_to_next_token(parser) {
- return false
- }
-
- // Remove obsolete potential simple keys.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- // Check the indentation level against the current column.
- if !yaml_parser_unroll_indent(parser, parser.mark.column) {
- return false
- }
-
- // Ensure that the buffer contains at least 4 characters. 4 is the length
- // of the longest indicators ('--- ' and '... ').
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- // Is it the end of the stream?
- if is_z(parser.buffer, parser.buffer_pos) {
- return yaml_parser_fetch_stream_end(parser)
- }
-
- // Is it a directive?
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
- return yaml_parser_fetch_directive(parser)
- }
-
- buf := parser.buffer
- pos := parser.buffer_pos
-
- // Is it the document start indicator?
- if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
- }
-
- // Is it the document end indicator?
- if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
- }
-
- // Is it the flow sequence start indicator?
- if buf[pos] == '[' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
- }
-
- // Is it the flow mapping start indicator?
- if parser.buffer[parser.buffer_pos] == '{' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
- }
-
- // Is it the flow sequence end indicator?
- if parser.buffer[parser.buffer_pos] == ']' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_SEQUENCE_END_TOKEN)
- }
-
- // Is it the flow mapping end indicator?
- if parser.buffer[parser.buffer_pos] == '}' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_MAPPING_END_TOKEN)
- }
-
- // Is it the flow entry indicator?
- if parser.buffer[parser.buffer_pos] == ',' {
- return yaml_parser_fetch_flow_entry(parser)
- }
-
- // Is it the block entry indicator?
- if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
- return yaml_parser_fetch_block_entry(parser)
- }
-
- // Is it the key indicator?
- if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_key(parser)
- }
-
- // Is it the value indicator?
- if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_value(parser)
- }
-
- // Is it an alias?
- if parser.buffer[parser.buffer_pos] == '*' {
- return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
- }
-
- // Is it an anchor?
- if parser.buffer[parser.buffer_pos] == '&' {
- return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
- }
-
- // Is it a tag?
- if parser.buffer[parser.buffer_pos] == '!' {
- return yaml_parser_fetch_tag(parser)
- }
-
- // Is it a literal scalar?
- if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, true)
- }
-
- // Is it a folded scalar?
- if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, false)
- }
-
- // Is it a single-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '\'' {
- return yaml_parser_fetch_flow_scalar(parser, true)
- }
-
- // Is it a double-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '"' {
- return yaml_parser_fetch_flow_scalar(parser, false)
- }
-
- // Is it a plain scalar?
- //
- // A plain scalar may start with any non-blank characters except
- //
- // '-', '?', ':', ',', '[', ']', '{', '}',
- // '#', '&', '*', '!', '|', '>', '\'', '\"',
- // '%', '@', '`'.
- //
- // In the block context (and, for the '-' indicator, in the flow context
- // too), it may also start with the characters
- //
- // '-', '?', ':'
- //
- // if it is followed by a non-space character.
- //
- // The last rule is more restrictive than the specification requires.
- // [Go] Make this logic more reasonable.
- //switch parser.buffer[parser.buffer_pos] {
- //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
- //}
- if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
- parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
- parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
- (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level == 0 &&
- (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
- !is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_plain_scalar(parser)
- }
-
-	// If we haven't determined the token type so far, it is an error.
- return yaml_parser_set_scanner_error(parser,
- "while scanning for the next token", parser.mark,
- "found character that cannot start any token")
-}
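
The plain-scalar condition at the end of the dispatcher boils down to: indicators never start a plain scalar, except '-', '?' and ':' when followed by a non-blank character ('-' in any context, '?' and ':' only outside flow). A standalone sketch of just that predicate (canStartPlain is hypothetical and simplified; the scanner also treats line breaks and end-of-input as blanks):

package main

import (
	"fmt"
	"strings"
)

// canStartPlain reports whether a plain scalar may start with byte c when the
// following byte is next, using the (slightly stricter than spec) rule above.
func canStartPlain(c, next byte, inFlow bool) bool {
	const indicators = "-?:,[]{}#&*!|>'\"%@`"
	blank := func(b byte) bool { return b == ' ' || b == '\t' || b == 0 }
	if blank(c) {
		return false
	}
	if !strings.ContainsRune(indicators, rune(c)) {
		return true
	}
	switch c {
	case '-':
		return !blank(next) // "-1" is plain, "- x" is a block entry
	case '?', ':':
		return !inFlow && !blank(next) // only in the block context
	}
	return false
}

func main() {
	fmt.Println(canStartPlain('f', 'o', false)) // true
	fmt.Println(canStartPlain('-', '1', false)) // true
	fmt.Println(canStartPlain(':', 'x', false)) // true (block context)
	fmt.Println(canStartPlain(':', 'x', true))  // false (flow context)
}
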
-
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
- // Check for a potential simple key for each flow level.
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
-
- // The specification requires that a simple key
- //
- // - is limited to a single line,
- // - is shorter than 1024 characters.
- if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
- // Check if the potential simple key to be removed is required.
- if simple_key.required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", simple_key.mark,
- "could not find expected ':'")
- }
- simple_key.possible = false
- }
- }
- return true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
- // A simple key is required at the current position if the scanner is in
- // the block context and the current column coincides with the indentation
- // level.
-
- required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
- // A simple key is required only when it is the first token in the current
- // line. Therefore it is always allowed. But we add a check anyway.
- if required && !parser.simple_key_allowed {
- panic("should not happen")
- }
-
- //
- // If the current position may start a simple key, save it.
- //
- if parser.simple_key_allowed {
- simple_key := yaml_simple_key_t{
- possible: true,
- required: required,
- token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
- }
- simple_key.mark = parser.mark
-
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
- parser.simple_keys[len(parser.simple_keys)-1] = simple_key
- }
- return true
-}
-
-// Remove a potential simple key at the current flow level.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
- i := len(parser.simple_keys) - 1
- if parser.simple_keys[i].possible {
- // If the key is required, it is an error.
- if parser.simple_keys[i].required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", parser.simple_keys[i].mark,
- "could not find expected ':'")
- }
- }
- // Remove the key from the stack.
- parser.simple_keys[i].possible = false
- return true
-}
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
- // Reset the simple key on the next level.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // Increase the flow level.
- parser.flow_level++
- return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
- if parser.flow_level > 0 {
- parser.flow_level--
- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
- }
- return true
-}
-
-// Push the current indentation level to the stack and set the new level if
-// the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- if parser.indent < column {
- // Push the current indentation level to the stack and set the new
- // indentation level.
- parser.indents = append(parser.indents, parser.indent)
- parser.indent = column
-
- // Create a token and insert it into the queue.
- token := yaml_token_t{
- typ: typ,
- start_mark: mark,
- end_mark: mark,
- }
- if number > -1 {
- number -= parser.tokens_parsed
- }
- yaml_insert_token(parser, number, &token)
- }
- return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column. For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
-	// Loop through the indentation levels in the stack.
- for parser.indent > column {
- // Create a token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
-
- // Pop the indentation level.
- parser.indent = parser.indents[len(parser.indents)-1]
- parser.indents = parser.indents[:len(parser.indents)-1]
- }
- return true
-}
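
The roll/unroll pair above maintains a stack of indentation columns: a larger column pushes the old level and opens a block collection, while a smaller column pops levels, emitting one BLOCK-END per pop. A standalone sketch of that bookkeeping (indentStack is a hypothetical type, illustrative only):

package main

import "fmt"

// indentStack mimics the scanner's indents slice: push on a column increase,
// pop (emitting a BLOCK-END) while the current column is smaller.
type indentStack struct {
	indent  int
	indents []int
}

// roll pushes the current level if column is greater, as roll_indent does.
func (s *indentStack) roll(column int) bool {
	if s.indent < column {
		s.indents = append(s.indents, s.indent)
		s.indent = column
		return true // here a BLOCK-SEQUENCE-START or BLOCK-MAPPING-START would be queued
	}
	return false
}

// unroll pops levels down to column and reports how many BLOCK-END tokens
// that would produce, as unroll_indent does.
func (s *indentStack) unroll(column int) int {
	ends := 0
	for s.indent > column {
		s.indent = s.indents[len(s.indents)-1]
		s.indents = s.indents[:len(s.indents)-1]
		ends++
	}
	return ends
}

func main() {
	s := &indentStack{indent: -1}
	s.roll(0)                 // top-level block mapping starts at column 0
	s.roll(2)                 // a nested block collection at column 2
	fmt.Println(s.unroll(-1)) // 2: both levels are closed at stream end
}
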
-
-// Initialize the scanner and produce the STREAM-START token.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
- // Set the initial indentation.
- parser.indent = -1
-
- // Initialize the simple key stack.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // A simple key is allowed at the beginning of the stream.
- parser.simple_key_allowed = true
-
- // We have started.
- parser.stream_start_produced = true
-
- // Create the STREAM-START token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_START_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- encoding: parser.encoding,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
- // Force new line.
- if parser.mark.column != 0 {
- parser.mark.column = 0
- parser.mark.line++
- }
-
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the STREAM-END token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
- token := yaml_token_t{}
- if !yaml_parser_scan_directive(parser, &token) {
- return false
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Consume the token.
- start_mark := parser.mark
-
- skip(parser)
- skip(parser)
- skip(parser)
-
- end_mark := parser.mark
-
- // Create the DOCUMENT-START or DOCUMENT-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // The indicators '[' and '{' may start a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // Increase the flow level.
- if !yaml_parser_increase_flow_level(parser) {
- return false
- }
-
- // A simple key may follow the indicators '[' and '{'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset any potential simple key on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Decrease the flow level.
- if !yaml_parser_decrease_flow_level(parser) {
- return false
- }
-
- // No simple keys after the indicators ']' and '}'.
- parser.simple_key_allowed = false
-
- // Consume the token.
-
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-ENTRY token.
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after ','.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_FLOW_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the BLOCK-ENTRY token.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
- // Check if the scanner is in the block context.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new entry.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "block sequence entries are not allowed in this context")
- }
- // Add the BLOCK-SEQUENCE-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
- return false
- }
- } else {
- // It is an error for the '-' indicator to occur in the flow context,
-		// but we let the Parser detect and report it, because the Parser
- // is able to point to the context.
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '-'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the BLOCK-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
- // In the block context, additional checks are required.
- if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping keys are not allowed in this context")
- }
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '?' in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the KEY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
- simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
- // Have we found a simple key?
- if simple_key.possible {
- // Create the KEY token and insert it into the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: simple_key.mark,
- end_mark: simple_key.mark,
- }
- yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
- // In the block context, we may need to add the BLOCK-MAPPING-START token.
- if !yaml_parser_roll_indent(parser, simple_key.mark.column,
- simple_key.token_number,
- yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
- return false
- }
-
- // Remove the simple key.
- simple_key.possible = false
-
- // A simple key cannot follow another simple key.
- parser.simple_key_allowed = false
-
- } else {
- // The ':' indicator follows a complex key.
-
- // In the block context, extra checks are required.
- if parser.flow_level == 0 {
-
- // Check if we are allowed to start a complex value.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping values are not allowed in this context")
- }
-
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Simple keys after ':' are allowed in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
- }
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the VALUE token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_VALUE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the ALIAS or ANCHOR token.
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // An anchor or an alias could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow an anchor or an alias.
- parser.simple_key_allowed = false
-
- // Create the ALIAS or ANCHOR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_anchor(parser, &token, typ) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the TAG token.
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
- // A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a tag.
- parser.simple_key_allowed = false
-
- // Create the TAG token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_tag(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
-func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
- // Remove any potential simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // A simple key may follow a block scalar.
- parser.simple_key_allowed = true
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_block_scalar(parser, &token, literal) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
-func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
-	// A flow scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a flow scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_flow_scalar(parser, &token, single) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,plain) token.
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
- // A plain scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
-	// A simple key cannot follow a plain scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_plain_scalar(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Eat whitespaces and comments until the next token is found.
-func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
-
-	// Loop until the next token is found.
- for {
- // Allow the BOM mark to start a line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
- skip(parser)
- }
-
- // Eat whitespaces.
- // Tabs are allowed:
- // - in the flow context
- // - in the block context, but not at the beginning of the line or
- // after '-', '?', or ':' (complex value).
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Eat a comment until a line break.
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // If it is a line break, eat it.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
-
- // In the block context, a new line may start a simple key.
- if parser.flow_level == 0 {
- parser.simple_key_allowed = true
- }
- } else {
- break // We have found a token.
- }
- }
-
- return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Eat '%'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the directive name.
- var name []byte
- if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
- return false
- }
-
- // Is it a YAML directive?
- if bytes.Equal(name, []byte("YAML")) {
- // Scan the VERSION directive value.
- var major, minor int8
- if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
- return false
- }
- end_mark := parser.mark
-
- // Create a VERSION-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_VERSION_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- major: major,
- minor: minor,
- }
-
- // Is it a TAG directive?
- } else if bytes.Equal(name, []byte("TAG")) {
- // Scan the TAG directive value.
- var handle, prefix []byte
- if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
- return false
- }
- end_mark := parser.mark
-
- // Create a TAG-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_TAG_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- prefix: prefix,
- }
-
- // Unknown directive.
- } else {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found uknown directive name")
- return false
- }
-
- // Eat the rest of the line including any comments.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
- // Consume the directive name.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- var s []byte
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the name is empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "could not find expected directive name")
- return false
- }
-
- // Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unexpected non-alphabetical character")
- return false
- }
- *name = s
- return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the major version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
- return false
- }
-
- // Eat '.'.
- if parser.buffer[parser.buffer_pos] != '.' {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected digit or '.' character")
- }
-
- skip(parser)
-
- // Consume the minor version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
- return false
- }
- return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^
-// %YAML 1.1 # a comment \n
-// ^
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
- // Repeat while the next character is a digit.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var value, length int8
- for is_digit(parser.buffer, parser.buffer_pos) {
- // Check if the number is too long.
- length++
- if length > max_number_length {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "found extremely long version number")
- }
- value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the number was present.
- if length == 0 {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected version number")
- }
- *number = value
- return true
-}
-
-// Scan the value of a TAG-DIRECTIVE token.
-//
-// Scope:
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
- var handle_value, prefix_value []byte
-
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a handle.
- if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
- return false
- }
-
- // Expect a whitespace.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blank(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace")
- return false
- }
-
- // Eat whitespaces.
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a prefix.
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
- return false
- }
-
- // Expect a whitespace or line break.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- *handle = handle_value
- *prefix = prefix_value
- return true
-}
-
-func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
- var s []byte
-
- // Eat the indicator character.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the value.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- end_mark := parser.mark
-
- /*
- * Check if length of the anchor is greater than 0 and it is followed by
- * a whitespace character or one of the indicators:
- *
- * '?', ':', ',', ']', '}', '%', '@', '`'.
- */
-
- if len(s) == 0 ||
- !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
- parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '`') {
- context := "while scanning an alias"
- if typ == yaml_ANCHOR_TOKEN {
- context = "while scanning an anchor"
- }
- yaml_parser_set_scanner_error(parser, context, start_mark,
- "did not find expected alphabetic or numeric character")
- return false
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- }
-
- return true
-}
-
-/*
- * Scan a TAG token.
- */
-
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
- var handle, suffix []byte
-
- start_mark := parser.mark
-
- // Check if the tag is in the canonical form.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- if parser.buffer[parser.buffer_pos+1] == '<' {
- // Keep the handle as ''
-
- // Eat '!<'
- skip(parser)
- skip(parser)
-
- // Consume the tag value.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
-
- // Check for '>' and eat it.
- if parser.buffer[parser.buffer_pos] != '>' {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find the expected '>'")
- return false
- }
-
- skip(parser)
- } else {
- // The tag has either the '!suffix' or the '!handle!suffix' form.
-
- // First, try to scan a handle.
- if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
- return false
- }
-
- // Check if it is, indeed, a handle.
- if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
- // Scan the suffix now.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
- } else {
- // It wasn't a handle after all. Scan the rest of the tag.
- if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
- return false
- }
-
- // Set the handle to '!'.
- handle = []byte{'!'}
-
- // A special case: the '!' tag. Set the handle to '' and the
- // suffix to '!'.
- if len(suffix) == 0 {
- handle, suffix = suffix, handle
- }
- }
- }
-
- // Check the character which ends the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_TAG_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- suffix: suffix,
- }
- return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
- // Check the initial '!' character.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] != '!' {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
-
- var s []byte
-
- // Copy the '!' character.
- s = read(parser, s)
-
- // Copy all subsequent alphabetical and numerical characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the trailing character is '!' and copy it.
- if parser.buffer[parser.buffer_pos] == '!' {
- s = read(parser, s)
- } else {
- // It's either the '!' tag or not really a tag handle. If it's a %TAG
- // directive, it's an error. If it's a tag token, it must be a part of URI.
- if directive && !(s[0] == '!' && s[1] == 0) {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
- }
-
- *handle = s
- return true
-}
-
-// Scan a tag.
-func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
- //size_t length = head ? strlen((char *)head) : 0
- var s []byte
-
- // Copy the head if needed.
- //
- // Note that we don't copy the leading '!' character.
- if len(head) > 1 {
- s = append(s, head[1:]...)
- }
-
- // Scan the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // The set of characters that may appear in URI is as follows:
- //
- // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
- // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
- // '%'.
- // [Go] Convert this into more reasonable logic.
- for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
- parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
- parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
- parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
- parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
- parser.buffer[parser.buffer_pos] == '%' {
- // Check if it is a URI-escape sequence.
- if parser.buffer[parser.buffer_pos] == '%' {
- if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
- return false
- }
- } else {
- s = read(parser, s)
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the tag is non-empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected tag URI")
- return false
- }
- *uri = s
- return true
-}
-
-// Decode a URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
- // Decode the required number of characters.
- w := 1024
- for w > 0 {
- // Check for a URI-escaped octet.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
-
- if !(parser.buffer[parser.buffer_pos] == '%' &&
- is_hex(parser.buffer, parser.buffer_pos+1) &&
- is_hex(parser.buffer, parser.buffer_pos+2)) {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find URI escaped octet")
- }
-
- // Get the octet.
- octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
- // If it is the leading octet, determine the length of the UTF-8 sequence.
- if w == 1024 {
- w = width(octet)
- if w == 0 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect leading UTF-8 octet")
- }
- } else {
- // Check if the trailing octet is correct.
- if octet&0xC0 != 0x80 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect trailing UTF-8 octet")
- }
- }
-
- // Copy the octet and move the pointers.
- *s = append(*s, octet)
- skip(parser)
- skip(parser)
- skip(parser)
- w--
- }
- return true
-}
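The loop above decodes %XX escape sequences byte by byte while validating the UTF-8 sequence length. A minimal standalone sketch of the same idea using only the standard library (the helper name decodeURIEscapes is invented for illustration and omits the UTF-8 validation the scanner performs):

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// decodeURIEscapes expands %XX sequences into raw bytes and leaves every
// other character untouched. The scanner above does the same work directly
// on the parser buffer, additionally checking UTF-8 sequence lengths.
func decodeURIEscapes(s string) (string, error) {
	var out bytes.Buffer
	for i := 0; i < len(s); i++ {
		if s[i] != '%' {
			out.WriteByte(s[i])
			continue
		}
		if i+3 > len(s) {
			return "", fmt.Errorf("truncated escape at offset %d", i)
		}
		b, err := hex.DecodeString(s[i+1 : i+3])
		if err != nil {
			return "", fmt.Errorf("bad escape at offset %d: %v", i, err)
		}
		out.WriteByte(b[0])
		i += 2
	}
	return out.String(), nil
}

func main() {
	decoded, err := decodeURIEscapes("tag:example.com,2014:caf%C3%A9")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // tag:example.com,2014:café
}
```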
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
- // Eat the indicator '|' or '>'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the additional block scalar indicators.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check for a chomping indicator.
- var chomping, increment int
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- // Set the chomping method and eat the indicator.
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
-
- // Check for an indentation indicator.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_digit(parser.buffer, parser.buffer_pos) {
- // Check that the indentation is greater than 0.
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an intendation indicator equal to 0")
- return false
- }
-
- // Get the indentation level and eat the indicator.
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
- }
-
- } else if is_digit(parser.buffer, parser.buffer_pos) {
- // Do the same as above, but in the opposite order.
-
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an intendation indicator equal to 0")
- return false
- }
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
-
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
- }
- }
-
- // Eat whitespaces and comments to the end of the line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- end_mark := parser.mark
-
- // Set the indentation level if it was specified.
- var indent int
- if increment > 0 {
- if parser.indent >= 0 {
- indent = parser.indent + increment
- } else {
- indent = increment
- }
- }
-
- // Scan the leading line breaks and determine the indentation level if needed.
- var s, leading_break, trailing_breaks []byte
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
-
- // Scan the block scalar content.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var leading_blank, trailing_blank bool
- for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
- // We are at the beginning of a non-empty line.
-
- // Is it a trailing whitespace?
- trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Check if we need to fold the leading line break.
- if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
- // Do we need to join the lines by space?
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- }
- } else {
- s = append(s, leading_break...)
- }
- leading_break = leading_break[:0]
-
- // Append the remaining line breaks.
- s = append(s, trailing_breaks...)
- trailing_breaks = trailing_breaks[:0]
-
- // Is it a leading whitespace?
- leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Consume the current line.
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- leading_break = read_line(parser, leading_break)
-
- // Eat the following indentation spaces and line breaks.
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
- }
-
- // Chomp the tail.
- if chomping != -1 {
- s = append(s, leading_break...)
- }
- if chomping == 1 {
- s = append(s, trailing_breaks...)
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_LITERAL_SCALAR_STYLE,
- }
- if !literal {
- token.style = yaml_FOLDED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
- *end_mark = parser.mark
-
- // Eat the indentation spaces and line breaks.
- max_indent := 0
- for {
- // Eat the indentation spaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.mark.column > max_indent {
- max_indent = parser.mark.column
- }
-
- // Check for a tab character messing up the indentation.
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
- return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found a tab character where an intendation space is expected")
- }
-
- // Have we found a non-empty line?
- if !is_break(parser.buffer, parser.buffer_pos) {
- break
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- // [Go] Should really be returning breaks instead.
- *breaks = read_line(parser, *breaks)
- *end_mark = parser.mark
- }
-
- // Determine the indentation level if needed.
- if *indent == 0 {
- *indent = max_indent
- if *indent < parser.indent+1 {
- *indent = parser.indent + 1
- }
- if *indent < 1 {
- *indent = 1
- }
- }
- return true
-}
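The two functions above implement the '|' and '>' block scalar styles together with the chomping ('+'/'-') and indentation indicators. A short usage sketch of the observable behaviour, assuming the upstream gopkg.in/yaml.v1 package (of which this vendored github.com/coreos/yaml copy is a fork):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v1"
)

func main() {
	docs := []string{
		"v: |\n  line one\n  line two\n",  // literal: line breaks are kept
		"v: >\n  line one\n  line two\n",  // folded: breaks become spaces
		"v: |-\n  line one\n  line two\n", // '-' strips the final break
	}
	for _, doc := range docs {
		var out struct{ V string }
		if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", out.V)
	}
	// Expected (per the YAML spec):
	// "line one\nline two\n"
	// "line one line two\n"
	// "line one\nline two"
}
```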
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
- // Eat the left quote.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the content of the quoted scalar.
- var s, leading_break, trailing_breaks, whitespaces []byte
- for {
- // Check that there are no document indicators at the beginning of the line.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected document indicator")
- return false
- }
-
- // Check for EOF.
- if is_z(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected end of stream")
- return false
- }
-
- // Consume non-blank characters.
- leading_blanks := false
- for !is_blankz(parser.buffer, parser.buffer_pos) {
- if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
- // It is an escaped single quote.
- s = append(s, '\'')
- skip(parser)
- skip(parser)
-
- } else if single && parser.buffer[parser.buffer_pos] == '\'' {
- // It is a right single quote.
- break
- } else if !single && parser.buffer[parser.buffer_pos] == '"' {
- // It is a right double quote.
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
- // It is an escaped line break.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
- skip(parser)
- skip_line(parser)
- leading_blanks = true
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
- // It is an escape sequence.
- code_length := 0
-
- // Check the escape character.
- switch parser.buffer[parser.buffer_pos+1] {
- case '0':
- s = append(s, 0)
- case 'a':
- s = append(s, '\x07')
- case 'b':
- s = append(s, '\x08')
- case 't', '\t':
- s = append(s, '\x09')
- case 'n':
- s = append(s, '\x0A')
- case 'v':
- s = append(s, '\x0B')
- case 'f':
- s = append(s, '\x0C')
- case 'r':
- s = append(s, '\x0D')
- case 'e':
- s = append(s, '\x1B')
- case ' ':
- s = append(s, '\x20')
- case '"':
- s = append(s, '"')
- case '\'':
- s = append(s, '\'')
- case '\\':
- s = append(s, '\\')
- case 'N': // NEL (#x85)
- s = append(s, '\xC2')
- s = append(s, '\x85')
- case '_': // #xA0
- s = append(s, '\xC2')
- s = append(s, '\xA0')
- case 'L': // LS (#x2028)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA8')
- case 'P': // PS (#x2029)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA9')
- case 'x':
- code_length = 2
- case 'u':
- code_length = 4
- case 'U':
- code_length = 8
- default:
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found unknown escape character")
- return false
- }
-
- skip(parser)
- skip(parser)
-
- // Consume an arbitrary escape code.
- if code_length > 0 {
- var value int
-
- // Scan the character value.
- if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
- return false
- }
- for k := 0; k < code_length; k++ {
- if !is_hex(parser.buffer, parser.buffer_pos+k) {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "did not find expected hexdecimal number")
- return false
- }
- value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
- }
-
- // Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found invalid Unicode character escape code")
- return false
- }
- if value <= 0x7F {
- s = append(s, byte(value))
- } else if value <= 0x7FF {
- s = append(s, byte(0xC0+(value>>6)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else if value <= 0xFFFF {
- s = append(s, byte(0xE0+(value>>12)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else {
- s = append(s, byte(0xF0+(value>>18)))
- s = append(s, byte(0x80+((value>>12)&0x3F)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- }
-
- // Advance the pointer.
- for k := 0; k < code_length; k++ {
- skip(parser)
- }
- }
- } else {
- // It is a non-escaped non-blank character.
- s = read(parser, s)
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Check if we are at the end of the scalar.
- if single {
- if parser.buffer[parser.buffer_pos] == '\'' {
- break
- }
- } else {
- if parser.buffer[parser.buffer_pos] == '"' {
- break
- }
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is a first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Join the whitespaces or fold line breaks.
- if leading_blanks {
- // Do we need to fold line breaks?
- if len(leading_break) > 0 && leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Eat the right quote.
- skip(parser)
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
- }
- if !single {
- token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- return true
-}
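The byte arithmetic above that writes the decoded \xXX, \uXXXX and \UXXXXXXXX escape values is a hand-rolled UTF-8 encoder. A minimal sketch showing that it produces the same bytes as the standard library's encoder for a sample code point:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	value := 0x2028 // LINE SEPARATOR, as produced by the \u2028 escape above
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], rune(value))
	fmt.Printf("% x\n", buf[:n]) // e2 80 a8 — the same bytes the scanner appends
}
```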
-
-// Scan a plain scalar.
-func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
-
- var s, leading_break, trailing_breaks, whitespaces []byte
- var leading_blanks bool
- var indent = parser.indent + 1
-
- start_mark := parser.mark
- end_mark := parser.mark
-
- // Consume the content of the plain scalar.
- for {
- // Check for a document indicator.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- break
- }
-
- // Check for a comment.
- if parser.buffer[parser.buffer_pos] == '#' {
- break
- }
-
- // Consume non-blank characters.
- for !is_blankz(parser.buffer, parser.buffer_pos) {
-
- // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
- if parser.flow_level > 0 &&
- parser.buffer[parser.buffer_pos] == ':' &&
- !is_blankz(parser.buffer, parser.buffer_pos+1) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found unexpected ':'")
- return false
- }
-
- // Check for indicators that may end a plain scalar.
- if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level > 0 &&
- (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}')) {
- break
- }
-
- // Check if we need to join whitespaces and breaks.
- if leading_blanks || len(whitespaces) > 0 {
- if leading_blanks {
- // Do we need to fold line breaks?
- if leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- leading_blanks = false
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Copy the character.
- s = read(parser, s)
-
- end_mark = parser.mark
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Is it the end?
- if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
- break
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
-
- // Check for a tab character that abuses indentation.
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found a tab character that violate intendation")
- return false
- }
-
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is a first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check the indentation level.
- if parser.flow_level == 0 && parser.mark.column < indent {
- break
- }
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_PLAIN_SCALAR_STYLE,
- }
-
- // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks {
- parser.simple_key_allowed = true
- }
- return true
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/sorter.go b/Godeps/_workspace/src/github.com/coreos/yaml/sorter.go
deleted file mode 100644
index 5958822..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/sorter.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package yaml
-
-import (
- "reflect"
- "unicode"
-)
-
-type keyList []reflect.Value
-
-func (l keyList) Len() int { return len(l) }
-func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l keyList) Less(i, j int) bool {
- a := l[i]
- b := l[j]
- ak := a.Kind()
- bk := b.Kind()
- for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
- a = a.Elem()
- ak = a.Kind()
- }
- for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
- b = b.Elem()
- bk = b.Kind()
- }
- af, aok := keyFloat(a)
- bf, bok := keyFloat(b)
- if aok && bok {
- if af != bf {
- return af < bf
- }
- if ak != bk {
- return ak < bk
- }
- return numLess(a, b)
- }
- if ak != reflect.String || bk != reflect.String {
- return ak < bk
- }
- ar, br := []rune(a.String()), []rune(b.String())
- for i := 0; i < len(ar) && i < len(br); i++ {
- if ar[i] == br[i] {
- continue
- }
- al := unicode.IsLetter(ar[i])
- bl := unicode.IsLetter(br[i])
- if al && bl {
- return ar[i] < br[i]
- }
- if al || bl {
- return bl
- }
- var ai, bi int
- var an, bn int64
- for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
- an = an*10 + int64(ar[ai]-'0')
- }
- for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
- bn = bn*10 + int64(br[bi]-'0')
- }
- if an != bn {
- return an < bn
- }
- if ai != bi {
- return ai < bi
- }
- return ar[i] < br[i]
- }
- return len(ar) < len(br)
-}
-
-// keyFloat returns a float value for v if it is a number or bool,
-// and reports whether it is.
-func keyFloat(v reflect.Value) (f float64, ok bool) {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.Int()), true
- case reflect.Float32, reflect.Float64:
- return v.Float(), true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return float64(v.Uint()), true
- case reflect.Bool:
- if v.Bool() {
- return 1, true
- }
- return 0, true
- }
- return 0, false
-}
-
-// numLess returns whether a < b.
-// a and b must have the same kind.
-func numLess(a, b reflect.Value) bool {
- switch a.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return a.Int() < b.Int()
- case reflect.Float32, reflect.Float64:
- return a.Float() < b.Float()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return a.Uint() < b.Uint()
- case reflect.Bool:
- return !a.Bool() && b.Bool()
- }
- panic("not a number")
-}
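keyList.Less orders map keys with a digit-aware comparison so that, for example, "a2" sorts before "a10". A simplified, illustrative sketch of that rule for plain strings (naturalLess and digitRun are invented names; the real implementation also handles numbers, bools and mixed kinds):

```go
package main

import (
	"fmt"
	"strconv"
	"unicode"
)

// naturalLess compares two strings rune by rune, but compares maximal runs
// of digits numerically rather than lexically.
func naturalLess(a, b string) bool {
	ar, br := []rune(a), []rune(b)
	for i := 0; i < len(ar) && i < len(br); i++ {
		if ar[i] == br[i] {
			continue
		}
		if unicode.IsDigit(ar[i]) && unicode.IsDigit(br[i]) {
			an, _ := strconv.Atoi(digitRun(ar, i))
			bn, _ := strconv.Atoi(digitRun(br, i))
			if an != bn {
				return an < bn
			}
		}
		return ar[i] < br[i]
	}
	return len(ar) < len(br)
}

// digitRun returns the maximal run of digits starting at index i.
func digitRun(r []rune, i int) string {
	j := i
	for j < len(r) && unicode.IsDigit(r[j]) {
		j++
	}
	return string(r[i:j])
}

func main() {
	fmt.Println(naturalLess("a2", "a10"))   // true: 2 < 10
	fmt.Println(naturalLess("a10", "a2"))   // false
	fmt.Println(naturalLess("key", "key2")) // true: shorter prefix first
}
```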
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/suite_test.go b/Godeps/_workspace/src/github.com/coreos/yaml/suite_test.go
deleted file mode 100644
index c5cf1ed..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/suite_test.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package yaml_test
-
-import (
- . "gopkg.in/check.v1"
- "testing"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type S struct{}
-
-var _ = Suite(&S{})
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/writerc.go b/Godeps/_workspace/src/github.com/coreos/yaml/writerc.go
deleted file mode 100644
index 190362f..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/writerc.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_WRITER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
- if emitter.write_handler == nil {
- panic("write handler not set")
- }
-
- // Check if the buffer is empty.
- if emitter.buffer_pos == 0 {
- return true
- }
-
- // If the output encoding is UTF-8, we don't need to recode the buffer.
- if emitter.encoding == yaml_UTF8_ENCODING {
- if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- return true
- }
-
- // Recode the buffer into the raw buffer.
- var low, high int
- if emitter.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- pos := 0
- for pos < emitter.buffer_pos {
- // See the "reader.c" code for more details on UTF-8 encoding. Note
- // that we assume that the buffer contains a valid UTF-8 sequence.
-
- // Read the next UTF-8 character.
- octet := emitter.buffer[pos]
-
- var w int
- var value rune
- switch {
- case octet&0x80 == 0x00:
- w, value = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, value = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, value = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, value = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = emitter.buffer[pos+k]
- value = (value << 6) + (rune(octet) & 0x3F)
- }
- pos += w
-
- // Write the character.
- if value < 0x10000 {
- var b [2]byte
- b[high] = byte(value >> 8)
- b[low] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
- } else {
- // Write the character using a surrogate pair (check "reader.c").
- var b [4]byte
- value -= 0x10000
- b[high] = byte(0xD8 + (value >> 18))
- b[low] = byte((value >> 10) & 0xFF)
- b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
- b[low+2] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
- }
- }
-
- // Write the raw buffer.
- if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- emitter.raw_buffer = emitter.raw_buffer[:0]
- return true
-}
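The surrogate-pair arithmetic above (value -= 0x10000, then the 0xD8.. high and 0xDC.. low halves) matches what the standard library's UTF-16 encoder produces; a minimal sketch for one sample code point:

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	r := rune(0x1F600) // a code point above 0xFFFF needs a surrogate pair
	pair := utf16.Encode([]rune{r})
	fmt.Printf("%#04x %#04x\n", pair[0], pair[1]) // 0xd83d 0xde00
}
```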
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/yaml.go b/Godeps/_workspace/src/github.com/coreos/yaml/yaml.go
deleted file mode 100644
index 16e1365..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/yaml.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-// https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-type yamlError string
-
-func fail(msg string) {
- panic(yamlError(msg))
-}
-
-func handleErr(err *error) {
- if r := recover(); r != nil {
- if e, ok := r.(yamlError); ok {
- *err = errors.New("YAML error: " + string(e))
- } else {
- panic(r)
- }
- }
-}
-
-// The Setter interface may be implemented by types to do their own custom
-// unmarshalling of YAML values, rather than being implicitly assigned by
-// the yaml package machinery. If setting the value works, the method should
-// return true. If it returns false, the value is considered unsupported
-// and is omitted from maps and slices.
-type Setter interface {
- SetYAML(tag string, value interface{}) bool
-}
-
-// The Getter interface is implemented by types to do their own custom
-// marshalling into a YAML tag and value.
-type Getter interface {
- GetYAML() (tag string, value interface{})
-}
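A minimal sketch (not taken from this file) of a type implementing the Setter and Getter hooks documented above; the Timeout type and its duration parsing are purely illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// Timeout decodes YAML scalars such as "30s" into a time.Duration.
type Timeout struct {
	D time.Duration
}

// SetYAML parses the incoming scalar value. Returning false tells the
// decoder the value was unsupported, so it is omitted from maps and slices.
func (t *Timeout) SetYAML(tag string, value interface{}) bool {
	s, ok := value.(string)
	if !ok {
		return false
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return false
	}
	t.D = d
	return true
}

// GetYAML emits the duration back out as a plain string scalar.
func (t Timeout) GetYAML() (tag string, value interface{}) {
	return "", t.D.String()
}

func main() {
	var t Timeout
	fmt.Println(t.SetYAML("", "30s"), t.D) // true 30s
}
```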
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values and the type of out will be considered,
-// and Unmarshal will do the best possible job to unmarshal values
-// appropriately. It is NOT considered an error, though, to skip values
-// because they are not available in the decoded YAML, or if they are not
-// compatible with the out value. To ensure something was properly
-// unmarshaled use a map or compare against the previous value for the
-// field (usually the zero value).
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
- defer handleErr(&err)
- d := newDecoder()
- p := newParser(in, UnmarshalMappingKeyTransform)
- defer p.destroy()
- node := p.parse()
- if node != nil {
- v := reflect.ValueOf(out)
- if v.Kind() == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
- d.unmarshal(node, v)
- }
- return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Does not apply to zero valued structs.
-//
-// flow Marshal using a flow style (useful for structs,
-// sequences and maps).
-//
-// inline Inline the struct it's applied to, so its fields
-// are processed as if they were part of the outer
-// struct.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-// type T struct {
-// F int "a,omitempty"
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
- defer handleErr(&err)
- e := newEncoder()
- defer e.destroy()
- e.marshal("", reflect.ValueOf(in))
- e.finish()
- out = e.out
- return
-}
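A short, runnable usage sketch of the Marshal and Unmarshal behaviour documented above, assuming the upstream gopkg.in/yaml.v1 import path (inside this repository the vendored github.com/coreos/yaml fork would be imported instead):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v1"
)

type T struct {
	F int `yaml:"a,omitempty"`
	B int
}

func main() {
	out, err := yaml.Marshal(&T{B: 2})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "b: 2\n" — F is omitted because of omitempty

	var t T
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.F, t.B) // 1 2
}
```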
-
-// UnmarshalMappingKeyTransform is a string transformation that is applied to
-// each mapping key in a YAML document before it is unmarshalled. By default,
-// UnmarshalMappingKeyTransform is an identity transform (no modification).
-var UnmarshalMappingKeyTransform transformString = identityTransform
-
-type transformString func(in string) (out string)
-
-func identityTransform(in string) (out string) {
- return in
-}
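A hedged sketch of how a caller might use the fork-specific UnmarshalMappingKeyTransform hook declared above; the dash-to-underscore rule and the Config type are illustrative assumptions, not taken from this file:

```go
package main

import (
	"fmt"
	"strings"

	yaml "github.com/coreos/yaml"
)

type Config struct {
	SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
}

func main() {
	// Accept both "ssh-authorized-keys" and "ssh_authorized_keys" as keys by
	// normalizing dashes before field matching.
	yaml.UnmarshalMappingKeyTransform = func(in string) string {
		return strings.Replace(in, "-", "_", -1)
	}

	doc := []byte("ssh-authorized-keys:\n  - ssh-rsa AAAA...")
	var c Config
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.SSHAuthorizedKeys)
}
```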
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
- FieldsMap map[string]fieldInfo
- FieldsList []fieldInfo
-
- // InlineMap is the number of the field in the struct that
- // contains an ,inline map, or -1 if there's none.
- InlineMap int
-}
-
-type fieldInfo struct {
- Key string
- Num int
- OmitEmpty bool
- Flow bool
-
- // Inline holds the field index if the field is part of an inlined struct.
- Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
- fieldMapMutex.RLock()
- sinfo, found := structMap[st]
- fieldMapMutex.RUnlock()
- if found {
- return sinfo, nil
- }
-
- n := st.NumField()
- fieldsMap := make(map[string]fieldInfo)
- fieldsList := make([]fieldInfo, 0, n)
- inlineMap := -1
- for i := 0; i != n; i++ {
- field := st.Field(i)
- if field.PkgPath != "" {
- continue // Private field
- }
-
- info := fieldInfo{Num: i}
-
- tag := field.Tag.Get("yaml")
- if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
- tag = string(field.Tag)
- }
- if tag == "-" {
- continue
- }
-
- inline := false
- fields := strings.Split(tag, ",")
- if len(fields) > 1 {
- for _, flag := range fields[1:] {
- switch flag {
- case "omitempty":
- info.OmitEmpty = true
- case "flow":
- info.Flow = true
- case "inline":
- inline = true
- default:
- return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
- }
- }
- tag = fields[0]
- }
-
- if inline {
- switch field.Type.Kind() {
- // TODO: Implement support for inline maps.
- //case reflect.Map:
- // if inlineMap >= 0 {
- // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
- // }
- // if field.Type.Key() != reflect.TypeOf("") {
- // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
- // }
- // inlineMap = info.Num
- case reflect.Struct:
- sinfo, err := getStructInfo(field.Type)
- if err != nil {
- return nil, err
- }
- for _, finfo := range sinfo.FieldsList {
- if _, found := fieldsMap[finfo.Key]; found {
- msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
- if finfo.Inline == nil {
- finfo.Inline = []int{i, finfo.Num}
- } else {
- finfo.Inline = append([]int{i}, finfo.Inline...)
- }
- fieldsMap[finfo.Key] = finfo
- fieldsList = append(fieldsList, finfo)
- }
- default:
- //return nil, errors.New("Option ,inline needs a struct value or map field")
- return nil, errors.New("Option ,inline needs a struct value field")
- }
- continue
- }
-
- if tag != "" {
- info.Key = tag
- } else {
- info.Key = strings.ToLower(field.Name)
- }
-
- if _, found = fieldsMap[info.Key]; found {
- msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
-
- fieldsList = append(fieldsList, info)
- fieldsMap[info.Key] = info
- }
-
- sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
-
- fieldMapMutex.Lock()
- structMap[st] = sinfo
- fieldMapMutex.Unlock()
- return sinfo, nil
-}
-
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.String:
- return len(v.String()) == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflect.Slice:
- return v.Len() == 0
- case reflect.Map:
- return v.Len() == 0
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Bool:
- return !v.Bool()
- }
- return false
-}
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/yamlh.go b/Godeps/_workspace/src/github.com/coreos/yaml/yamlh.go
deleted file mode 100644
index 4b020b1..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/yamlh.go
+++ /dev/null
@@ -1,716 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
- major int8 // The major version number.
- minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
- handle []byte // The tag handle.
- prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
- // Let the parser choose the encoding.
- yaml_ANY_ENCODING yaml_encoding_t = iota
-
- yaml_UTF8_ENCODING // The default UTF-8 encoding.
- yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
- // Let the parser choose the break type.
- yaml_ANY_BREAK yaml_break_t = iota
-
- yaml_CR_BREAK // Use CR for line breaks (Mac style).
- yaml_LN_BREAK // Use LN for line breaks (Unix style).
- yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
- // No error is produced.
- yaml_NO_ERROR yaml_error_type_t = iota
-
- yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
- yaml_READER_ERROR // Cannot read or decode the input stream.
- yaml_SCANNER_ERROR // Cannot scan the input stream.
- yaml_PARSER_ERROR // Cannot parse the input stream.
- yaml_COMPOSER_ERROR // Cannot compose a YAML document.
- yaml_WRITER_ERROR // Cannot write to the output stream.
- yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
- index int // The position index.
- line int // The position line.
- column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
- yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
- yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
- yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
- yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
- yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
- yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
- yaml_BLOCK_MAPPING_STYLE // The block mapping style.
- yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
- // An empty token.
- yaml_NO_TOKEN yaml_token_type_t = iota
-
- yaml_STREAM_START_TOKEN // A STREAM-START token.
- yaml_STREAM_END_TOKEN // A STREAM-END token.
-
- yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
- yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
- yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
- yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
- yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
- yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
- yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
- yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
- yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
- yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
- yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
- yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
- yaml_KEY_TOKEN // A KEY token.
- yaml_VALUE_TOKEN // A VALUE token.
-
- yaml_ALIAS_TOKEN // An ALIAS token.
- yaml_ANCHOR_TOKEN // An ANCHOR token.
- yaml_TAG_TOKEN // A TAG token.
- yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
- switch tt {
- case yaml_NO_TOKEN:
- return "yaml_NO_TOKEN"
- case yaml_STREAM_START_TOKEN:
- return "yaml_STREAM_START_TOKEN"
- case yaml_STREAM_END_TOKEN:
- return "yaml_STREAM_END_TOKEN"
- case yaml_VERSION_DIRECTIVE_TOKEN:
- return "yaml_VERSION_DIRECTIVE_TOKEN"
- case yaml_TAG_DIRECTIVE_TOKEN:
- return "yaml_TAG_DIRECTIVE_TOKEN"
- case yaml_DOCUMENT_START_TOKEN:
- return "yaml_DOCUMENT_START_TOKEN"
- case yaml_DOCUMENT_END_TOKEN:
- return "yaml_DOCUMENT_END_TOKEN"
- case yaml_BLOCK_SEQUENCE_START_TOKEN:
- return "yaml_BLOCK_SEQUENCE_START_TOKEN"
- case yaml_BLOCK_MAPPING_START_TOKEN:
- return "yaml_BLOCK_MAPPING_START_TOKEN"
- case yaml_BLOCK_END_TOKEN:
- return "yaml_BLOCK_END_TOKEN"
- case yaml_FLOW_SEQUENCE_START_TOKEN:
- return "yaml_FLOW_SEQUENCE_START_TOKEN"
- case yaml_FLOW_SEQUENCE_END_TOKEN:
- return "yaml_FLOW_SEQUENCE_END_TOKEN"
- case yaml_FLOW_MAPPING_START_TOKEN:
- return "yaml_FLOW_MAPPING_START_TOKEN"
- case yaml_FLOW_MAPPING_END_TOKEN:
- return "yaml_FLOW_MAPPING_END_TOKEN"
- case yaml_BLOCK_ENTRY_TOKEN:
- return "yaml_BLOCK_ENTRY_TOKEN"
- case yaml_FLOW_ENTRY_TOKEN:
- return "yaml_FLOW_ENTRY_TOKEN"
- case yaml_KEY_TOKEN:
- return "yaml_KEY_TOKEN"
- case yaml_VALUE_TOKEN:
- return "yaml_VALUE_TOKEN"
- case yaml_ALIAS_TOKEN:
- return "yaml_ALIAS_TOKEN"
- case yaml_ANCHOR_TOKEN:
- return "yaml_ANCHOR_TOKEN"
- case yaml_TAG_TOKEN:
- return "yaml_TAG_TOKEN"
- case yaml_SCALAR_TOKEN:
- return "yaml_SCALAR_TOKEN"
- }
- return ""
-}
-
-// The token structure.
-type yaml_token_t struct {
- // The token type.
- typ yaml_token_type_t
-
- // The start/end of the token.
- start_mark, end_mark yaml_mark_t
-
- // The stream encoding (for yaml_STREAM_START_TOKEN).
- encoding yaml_encoding_t
-
- // The alias/anchor/scalar value or tag/tag directive handle
- // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
- value []byte
-
- // The tag suffix (for yaml_TAG_TOKEN).
- suffix []byte
-
- // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
- prefix []byte
-
- // The scalar style (for yaml_SCALAR_TOKEN).
- style yaml_scalar_style_t
-
- // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
- major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
- // An empty event.
- yaml_NO_EVENT yaml_event_type_t = iota
-
- yaml_STREAM_START_EVENT // A STREAM-START event.
- yaml_STREAM_END_EVENT // A STREAM-END event.
- yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
- yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
- yaml_ALIAS_EVENT // An ALIAS event.
- yaml_SCALAR_EVENT // A SCALAR event.
- yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
- yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
- yaml_MAPPING_START_EVENT // A MAPPING-START event.
- yaml_MAPPING_END_EVENT // A MAPPING-END event.
-)
-
-// The event structure.
-type yaml_event_t struct {
-
- // The event type.
- typ yaml_event_type_t
-
- // The start and end of the event.
- start_mark, end_mark yaml_mark_t
-
- // The document encoding (for yaml_STREAM_START_EVENT).
- encoding yaml_encoding_t
-
- // The version directive (for yaml_DOCUMENT_START_EVENT).
- version_directive *yaml_version_directive_t
-
- // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
- tag_directives []yaml_tag_directive_t
-
- // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
- anchor []byte
-
- // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- tag []byte
-
- // The scalar value (for yaml_SCALAR_EVENT).
- value []byte
-
- // Is the document start/end indicator implicit, or the tag optional?
- // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
- implicit bool
-
- // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
- quoted_implicit bool
-
- // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
- yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
- yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
- yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
- yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
- yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
- yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
- yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
- yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
-
- // Not in original libyaml.
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
- yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
-
- yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
- yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
- // An empty node.
- yaml_NO_NODE yaml_node_type_t = iota
-
- yaml_SCALAR_NODE // A scalar node.
- yaml_SEQUENCE_NODE // A sequence node.
- yaml_MAPPING_NODE // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
- key int // The key of the element.
- value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
- typ yaml_node_type_t // The node type.
- tag []byte // The node tag.
-
- // The node data.
-
- // The scalar parameters (for yaml_SCALAR_NODE).
- scalar struct {
- value []byte // The scalar value.
- length int // The length of the scalar value.
- style yaml_scalar_style_t // The scalar style.
- }
-
- // The sequence parameters (for YAML_SEQUENCE_NODE).
- sequence struct {
- items_data []yaml_node_item_t // The stack of sequence items.
- style yaml_sequence_style_t // The sequence style.
- }
-
- // The mapping parameters (for yaml_MAPPING_NODE).
- mapping struct {
- pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
- pairs_start *yaml_node_pair_t // The beginning of the stack.
- pairs_end *yaml_node_pair_t // The end of the stack.
- pairs_top *yaml_node_pair_t // The top of the stack.
- style yaml_mapping_style_t // The mapping style.
- }
-
- start_mark yaml_mark_t // The beginning of the node.
- end_mark yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write not more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out] data A pointer to an application data specified by
-// yaml_parser_set_input().
-// [out] buffer The buffer to write the data from the source.
-// [in] size The size of the buffer.
-// [out] size_read The actual number of bytes read from the source.
-//
-// On success, the handler should return 1. If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occurred.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int
-
- eof bool // EOF flag
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing.
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output. The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out] data A pointer to an application data specified by
-// yaml_emitter_set_output().
-// @param[in] buffer The buffer with bytes to be written.
-// @param[in] size The size of the buffer.
-//
-// @returns On success, the handler should return @c 1. If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // If the output is in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // If the last character was a whitespace?
- indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
- open_ended bool // If an explicit document end is required?
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // If the stream was already opened?
- closed bool // If the stream was already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // If the node has been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
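The yaml_read_handler_t prototype above still carries libyaml's C-style documentation (size_read, return 1 on success), but in this Go port a read handler is simply a function that fills a byte slice from the input source and reports how many bytes it wrote, with EOF signalled through the error. A minimal sketch of that contract, using a stand-in struct instead of the unexported yaml_parser_t; all names here are illustrative, not part of the vendored package:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// parserStub stands in for the unexported yaml_parser_t; only the field
// this sketch needs is included.
type parserStub struct {
	source io.Reader
}

// readHandler has the same shape as yaml_read_handler_t: write at most
// len(buffer) bytes from the source and report how many were written.
// In Go, EOF is reported via the error rather than a size_read of 0.
func readHandler(p *parserStub, buffer []byte) (int, error) {
	return p.source.Read(buffer)
}

func main() {
	p := &parserStub{source: bytes.NewBufferString("key: value\n")}
	buf := make([]byte, 8)
	for {
		n, err := readHandler(p, buf)
		if n > 0 {
			fmt.Printf("chunk %q\n", buf[:n])
		}
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
	}
}
```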
diff --git a/Godeps/_workspace/src/github.com/coreos/yaml/yamlprivateh.go b/Godeps/_workspace/src/github.com/coreos/yaml/yamlprivateh.go
deleted file mode 100644
index 8110ce3..0000000
--- a/Godeps/_workspace/src/github.com/coreos/yaml/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
- return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
- return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
- return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
- //return is_space(b, i) || is_tab(b, i)
- return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
- return (b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
- return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
- //return is_break(b, i) || is_z(b, i)
- return ( // is_break:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- // is_z:
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
- //return is_space(b, i) || is_breakz(b, i)
- return ( // is_space:
- b[i] == ' ' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
- //return is_blank(b, i) || is_breakz(b, i)
- return ( // is_blank:
- b[i] == ' ' || b[i] == '\t' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
- // Don't replace these by a switch without first
- // confirming that it is being inlined.
- if b&0x80 == 0x00 {
- return 1
- }
- if b&0xE0 == 0xC0 {
- return 2
- }
- if b&0xF0 == 0xE0 {
- return 3
- }
- if b&0xF8 == 0xF0 {
- return 4
- }
- return 0
-
-}
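The width helper above determines how many bytes a UTF-8 sequence occupies by masking the high bits of its leading byte. A standalone sketch of the same masks, cross-checked against the standard library; the sample runes are arbitrary:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth mirrors the mask checks in width(): the high bits of a UTF-8
// leading byte encode the length of the sequence it starts.
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1 // 0xxxxxxx: ASCII
	case b&0xE0 == 0xC0:
		return 2 // 110xxxxx
	case b&0xF0 == 0xE0:
		return 3 // 1110xxxx
	case b&0xF8 == 0xF0:
		return 4 // 11110xxx
	}
	return 0 // continuation or invalid leading byte
}

func main() {
	for _, s := range []string{"a", "é", "€", "🙂"} {
		b := []byte(s)
		_, n := utf8.DecodeRune(b)
		fmt.Printf("%q leading byte 0x%X: width=%d, utf8.DecodeRune says %d\n",
			s, b[0], leadWidth(b[0]), n)
	}
}
```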
diff --git a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/MAINTAINERS b/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/MAINTAINERS
deleted file mode 100644
index 1cb5513..0000000
--- a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/MAINTAINERS
+++ /dev/null
@@ -1,2 +0,0 @@
-Michael Crosby (@crosbymichael)
-Guillaume J. Charmes (@creack)
diff --git a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink.go b/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink.go
deleted file mode 100644
index 5cc7562..0000000
--- a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Package netlink provides access to low-level Netlink sockets and messages.
-//
-// Actual implementations are in:
-// netlink_linux.go
-// netlink_darwin.go
-package netlink
-
-import (
- "errors"
- "net"
-)
-
-var (
- ErrWrongSockType = errors.New("Wrong socket type")
- ErrShortResponse = errors.New("Got short response from netlink")
-)
-
-// A Route is a subnet associated with the interface to reach it.
-type Route struct {
- *net.IPNet
- Iface *net.Interface
- Default bool
-}
diff --git a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_linux.go b/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_linux.go
deleted file mode 100644
index 6de293d..0000000
--- a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_linux.go
+++ /dev/null
@@ -1,891 +0,0 @@
-// +build amd64
-
-package netlink
-
-import (
- "encoding/binary"
- "fmt"
- "math/rand"
- "net"
- "syscall"
- "unsafe"
-)
-
-const (
- IFNAMSIZ = 16
- DEFAULT_CHANGE = 0xFFFFFFFF
- IFLA_INFO_KIND = 1
- IFLA_INFO_DATA = 2
- VETH_INFO_PEER = 1
- IFLA_NET_NS_FD = 28
- SIOC_BRADDBR = 0x89a0
- SIOC_BRADDIF = 0x89a2
-)
-
-var nextSeqNr int
-
-type ifreqHwaddr struct {
- IfrnName [16]byte
- IfruHwaddr syscall.RawSockaddr
-}
-
-type ifreqIndex struct {
- IfrnName [16]byte
- IfruIndex int32
-}
-
-func nativeEndian() binary.ByteOrder {
- var x uint32 = 0x01020304
- if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
- return binary.BigEndian
- }
- return binary.LittleEndian
-}
-
-func getSeq() int {
- nextSeqNr = nextSeqNr + 1
- return nextSeqNr
-}
-
-func getIpFamily(ip net.IP) int {
- if len(ip) <= net.IPv4len {
- return syscall.AF_INET
- }
- if ip.To4() != nil {
- return syscall.AF_INET
- }
- return syscall.AF_INET6
-}
-
-type NetlinkRequestData interface {
- Len() int
- ToWireFormat() []byte
-}
-
-type IfInfomsg struct {
- syscall.IfInfomsg
-}
-
-func newIfInfomsg(family int) *IfInfomsg {
- return &IfInfomsg{
- IfInfomsg: syscall.IfInfomsg{
- Family: uint8(family),
- },
- }
-}
-
-func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
- msg := newIfInfomsg(family)
- parent.children = append(parent.children, msg)
- return msg
-}
-
-func (msg *IfInfomsg) ToWireFormat() []byte {
- native := nativeEndian()
-
- length := syscall.SizeofIfInfomsg
- b := make([]byte, length)
- b[0] = msg.Family
- b[1] = 0
- native.PutUint16(b[2:4], msg.Type)
- native.PutUint32(b[4:8], uint32(msg.Index))
- native.PutUint32(b[8:12], msg.Flags)
- native.PutUint32(b[12:16], msg.Change)
- return b
-}
-
-func (msg *IfInfomsg) Len() int {
- return syscall.SizeofIfInfomsg
-}
-
-type IfAddrmsg struct {
- syscall.IfAddrmsg
-}
-
-func newIfAddrmsg(family int) *IfAddrmsg {
- return &IfAddrmsg{
- IfAddrmsg: syscall.IfAddrmsg{
- Family: uint8(family),
- },
- }
-}
-
-func (msg *IfAddrmsg) ToWireFormat() []byte {
- native := nativeEndian()
-
- length := syscall.SizeofIfAddrmsg
- b := make([]byte, length)
- b[0] = msg.Family
- b[1] = msg.Prefixlen
- b[2] = msg.Flags
- b[3] = msg.Scope
- native.PutUint32(b[4:8], msg.Index)
- return b
-}
-
-func (msg *IfAddrmsg) Len() int {
- return syscall.SizeofIfAddrmsg
-}
-
-type RtMsg struct {
- syscall.RtMsg
-}
-
-func newRtMsg(family int) *RtMsg {
- return &RtMsg{
- RtMsg: syscall.RtMsg{
- Family: uint8(family),
- Table: syscall.RT_TABLE_MAIN,
- Scope: syscall.RT_SCOPE_UNIVERSE,
- Protocol: syscall.RTPROT_BOOT,
- Type: syscall.RTN_UNICAST,
- },
- }
-}
-
-func (msg *RtMsg) ToWireFormat() []byte {
- native := nativeEndian()
-
- length := syscall.SizeofRtMsg
- b := make([]byte, length)
- b[0] = msg.Family
- b[1] = msg.Dst_len
- b[2] = msg.Src_len
- b[3] = msg.Tos
- b[4] = msg.Table
- b[5] = msg.Protocol
- b[6] = msg.Scope
- b[7] = msg.Type
- native.PutUint32(b[8:12], msg.Flags)
- return b
-}
-
-func (msg *RtMsg) Len() int {
- return syscall.SizeofRtMsg
-}
-
-func rtaAlignOf(attrlen int) int {
- return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
-}
-
-type RtAttr struct {
- syscall.RtAttr
- Data []byte
- children []NetlinkRequestData
-}
-
-func newRtAttr(attrType int, data []byte) *RtAttr {
- return &RtAttr{
- RtAttr: syscall.RtAttr{
- Type: uint16(attrType),
- },
- children: []NetlinkRequestData{},
- Data: data,
- }
-}
-
-func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
- attr := newRtAttr(attrType, data)
- parent.children = append(parent.children, attr)
- return attr
-}
-
-func (a *RtAttr) Len() int {
- l := 0
- for _, child := range a.children {
- l += child.Len() + syscall.SizeofRtAttr
- }
- if l == 0 {
- l++
- }
- return rtaAlignOf(l + len(a.Data))
-}
-
-func (a *RtAttr) ToWireFormat() []byte {
- native := nativeEndian()
-
- length := a.Len()
- buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr))
-
- if a.Data != nil {
- copy(buf[4:], a.Data)
- } else {
- next := 4
- for _, child := range a.children {
- childBuf := child.ToWireFormat()
- copy(buf[next:], childBuf)
- next += rtaAlignOf(len(childBuf))
- }
- }
-
- if l := uint16(rtaAlignOf(length)); l != 0 {
- native.PutUint16(buf[0:2], l+1)
- }
- native.PutUint16(buf[2:4], a.Type)
-
- return buf
-}
-
-type NetlinkRequest struct {
- syscall.NlMsghdr
- Data []NetlinkRequestData
-}
-
-func (rr *NetlinkRequest) ToWireFormat() []byte {
- native := nativeEndian()
-
- length := rr.Len
- dataBytes := make([][]byte, len(rr.Data))
- for i, data := range rr.Data {
- dataBytes[i] = data.ToWireFormat()
- length += uint32(len(dataBytes[i]))
- }
- b := make([]byte, length)
- native.PutUint32(b[0:4], length)
- native.PutUint16(b[4:6], rr.Type)
- native.PutUint16(b[6:8], rr.Flags)
- native.PutUint32(b[8:12], rr.Seq)
- native.PutUint32(b[12:16], rr.Pid)
-
- next := 16
- for _, data := range dataBytes {
- copy(b[next:], data)
- next += len(data)
- }
- return b
-}
-
-func (rr *NetlinkRequest) AddData(data NetlinkRequestData) {
- if data != nil {
- rr.Data = append(rr.Data, data)
- }
-}
-
-func newNetlinkRequest(proto, flags int) *NetlinkRequest {
- return &NetlinkRequest{
- NlMsghdr: syscall.NlMsghdr{
- Len: uint32(syscall.NLMSG_HDRLEN),
- Type: uint16(proto),
- Flags: syscall.NLM_F_REQUEST | uint16(flags),
- Seq: uint32(getSeq()),
- },
- }
-}
-
-type NetlinkSocket struct {
- fd int
- lsa syscall.SockaddrNetlink
-}
-
-func getNetlinkSocket() (*NetlinkSocket, error) {
- fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE)
- if err != nil {
- return nil, err
- }
- s := &NetlinkSocket{
- fd: fd,
- }
- s.lsa.Family = syscall.AF_NETLINK
- if err := syscall.Bind(fd, &s.lsa); err != nil {
- syscall.Close(fd)
- return nil, err
- }
-
- return s, nil
-}
-
-func (s *NetlinkSocket) Close() {
- syscall.Close(s.fd)
-}
-
-func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
- if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil {
- return err
- }
- return nil
-}
-
-func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
- rb := make([]byte, syscall.Getpagesize())
- nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
- if err != nil {
- return nil, err
- }
- if nr < syscall.NLMSG_HDRLEN {
- return nil, ErrShortResponse
- }
- rb = rb[:nr]
- return syscall.ParseNetlinkMessage(rb)
-}
-
-func (s *NetlinkSocket) GetPid() (uint32, error) {
- lsa, err := syscall.Getsockname(s.fd)
- if err != nil {
- return 0, err
- }
- switch v := lsa.(type) {
- case *syscall.SockaddrNetlink:
- return v.Pid, nil
- }
- return 0, ErrWrongSockType
-}
-
-func (s *NetlinkSocket) HandleAck(seq uint32) error {
- native := nativeEndian()
-
- pid, err := s.GetPid()
- if err != nil {
- return err
- }
-
-done:
- for {
- msgs, err := s.Receive()
- if err != nil {
- return err
- }
- for _, m := range msgs {
- if m.Header.Seq != seq {
- return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, seq)
- }
- if m.Header.Pid != pid {
- return fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
- }
- if m.Header.Type == syscall.NLMSG_DONE {
- break done
- }
- if m.Header.Type == syscall.NLMSG_ERROR {
- error := int32(native.Uint32(m.Data[0:4]))
- if error == 0 {
- break done
- }
- return syscall.Errno(-error)
- }
- }
- }
-
- return nil
-}
-
-// Add a new default gateway. Identical to:
-// ip route add default via $ip
-func AddDefaultGw(ip net.IP) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- family := getIpFamily(ip)
-
- wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
-
- msg := newRtMsg(family)
- wb.AddData(msg)
-
- var ipData []byte
- if family == syscall.AF_INET {
- ipData = ip.To4()
- } else {
- ipData = ip.To16()
- }
-
- gateway := newRtAttr(syscall.RTA_GATEWAY, ipData)
-
- wb.AddData(gateway)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-// Bring up a particular network interface
-func NetworkLinkUp(iface *net.Interface) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Change = syscall.IFF_UP
- msg.Flags = syscall.IFF_UP
- msg.Index = int32(iface.Index)
- wb.AddData(msg)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-func NetworkLinkDown(iface *net.Interface) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Change = syscall.IFF_UP
- msg.Flags = 0 & ^syscall.IFF_UP
- msg.Index = int32(iface.Index)
- wb.AddData(msg)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-func NetworkSetMTU(iface *net.Interface, mtu int) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Type = syscall.RTM_SETLINK
- msg.Flags = syscall.NLM_F_REQUEST
- msg.Index = int32(iface.Index)
- msg.Change = DEFAULT_CHANGE
- wb.AddData(msg)
-
- var (
- b = make([]byte, 4)
- native = nativeEndian()
- )
- native.PutUint32(b, uint32(mtu))
-
- data := newRtAttr(syscall.IFLA_MTU, b)
- wb.AddData(data)
-
- if err := s.Send(wb); err != nil {
- return err
- }
- return s.HandleAck(wb.Seq)
-}
-
-// same as ip link set $name master $master
-func NetworkSetMaster(iface, master *net.Interface) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Type = syscall.RTM_SETLINK
- msg.Flags = syscall.NLM_F_REQUEST
- msg.Index = int32(iface.Index)
- msg.Change = DEFAULT_CHANGE
- wb.AddData(msg)
-
- var (
- b = make([]byte, 4)
- native = nativeEndian()
- )
- native.PutUint32(b, uint32(master.Index))
-
- data := newRtAttr(syscall.IFLA_MASTER, b)
- wb.AddData(data)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-func NetworkSetNsPid(iface *net.Interface, nspid int) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Type = syscall.RTM_SETLINK
- msg.Flags = syscall.NLM_F_REQUEST
- msg.Index = int32(iface.Index)
- msg.Change = DEFAULT_CHANGE
- wb.AddData(msg)
-
- var (
- b = make([]byte, 4)
- native = nativeEndian()
- )
- native.PutUint32(b, uint32(nspid))
-
- data := newRtAttr(syscall.IFLA_NET_NS_PID, b)
- wb.AddData(data)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-func NetworkSetNsFd(iface *net.Interface, fd int) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- msg.Type = syscall.RTM_SETLINK
- msg.Flags = syscall.NLM_F_REQUEST
- msg.Index = int32(iface.Index)
- msg.Change = DEFAULT_CHANGE
- wb.AddData(msg)
-
- var (
- b = make([]byte, 4)
- native = nativeEndian()
- )
- native.PutUint32(b, uint32(fd))
-
- data := newRtAttr(IFLA_NET_NS_FD, b)
- wb.AddData(data)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-// Add an IP address to an interface. This is identical to:
-// ip addr add $ip/$ipNet dev $iface
-func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- family := getIpFamily(ip)
-
- wb := newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
-
- msg := newIfAddrmsg(family)
- msg.Index = uint32(iface.Index)
- prefixLen, _ := ipNet.Mask.Size()
- msg.Prefixlen = uint8(prefixLen)
- wb.AddData(msg)
-
- var ipData []byte
- if family == syscall.AF_INET {
- ipData = ip.To4()
- } else {
- ipData = ip.To16()
- }
-
- localData := newRtAttr(syscall.IFA_LOCAL, ipData)
- wb.AddData(localData)
-
- addrData := newRtAttr(syscall.IFA_ADDRESS, ipData)
- wb.AddData(addrData)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-func zeroTerminated(s string) []byte {
- return []byte(s + "\000")
-}
-
-func nonZeroTerminated(s string) []byte {
- return []byte(s)
-}
-
-// Add a new network link of a specified type. This is identical to
-// running: ip link add $name type $linkType
-func NetworkLinkAdd(name string, linkType string) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- wb.AddData(msg)
-
- if name != "" {
- nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name))
- wb.AddData(nameData)
- }
-
- kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType))
-
- infoData := newRtAttr(syscall.IFLA_LINKINFO, kindData.ToWireFormat())
- wb.AddData(infoData)
-
- if err := s.Send(wb); err != nil {
- return err
- }
-
- return s.HandleAck(wb.Seq)
-}
-
-// Returns the currently routed IPv4 subnets as a slice of Routes.
-// This is similar to the first column of "ip route" output
-func NetworkGetRoutes() ([]Route, error) {
- native := nativeEndian()
-
- s, err := getNetlinkSocket()
- if err != nil {
- return nil, err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- wb.AddData(msg)
-
- if err := s.Send(wb); err != nil {
- return nil, err
- }
-
- pid, err := s.GetPid()
- if err != nil {
- return nil, err
- }
-
- res := make([]Route, 0)
-
-done:
- for {
- msgs, err := s.Receive()
- if err != nil {
- return nil, err
- }
- for _, m := range msgs {
- if m.Header.Seq != wb.Seq {
- return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, wb.Seq)
- }
- if m.Header.Pid != pid {
- return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
- }
- if m.Header.Type == syscall.NLMSG_DONE {
- break done
- }
- if m.Header.Type == syscall.NLMSG_ERROR {
- error := int32(native.Uint32(m.Data[0:4]))
- if error == 0 {
- break done
- }
- return nil, syscall.Errno(-error)
- }
- if m.Header.Type != syscall.RTM_NEWROUTE {
- continue
- }
-
- var r Route
-
- msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0]))
-
- if msg.Flags&syscall.RTM_F_CLONED != 0 {
- // Ignore cloned routes
- continue
- }
-
- if msg.Table != syscall.RT_TABLE_MAIN {
- // Ignore non-main tables
- continue
- }
-
- if msg.Family != syscall.AF_INET {
- // Ignore non-ipv4 routes
- continue
- }
-
- if msg.Dst_len == 0 {
- // Default routes
- r.Default = true
- }
-
- attrs, err := syscall.ParseNetlinkRouteAttr(&m)
- if err != nil {
- return nil, err
- }
- for _, attr := range attrs {
- switch attr.Attr.Type {
- case syscall.RTA_DST:
- ip := attr.Value
- r.IPNet = &net.IPNet{
- IP: ip,
- Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)),
- }
- case syscall.RTA_OIF:
- index := int(native.Uint32(attr.Value[0:4]))
- r.Iface, _ = net.InterfaceByIndex(index)
- }
- }
- if r.Default || r.IPNet != nil {
- res = append(res, r)
- }
- }
- }
-
- return res, nil
-}
-
-func getIfSocket() (fd int, err error) {
- for _, socket := range []int{
- syscall.AF_INET,
- syscall.AF_PACKET,
- syscall.AF_INET6,
- } {
- if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil {
- break
- }
- }
- if err == nil {
- return fd, nil
- }
- return -1, err
-}
-
-func NetworkChangeName(iface *net.Interface, newName string) error {
- fd, err := getIfSocket()
- if err != nil {
- return err
- }
- defer syscall.Close(fd)
-
- data := [IFNAMSIZ * 2]byte{}
- // the "-1"s here are very important for ensuring we get proper null
- // termination of our new C strings
- copy(data[:IFNAMSIZ-1], iface.Name)
- copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName)
-
- if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 {
- return errno
- }
- return nil
-}
-
-func NetworkCreateVethPair(name1, name2 string) error {
- s, err := getNetlinkSocket()
- if err != nil {
- return err
- }
- defer s.Close()
-
- wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
-
- msg := newIfInfomsg(syscall.AF_UNSPEC)
- wb.AddData(msg)
-
- nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1))
- wb.AddData(nameData)
-
- nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil)
- newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth"))
- nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil)
- nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil)
-
- newIfInfomsgChild(nest3, syscall.AF_UNSPEC)
- newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2))
-
- wb.AddData(nest1)
-
- if err := s.Send(wb); err != nil {
- return err
- }
- return s.HandleAck(wb.Seq)
-}
-
-// Create the actual bridge device. This is more backward-compatible than
-// netlink.NetworkLinkAdd and works on RHEL 6.
-func CreateBridge(name string, setMacAddr bool) error {
- s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- // ipv6 issue, creating with ipv4
- s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- return err
- }
- }
- defer syscall.Close(s)
-
- nameBytePtr, err := syscall.BytePtrFromString(name)
- if err != nil {
- return err
- }
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
- return err
- }
- if setMacAddr {
- return setBridgeMacAddress(s, name)
- }
- return nil
-}
-
-// Add a slave to a bridge device. This is more backward-compatible than
-// netlink.NetworkSetMaster and works on RHEL 6.
-func AddToBridge(iface, master *net.Interface) error {
- s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- // ipv6 issue, creating with ipv4
- s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- return err
- }
- }
- defer syscall.Close(s)
-
- ifr := ifreqIndex{}
- copy(ifr.IfrnName[:], master.Name)
- ifr.IfruIndex = int32(iface.Index)
-
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 {
- return err
- }
-
- return nil
-}
-
-func setBridgeMacAddress(s int, name string) error {
- ifr := ifreqHwaddr{}
- ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER
- copy(ifr.IfrnName[:], name)
-
- for i := 0; i < 6; i++ {
- ifr.IfruHwaddr.Data[i] = int8(rand.Intn(255))
- }
-
- ifr.IfruHwaddr.Data[0] &^= 0x1 // clear multicast bit
- ifr.IfruHwaddr.Data[0] |= 0x2 // set local assignment bit (IEEE802)
-
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 {
- return err
- }
- return nil
-}
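The comments in netlink_linux.go describe each helper in terms of the ip(8) command it replaces (`ip addr add`, `ip route add default via`, and so on). A hedged usage sketch tying those helpers together, assuming a Linux host, root privileges, and an interface named eth0; the addresses are placeholders:

```go
package main

import (
	"log"
	"net"

	"github.com/dotcloud/docker/pkg/netlink"
)

// Roughly the netlink equivalent of:
//   ip addr add 10.0.0.2/24 dev eth0
//   ip link set eth0 up
//   ip route add default via 10.0.0.1
func main() {
	iface, err := net.InterfaceByName("eth0")
	if err != nil {
		log.Fatal(err)
	}
	ip, ipNet, err := net.ParseCIDR("10.0.0.2/24")
	if err != nil {
		log.Fatal(err)
	}
	if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil {
		log.Fatal(err)
	}
	if err := netlink.NetworkLinkUp(iface); err != nil {
		log.Fatal(err)
	}
	if err := netlink.AddDefaultGw(net.ParseIP("10.0.0.1")); err != nil {
		log.Fatal(err)
	}
}
```

On other platforms the stubs in netlink_unsupported.go (below) make every call return ErrNotImplemented, so the same code compiles everywhere but only does real work on linux/amd64.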
diff --git a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_unsupported.go b/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_unsupported.go
deleted file mode 100644
index 8a5531b..0000000
--- a/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink/netlink_unsupported.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// +build !linux !amd64
-
-package netlink
-
-import (
- "errors"
- "net"
-)
-
-var (
- ErrNotImplemented = errors.New("not implemented")
-)
-
-func NetworkGetRoutes() ([]Route, error) {
- return nil, ErrNotImplemented
-}
-
-func NetworkLinkAdd(name string, linkType string) error {
- return ErrNotImplemented
-}
-
-func NetworkLinkUp(iface *net.Interface) error {
- return ErrNotImplemented
-}
-
-func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
- return ErrNotImplemented
-}
-
-func AddDefaultGw(ip net.IP) error {
- return ErrNotImplemented
-
-}
-
-func NetworkSetMTU(iface *net.Interface, mtu int) error {
- return ErrNotImplemented
-}
-
-func NetworkCreateVethPair(name1, name2 string) error {
- return ErrNotImplemented
-}
-
-func NetworkChangeName(iface *net.Interface, newName string) error {
- return ErrNotImplemented
-}
-
-func NetworkSetNsFd(iface *net.Interface, fd int) error {
- return ErrNotImplemented
-}
-
-func NetworkSetNsPid(iface *net.Interface, nspid int) error {
- return ErrNotImplemented
-}
-
-func NetworkSetMaster(iface, master *net.Interface) error {
- return ErrNotImplemented
-}
-
-func NetworkLinkDown(iface *net.Interface) error {
- return ErrNotImplemented
-}
-
-func CreateBridge(name string, setMacAddr bool) error {
- return ErrNotImplemented
-}
-
-func AddToBridge(iface, master *net.Interface) error {
- return ErrNotImplemented
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/LICENSE b/Godeps/_workspace/src/github.com/guelfey/go.dbus/LICENSE
deleted file mode 100644
index 06b252b..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2013, Georg Reinke ()
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/README.markdown b/Godeps/_workspace/src/github.com/guelfey/go.dbus/README.markdown
deleted file mode 100644
index dc467fd..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/README.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
-go.dbus
--------
-
-go.dbus is a simple library that implements native Go client bindings for the
-D-Bus message bus system.
-
-### Features
-
-* Complete native implementation of the D-Bus message protocol
-* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
-* Subpackages that help with the introspection / property interfaces
-
-### Installation
-
-This package requires Go 1.1. If you have Go installed and your GOPATH set up, just run:
-
-```
-go get github.com/guelfey/go.dbus
-```
-
-If you want to use the subpackages, you can install them the same way.
-
-### Usage
-
-The complete package documentation and some simple examples are available at
-[godoc.org](http://godoc.org/github.com/guelfey/go.dbus). Also, the
-[_examples](https://github.com/guelfey/go.dbus/tree/master/_examples) directory
-gives a short overview of the basic usage.
-
-Please note that the API is considered unstable for now and may change without
-further notice.
-
-### License
-
-go.dbus is available under the Simplified BSD License; see LICENSE for the full
-text.
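To make the README's "Go-like API" claim concrete, here is a minimal sketch in the spirit of the package's _examples: connect to the session bus and list the names currently owned on it. The SessionBus, Object, Call, and Store names follow the package's godoc of the time; treat this as an illustration rather than code taken from this repository.

```go
package main

import (
	"fmt"
	"log"

	"github.com/guelfey/go.dbus"
)

func main() {
	// Connect to the shared, already-authenticated session bus connection.
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Call org.freedesktop.DBus.ListNames and store the reply body.
	var names []string
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```

The Call/Store pair used here is the same API surface defined in call.go later in this diff.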
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth.go
deleted file mode 100644
index bf70a89..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package dbus
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "os/user"
-)
-
-// AuthStatus represents the Status of an authentication mechanism.
-type AuthStatus byte
-
-const (
- // AuthOk signals that authentication is finished; the next command
- // from the server should be an OK.
- AuthOk AuthStatus = iota
-
- // AuthContinue signals that additional data is needed; the next command
- // from the server should be a DATA.
- AuthContinue
-
- // AuthError signals an error; the server sent invalid data or some
- // other unexpected thing happened and the current authentication
- // process should be aborted.
- AuthError
-)
-
-type authState byte
-
-const (
- waitingForData authState = iota
- waitingForOk
- waitingForReject
-)
-
-// Auth defines the behaviour of an authentication mechanism.
-type Auth interface {
- // Return the name of the mechanism, the argument to the first AUTH command
- // and the next status.
- FirstData() (name, resp []byte, status AuthStatus)
-
- // Process the given DATA command, and return the argument to the DATA
- // command and the next status. If len(resp) == 0, no DATA command is sent.
- HandleData(data []byte) (resp []byte, status AuthStatus)
-}
-
-// Auth authenticates the connection, trying the given list of authentication
-// mechanisms (in that order). If nil is passed, the EXTERNAL and
-// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
-// connections, this method must be called before sending any messages to the
-// bus. Auth must not be called on shared connections.
-func (conn *Conn) Auth(methods []Auth) error {
- if methods == nil {
- u, err := user.Current()
- if err != nil {
- return err
- }
- methods = []Auth{AuthExternal(u.Username), AuthCookieSha1(u.Username, u.HomeDir)}
- }
- in := bufio.NewReader(conn.transport)
- err := conn.transport.SendNullByte()
- if err != nil {
- return err
- }
- err = authWriteLine(conn.transport, []byte("AUTH"))
- if err != nil {
- return err
- }
- s, err := authReadLine(in)
- if err != nil {
- return err
- }
- if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
- return errors.New("dbus: authentication protocol error")
- }
- s = s[1:]
- for _, v := range s {
- for _, m := range methods {
- if name, data, status := m.FirstData(); bytes.Equal(v, name) {
- var ok bool
- err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
- if err != nil {
- return err
- }
- switch status {
- case AuthOk:
- err, ok = conn.tryAuth(m, waitingForOk, in)
- case AuthContinue:
- err, ok = conn.tryAuth(m, waitingForData, in)
- default:
- panic("dbus: invalid authentication status")
- }
- if err != nil {
- return err
- }
- if ok {
- if conn.transport.SupportsUnixFDs() {
- err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
- if err != nil {
- return err
- }
- line, err := authReadLine(in)
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
- conn.EnableUnixFDs()
- conn.unixFD = true
- case bytes.Equal(line[0], []byte("ERROR")):
- default:
- return errors.New("dbus: authentication protocol error")
- }
- }
- err = authWriteLine(conn.transport, []byte("BEGIN"))
- if err != nil {
- return err
- }
- go conn.inWorker()
- go conn.outWorker()
- return nil
- }
- }
- }
- }
- return errors.New("dbus: authentication failed")
-}
-
-// tryAuth tries to authenticate with m as the mechanism, using state as the
-// initial authState and in for reading input. It returns (nil, true) on
-// success, (nil, false) on a REJECTED and (someErr, false) if some other
-// error occurred.
-func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
- for {
- s, err := authReadLine(in)
- if err != nil {
- return err, false
- }
- switch {
- case state == waitingForData && string(s[0]) == "DATA":
- if len(s) != 2 {
- err = authWriteLine(conn.transport, []byte("ERROR"))
- if err != nil {
- return err, false
- }
- continue
- }
- data, status := m.HandleData(s[1])
- switch status {
- case AuthOk, AuthContinue:
- if len(data) != 0 {
- err = authWriteLine(conn.transport, []byte("DATA"), data)
- if err != nil {
- return err, false
- }
- }
- if status == AuthOk {
- state = waitingForOk
- }
- case AuthError:
- err = authWriteLine(conn.transport, []byte("ERROR"))
- if err != nil {
- return err, false
- }
- }
- case state == waitingForData && string(s[0]) == "REJECTED":
- return nil, false
- case state == waitingForData && string(s[0]) == "ERROR":
- err = authWriteLine(conn.transport, []byte("CANCEL"))
- if err != nil {
- return err, false
- }
- state = waitingForReject
- case state == waitingForData && string(s[0]) == "OK":
- if len(s) != 2 {
- err = authWriteLine(conn.transport, []byte("CANCEL"))
- if err != nil {
- return err, false
- }
- state = waitingForReject
- }
- conn.uuid = string(s[1])
- return nil, true
- case state == waitingForData:
- err = authWriteLine(conn.transport, []byte("ERROR"))
- if err != nil {
- return err, false
- }
- case state == waitingForOk && string(s[0]) == "OK":
- if len(s) != 2 {
- err = authWriteLine(conn.transport, []byte("CANCEL"))
- if err != nil {
- return err, false
- }
- state = waitingForReject
- }
- conn.uuid = string(s[1])
- return nil, true
- case state == waitingForOk && string(s[0]) == "REJECTED":
- return nil, false
- case state == waitingForOk && (string(s[0]) == "DATA" ||
- string(s[0]) == "ERROR"):
-
- err = authWriteLine(conn.transport, []byte("CANCEL"))
- if err != nil {
- return err, false
- }
- state = waitingForReject
- case state == waitingForOk:
- err = authWriteLine(conn.transport, []byte("ERROR"))
- if err != nil {
- return err, false
- }
- case state == waitingForReject && string(s[0]) == "REJECTED":
- return nil, false
- case state == waitingForReject:
- return errors.New("dbus: authentication protocol error"), false
- default:
- panic("dbus: invalid auth state")
- }
- }
-}
-
-// authReadLine reads a line and separates it into its fields.
-func authReadLine(in *bufio.Reader) ([][]byte, error) {
- data, err := in.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
- data = bytes.TrimSuffix(data, []byte("\r\n"))
- return bytes.Split(data, []byte{' '}), nil
-}
-
-// authWriteLine writes the given line in the authentication protocol format
-// (elements of data separated by a " " and terminated by "\r\n").
-func authWriteLine(out io.Writer, data ...[]byte) error {
- buf := make([]byte, 0)
- for i, v := range data {
- buf = append(buf, v...)
- if i != len(data)-1 {
- buf = append(buf, ' ')
- }
- }
- buf = append(buf, '\r')
- buf = append(buf, '\n')
- n, err := out.Write(buf)
- if err != nil {
- return err
- }
- if n != len(buf) {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
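authWriteLine and authReadLine above frame every command of the authentication exchange as space-separated fields terminated by \r\n. A self-contained sketch of that framing, using local copies of the helpers (the originals are unexported) and a bytes.Buffer in place of the transport:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// writeLine mirrors authWriteLine: fields joined by single spaces,
// terminated by "\r\n".
func writeLine(out *bytes.Buffer, fields ...[]byte) {
	out.Write(bytes.Join(fields, []byte{' '}))
	out.WriteString("\r\n")
}

// readLine mirrors authReadLine: read one line and split it into fields.
func readLine(in *bufio.Reader) ([][]byte, error) {
	data, err := in.ReadBytes('\n')
	if err != nil {
		return nil, err
	}
	data = bytes.TrimSuffix(data, []byte("\r\n"))
	return bytes.Split(data, []byte{' '}), nil
}

func main() {
	var wire bytes.Buffer
	writeLine(&wire, []byte("NEGOTIATE_UNIX_FD"))
	writeLine(&wire, []byte("BEGIN"))

	in := bufio.NewReader(&wire)
	for {
		fields, err := readLine(in)
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		fmt.Printf("%q\n", fields)
	}
}
```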
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_external.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_external.go
deleted file mode 100644
index 7e376d3..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_external.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package dbus
-
-import (
- "encoding/hex"
-)
-
-// AuthExternal returns an Auth that authenticates as the given user with the
-// EXTERNAL mechanism.
-func AuthExternal(user string) Auth {
- return authExternal{user}
-}
-
-// AuthExternal implements the EXTERNAL authentication mechanism.
-type authExternal struct {
- user string
-}
-
-func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
- b := make([]byte, 2*len(a.user))
- hex.Encode(b, []byte(a.user))
- return []byte("EXTERNAL"), b, AuthOk
-}
-
-func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
- return nil, AuthError
-}
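FirstData above shows that the EXTERNAL mechanism simply hex-encodes the identity it authenticates as and sends it with the AUTH command. A tiny sketch of the resulting line; the uid string is sample input (the deleted Conn.Auth passes the user name, while many D-Bus clients send the numeric uid):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	user := "1000" // sample identity; not taken from this code
	b := make([]byte, 2*len(user))
	hex.Encode(b, []byte(user))
	fmt.Printf("AUTH EXTERNAL %s\r\n", b) // AUTH EXTERNAL 31303030
}
```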
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_sha1.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_sha1.go
deleted file mode 100644
index df15b46..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/auth_sha1.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package dbus
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "crypto/sha1"
- "encoding/hex"
- "os"
-)
-
-// AuthCookieSha1 returns an Auth that authenticates as the given user with the
-// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
-// directory of the user.
-func AuthCookieSha1(user, home string) Auth {
- return authCookieSha1{user, home}
-}
-
-type authCookieSha1 struct {
- user, home string
-}
-
-func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
- b := make([]byte, 2*len(a.user))
- hex.Encode(b, []byte(a.user))
- return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
-}
-
-func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
- challenge := make([]byte, len(data)/2)
- _, err := hex.Decode(challenge, data)
- if err != nil {
- return nil, AuthError
- }
- b := bytes.Split(challenge, []byte{' '})
- if len(b) != 3 {
- return nil, AuthError
- }
- context := b[0]
- id := b[1]
- svchallenge := b[2]
- cookie := a.getCookie(context, id)
- if cookie == nil {
- return nil, AuthError
- }
- clchallenge := a.generateChallenge()
- if clchallenge == nil {
- return nil, AuthError
- }
- hash := sha1.New()
- hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
- hexhash := make([]byte, 2*hash.Size())
- hex.Encode(hexhash, hash.Sum(nil))
- data = append(clchallenge, ' ')
- data = append(data, hexhash...)
- resp := make([]byte, 2*len(data))
- hex.Encode(resp, data)
- return resp, AuthOk
-}
-
-// getCookie searches for the cookie identified by id in context and returns
-// the cookie content or nil. (Since HandleData can't return a specific error,
-// but only whether an error occurred, this function also doesn't bother to
-// return an error.)
-func (a authCookieSha1) getCookie(context, id []byte) []byte {
- file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
- if err != nil {
- return nil
- }
- defer file.Close()
- rd := bufio.NewReader(file)
- for {
- line, err := rd.ReadBytes('\n')
- if err != nil {
- return nil
- }
- line = line[:len(line)-1]
- b := bytes.Split(line, []byte{' '})
- if len(b) != 3 {
- return nil
- }
- if bytes.Equal(b[0], id) {
- return b[2]
- }
- }
-}
-
-// generateChallenge returns a random, hex-encoded challenge, or nil on error
-// (see above).
-func (a authCookieSha1) generateChallenge() []byte {
- b := make([]byte, 16)
- n, err := rand.Read(b)
- if err != nil {
- return nil
- }
- if n != 16 {
- return nil
- }
- enc := make([]byte, 32)
- hex.Encode(enc, b)
- return enc
-}
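
The response HandleData builds above is just hex("<client challenge> <hex sha1(server ":" client ":" cookie)>"); a standalone sketch of that arithmetic with made-up inputs:

    package main

    import (
    	"crypto/sha1"
    	"encoding/hex"
    	"fmt"
    )

    func main() {
    	// Placeholder values; on a real bus the server challenge arrives in the
    	// DATA line and the cookie comes from ~/.dbus-keyrings/<context>.
    	server := "7a9c3b51d2e4f6a8b0c1d2e3f4a5b6c7"
    	client := "0011223344556677889900aabbccddee"
    	cookie := "secretcookievaluefromthekeyring"

    	h := sha1.New()
    	fmt.Fprintf(h, "%s:%s:%s", server, client, cookie)
    	digest := hex.EncodeToString(h.Sum(nil))

    	// The DATA reply carries hex("<client challenge> <digest>").
    	fmt.Println(hex.EncodeToString([]byte(client + " " + digest)))
    }
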
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/call.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/call.go
deleted file mode 100644
index 1d2fbc7..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/call.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package dbus
-
-import (
- "errors"
- "strings"
-)
-
-// Call represents a pending or completed method call.
-type Call struct {
- Destination string
- Path ObjectPath
- Method string
- Args []interface{}
-
- // Strobes when the call is complete.
- Done chan *Call
-
- // After completion, the error status. If this is non-nil, it may be an
- // error message from the peer (with Error as its type) or some other error.
- Err error
-
- // Holds the response once the call is done.
- Body []interface{}
-}
-
-var errSignature = errors.New("dbus: mismatched signature")
-
-// Store stores the body of the reply into the provided pointers. It returns
-// an error if the signatures of the body and retvalues don't match, or if
-// the error status is not nil.
-func (c *Call) Store(retvalues ...interface{}) error {
- if c.Err != nil {
- return c.Err
- }
-
- return Store(c.Body, retvalues...)
-}
-
-// Object represents a remote object on which methods can be invoked.
-type Object struct {
- conn *Conn
- dest string
- path ObjectPath
-}
-
-// Call calls a method with (*Object).Go and waits for its reply.
-func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
- return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
-}
-
-// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
-// object. The property name must be given in interface.member notation.
-func (o *Object) GetProperty(p string) (Variant, error) {
- idx := strings.LastIndex(p, ".")
- if idx == -1 || idx+1 == len(p) {
- return Variant{}, errors.New("dbus: invalid property " + p)
- }
-
- iface := p[:idx]
- prop := p[idx+1:]
-
- result := Variant{}
- err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
-
- if err != nil {
- return Variant{}, err
- }
-
- return result, nil
-}
-
-// Go calls a method with the given arguments asynchronously. It returns a
-// Call structure representing this method call. The passed channel will
-// receive the same value once the call is done. If ch is nil, a new channel
-// will be allocated. Otherwise, ch has to be buffered or Go will panic.
-//
-// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
-// is returned of which only the Err member is valid.
-//
-// If the method parameter contains a dot ('.'), the part before the last dot
-// specifies the interface on which the method is called.
-func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
- iface := ""
- i := strings.LastIndex(method, ".")
- if i != -1 {
- iface = method[:i]
- }
- method = method[i+1:]
- msg := new(Message)
- msg.Type = TypeMethodCall
- msg.serial = o.conn.getSerial()
- msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
- msg.Headers = make(map[HeaderField]Variant)
- msg.Headers[FieldPath] = MakeVariant(o.path)
- msg.Headers[FieldDestination] = MakeVariant(o.dest)
- msg.Headers[FieldMember] = MakeVariant(method)
- if iface != "" {
- msg.Headers[FieldInterface] = MakeVariant(iface)
- }
- msg.Body = args
- if len(args) > 0 {
- msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
- }
- if msg.Flags&FlagNoReplyExpected == 0 {
- if ch == nil {
- ch = make(chan *Call, 10)
- } else if cap(ch) == 0 {
- panic("dbus: unbuffered channel passed to (*Object).Go")
- }
- call := &Call{
- Destination: o.dest,
- Path: o.path,
- Method: method,
- Args: args,
- Done: ch,
- }
- o.conn.callsLck.Lock()
- o.conn.calls[msg.serial] = call
- o.conn.callsLck.Unlock()
- o.conn.outLck.RLock()
- if o.conn.closed {
- call.Err = ErrClosed
- call.Done <- call
- } else {
- o.conn.out <- msg
- }
- o.conn.outLck.RUnlock()
- return call
- }
- o.conn.outLck.RLock()
- defer o.conn.outLck.RUnlock()
- if o.conn.closed {
- return &Call{Err: ErrClosed}
- }
- o.conn.out <- msg
- return &Call{Err: nil}
-}
-
-// Destination returns the destination that calls on o are sent to.
-func (o *Object) Destination() string {
- return o.dest
-}
-
-// Path returns the path that calls on o are sent to.
-func (o *Object) Path() ObjectPath {
- return o.path
-}
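
A sketch of the synchronous and asynchronous paths above against two standard bus-daemon methods (upstream import path assumed):

    package main

    import (
    	"fmt"

    	"github.com/guelfey/go.dbus"
    )

    func main() {
    	conn, err := dbus.SessionBus()
    	if err != nil {
    		panic(err)
    	}
    	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

    	// Synchronous: Call blocks and Store unpacks the reply body.
    	var names []string
    	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
    		panic(err)
    	}
    	fmt.Println(len(names), "names on the bus")

    	// Asynchronous: Go returns immediately; the buffered channel receives the Call.
    	ch := make(chan *dbus.Call, 1)
    	obj.Go("org.freedesktop.DBus.GetId", 0, ch)
    	if call := <-ch; call.Err == nil {
    		fmt.Println("bus id:", call.Body[0])
    	}
    }
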
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn.go
deleted file mode 100644
index b38f852..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn.go
+++ /dev/null
@@ -1,597 +0,0 @@
-package dbus
-
-import (
- "errors"
- "io"
- "os"
- "reflect"
- "strings"
- "sync"
-)
-
-const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
-
-var (
- systemBus *Conn
- systemBusLck sync.Mutex
- sessionBus *Conn
- sessionBusLck sync.Mutex
-)
-
-// ErrClosed is the error returned by calls on a closed connection.
-var ErrClosed = errors.New("dbus: connection closed by user")
-
-// Conn represents a connection to a message bus (usually, the system or
-// session bus).
-//
-// Connections are either shared or private. Shared connections
-// are shared between calls to the functions that return them. As a result,
-// the methods Close, Auth and Hello must not be called on them.
-//
-// Multiple goroutines may invoke methods on a connection simultaneously.
-type Conn struct {
- transport
-
- busObj *Object
- unixFD bool
- uuid string
-
- names []string
- namesLck sync.RWMutex
-
- serialLck sync.Mutex
- nextSerial uint32
- serialUsed map[uint32]bool
-
- calls map[uint32]*Call
- callsLck sync.RWMutex
-
- handlers map[ObjectPath]map[string]interface{}
- handlersLck sync.RWMutex
-
- out chan *Message
- closed bool
- outLck sync.RWMutex
-
- signals []chan<- *Signal
- signalsLck sync.Mutex
-
- eavesdropped chan<- *Message
- eavesdroppedLck sync.Mutex
-}
-
-// SessionBus returns a shared connection to the session bus, connecting to it
-// if not already done.
-func SessionBus() (conn *Conn, err error) {
- sessionBusLck.Lock()
- defer sessionBusLck.Unlock()
- if sessionBus != nil {
- return sessionBus, nil
- }
- defer func() {
- if conn != nil {
- sessionBus = conn
- }
- }()
- conn, err = SessionBusPrivate()
- if err != nil {
- return
- }
- if err = conn.Auth(nil); err != nil {
- conn.Close()
- conn = nil
- return
- }
- if err = conn.Hello(); err != nil {
- conn.Close()
- conn = nil
- }
- return
-}
-
-// SessionBusPrivate returns a new private connection to the session bus.
-func SessionBusPrivate() (*Conn, error) {
- address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
- if address != "" && address != "autolaunch:" {
- return Dial(address)
- }
-
- return sessionBusPlatform()
-}
-
-// SystemBus returns a shared connection to the system bus, connecting to it if
-// not already done.
-func SystemBus() (conn *Conn, err error) {
- systemBusLck.Lock()
- defer systemBusLck.Unlock()
- if systemBus != nil {
- return systemBus, nil
- }
- defer func() {
- if conn != nil {
- systemBus = conn
- }
- }()
- conn, err = SystemBusPrivate()
- if err != nil {
- return
- }
- if err = conn.Auth(nil); err != nil {
- conn.Close()
- conn = nil
- return
- }
- if err = conn.Hello(); err != nil {
- conn.Close()
- conn = nil
- }
- return
-}
-
-// SystemBusPrivate returns a new private connection to the system bus.
-func SystemBusPrivate() (*Conn, error) {
- address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
- if address != "" {
- return Dial(address)
- }
- return Dial(defaultSystemBusAddress)
-}
-
-// Dial establishes a new private connection to the message bus specified by address.
-func Dial(address string) (*Conn, error) {
- tr, err := getTransport(address)
- if err != nil {
- return nil, err
- }
- return newConn(tr)
-}
-
-// NewConn creates a new private *Conn from an already established connection.
-func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
- return newConn(genericTransport{conn})
-}
-
-// newConn creates a new *Conn from a transport.
-func newConn(tr transport) (*Conn, error) {
- conn := new(Conn)
- conn.transport = tr
- conn.calls = make(map[uint32]*Call)
- conn.out = make(chan *Message, 10)
- conn.handlers = make(map[ObjectPath]map[string]interface{})
- conn.nextSerial = 1
- conn.serialUsed = map[uint32]bool{0: true}
- conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
- return conn, nil
-}
-
-// BusObject returns the object owned by the bus daemon which handles
-// administrative requests.
-func (conn *Conn) BusObject() *Object {
- return conn.busObj
-}
-
-// Close closes the connection. Any blocked operations will return with errors
-// and the channels passed to Eavesdrop and Signal are closed. This method must
-// not be called on shared connections.
-func (conn *Conn) Close() error {
- conn.outLck.Lock()
- close(conn.out)
- conn.closed = true
- conn.outLck.Unlock()
- conn.signalsLck.Lock()
- for _, ch := range conn.signals {
- close(ch)
- }
- conn.signalsLck.Unlock()
- conn.eavesdroppedLck.Lock()
- if conn.eavesdropped != nil {
- close(conn.eavesdropped)
- }
- conn.eavesdroppedLck.Unlock()
- return conn.transport.Close()
-}
-
-// Eavesdrop causes conn to send all incoming messages to the given channel
-// without further processing. Method replies, errors and signals will not be
-// sent to the appropriate channels and method calls will not be handled. If nil
-// is passed, the normal behaviour is restored.
-//
-// The caller has to make sure that ch is sufficiently buffered;
-// if a message arrives when a write to ch is not possible, the message is
-// discarded.
-func (conn *Conn) Eavesdrop(ch chan<- *Message) {
- conn.eavesdroppedLck.Lock()
- conn.eavesdropped = ch
- conn.eavesdroppedLck.Unlock()
-}
-
-// getSerial returns an unused serial.
-func (conn *Conn) getSerial() uint32 {
- conn.serialLck.Lock()
- defer conn.serialLck.Unlock()
- n := conn.nextSerial
- for conn.serialUsed[n] {
- n++
- }
- conn.serialUsed[n] = true
- conn.nextSerial = n + 1
- return n
-}
-
-// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
-// called after authentication, but before sending any other messages to the
-// bus. Hello must not be called for shared connections.
-func (conn *Conn) Hello() error {
- var s string
- err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
- if err != nil {
- return err
- }
- conn.namesLck.Lock()
- conn.names = make([]string, 1)
- conn.names[0] = s
- conn.namesLck.Unlock()
- return nil
-}
-
-// inWorker runs in its own goroutine, reading incoming messages from the
-// transport and dispatching them appropriately.
-func (conn *Conn) inWorker() {
- for {
- msg, err := conn.ReadMessage()
- if err == nil {
- conn.eavesdroppedLck.Lock()
- if conn.eavesdropped != nil {
- select {
- case conn.eavesdropped <- msg:
- default:
- }
- conn.eavesdroppedLck.Unlock()
- continue
- }
- conn.eavesdroppedLck.Unlock()
- dest, _ := msg.Headers[FieldDestination].value.(string)
- found := false
- if dest == "" {
- found = true
- } else {
- conn.namesLck.RLock()
- if len(conn.names) == 0 {
- found = true
- }
- for _, v := range conn.names {
- if dest == v {
- found = true
- break
- }
- }
- conn.namesLck.RUnlock()
- }
- if !found {
- // Eavesdropped a message, but no channel for it is registered.
- // Ignore it.
- continue
- }
- switch msg.Type {
- case TypeMethodReply, TypeError:
- serial := msg.Headers[FieldReplySerial].value.(uint32)
- conn.callsLck.Lock()
- if c, ok := conn.calls[serial]; ok {
- if msg.Type == TypeError {
- name, _ := msg.Headers[FieldErrorName].value.(string)
- c.Err = Error{name, msg.Body}
- } else {
- c.Body = msg.Body
- }
- c.Done <- c
- conn.serialLck.Lock()
- delete(conn.serialUsed, serial)
- conn.serialLck.Unlock()
- delete(conn.calls, serial)
- }
- conn.callsLck.Unlock()
- case TypeSignal:
- iface := msg.Headers[FieldInterface].value.(string)
- member := msg.Headers[FieldMember].value.(string)
- // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
- // sender is optional for signals.
- sender, _ := msg.Headers[FieldSender].value.(string)
- if iface == "org.freedesktop.DBus" && member == "NameLost" &&
- sender == "org.freedesktop.DBus" {
-
- name, _ := msg.Body[0].(string)
- conn.namesLck.Lock()
- for i, v := range conn.names {
- if v == name {
- copy(conn.names[i:], conn.names[i+1:])
- conn.names = conn.names[:len(conn.names)-1]
- }
- }
- conn.namesLck.Unlock()
- }
- signal := &Signal{
- Sender: sender,
- Path: msg.Headers[FieldPath].value.(ObjectPath),
- Name: iface + "." + member,
- Body: msg.Body,
- }
- conn.signalsLck.Lock()
- for _, ch := range conn.signals {
- // don't block trying to send a signal
- select {
- case ch <- signal:
- default:
- }
- }
- conn.signalsLck.Unlock()
- case TypeMethodCall:
- go conn.handleCall(msg)
- }
- } else if _, ok := err.(InvalidMessageError); !ok {
- // Some read error occurred (usually EOF); we can't really do
- // anything but shut down everything and return errors to all
- // pending replies.
- conn.Close()
- conn.callsLck.RLock()
- for _, v := range conn.calls {
- v.Err = err
- v.Done <- v
- }
- conn.callsLck.RUnlock()
- return
- }
- // invalid messages are ignored
- }
-}
-
-// Names returns the list of all names that are currently owned by this
-// connection. The slice is always at least one element long, the first element
-// being the unique name of the connection.
-func (conn *Conn) Names() []string {
- conn.namesLck.RLock()
- // copy the slice so it can't be modified
- s := make([]string, len(conn.names))
- copy(s, conn.names)
- conn.namesLck.RUnlock()
- return s
-}
-
-// Object returns the object identified by the given destination name and path.
-func (conn *Conn) Object(dest string, path ObjectPath) *Object {
- return &Object{conn, dest, path}
-}
-
-// outWorker runs in its own goroutine, encoding and sending messages that are
-// sent to conn.out.
-func (conn *Conn) outWorker() {
- for msg := range conn.out {
- err := conn.SendMessage(msg)
- conn.callsLck.RLock()
- if err != nil {
- if c := conn.calls[msg.serial]; c != nil {
- c.Err = err
- c.Done <- c
- }
- conn.serialLck.Lock()
- delete(conn.serialUsed, msg.serial)
- conn.serialLck.Unlock()
- } else if msg.Type != TypeMethodCall {
- conn.serialLck.Lock()
- delete(conn.serialUsed, msg.serial)
- conn.serialLck.Unlock()
- }
- conn.callsLck.RUnlock()
- }
-}
-
-// Send sends the given message to the message bus. You usually don't need to
-// use this; use the higher-level equivalents (Call / Go, Emit and Export)
-// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
-// call is returned and the same value is sent to ch (which must be buffered)
-// once the call is complete. Otherwise, ch is ignored and a Call structure is
-// returned of which only the Err member is valid.
-func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
- var call *Call
-
- msg.serial = conn.getSerial()
- if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
- if ch == nil {
- ch = make(chan *Call, 5)
- } else if cap(ch) == 0 {
- panic("dbus: unbuffered channel passed to (*Conn).Send")
- }
- call = new(Call)
- call.Destination, _ = msg.Headers[FieldDestination].value.(string)
- call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
- iface, _ := msg.Headers[FieldInterface].value.(string)
- member, _ := msg.Headers[FieldMember].value.(string)
- call.Method = iface + "." + member
- call.Args = msg.Body
- call.Done = ch
- conn.callsLck.Lock()
- conn.calls[msg.serial] = call
- conn.callsLck.Unlock()
- conn.outLck.RLock()
- if conn.closed {
- call.Err = ErrClosed
- call.Done <- call
- } else {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
- } else {
- conn.outLck.RLock()
- if conn.closed {
- call = &Call{Err: ErrClosed}
- } else {
- conn.out <- msg
- call = &Call{Err: nil}
- }
- conn.outLck.RUnlock()
- }
- return call
-}
-
-// sendError creates an error message corresponding to the parameters and sends
-// it to conn.out.
-func (conn *Conn) sendError(e Error, dest string, serial uint32) {
- msg := new(Message)
- msg.Type = TypeError
- msg.serial = conn.getSerial()
- msg.Headers = make(map[HeaderField]Variant)
- msg.Headers[FieldDestination] = MakeVariant(dest)
- msg.Headers[FieldErrorName] = MakeVariant(e.Name)
- msg.Headers[FieldReplySerial] = MakeVariant(serial)
- msg.Body = e.Body
- if len(e.Body) > 0 {
- msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
- }
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
-}
-
-// sendReply creates a method reply message corresponding to the parameters and
-// sends it to conn.out.
-func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
- msg := new(Message)
- msg.Type = TypeMethodReply
- msg.serial = conn.getSerial()
- msg.Headers = make(map[HeaderField]Variant)
- msg.Headers[FieldDestination] = MakeVariant(dest)
- msg.Headers[FieldReplySerial] = MakeVariant(serial)
- msg.Body = values
- if len(values) > 0 {
- msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
- }
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
-}
-
-// Signal registers the given channel to be passed all received signal messages.
-// The caller has to make sure that ch is sufficiently buffered; if a message
-// arrives when a write to ch is not possible, it is discarded.
-//
-// Multiple of these channels can be registered at the same time. Passing a
-// channel that already is registered will remove it from the list of the
-// registered channels.
-//
-// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
-// channel for eavesdropped messages, this channel receives all signals, and
-// none of the channels passed to Signal will receive any signals.
-func (conn *Conn) Signal(ch chan<- *Signal) {
- conn.signalsLck.Lock()
- conn.signals = append(conn.signals, ch)
- conn.signalsLck.Unlock()
-}
-
-// SupportsUnixFDs returns whether the underlying transport supports passing of
-// unix file descriptors. If this is false, method calls containing unix file
-// descriptors will return an error and emitted signals containing them will
-// not be sent.
-func (conn *Conn) SupportsUnixFDs() bool {
- return conn.unixFD
-}
-
-// Error represents a D-Bus message of type Error.
-type Error struct {
- Name string
- Body []interface{}
-}
-
-func (e Error) Error() string {
- if len(e.Body) >= 1 {
- s, ok := e.Body[0].(string)
- if ok {
- return s
- }
- }
- return e.Name
-}
-
-// Signal represents a D-Bus message of type Signal. The name member is given in
-// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
-type Signal struct {
- Sender string
- Path ObjectPath
- Name string
- Body []interface{}
-}
-
-// transport is a D-Bus transport.
-type transport interface {
- // Read and Write raw data (for example, for the authentication protocol).
- io.ReadWriteCloser
-
- // Send the initial null byte used for the EXTERNAL mechanism.
- SendNullByte() error
-
- // Returns whether this transport supports passing Unix FDs.
- SupportsUnixFDs() bool
-
- // Signal the transport that Unix FD passing is enabled for this connection.
- EnableUnixFDs()
-
- // Read / send a message, handling things like Unix FDs.
- ReadMessage() (*Message, error)
- SendMessage(*Message) error
-}
-
-func getTransport(address string) (transport, error) {
- var err error
- var t transport
-
- m := map[string]func(string) (transport, error){
- "unix": newUnixTransport,
- }
- addresses := strings.Split(address, ";")
- for _, v := range addresses {
- i := strings.IndexRune(v, ':')
- if i == -1 {
- err = errors.New("dbus: invalid bus address (no transport)")
- continue
- }
- f := m[v[:i]]
- if f == nil {
- err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
- }
- t, err = f(v[i+1:])
- if err == nil {
- return t, nil
- }
- }
- return nil, err
-}
-
-// dereferenceAll returns a slice that, assuming vs is a slice of pointers
-// to arbitrary types, contains the values obtained by dereferencing every
-// element of vs.
-func dereferenceAll(vs []interface{}) []interface{} {
- for i := range vs {
- v := reflect.ValueOf(vs[i])
- v = v.Elem()
- vs[i] = v.Interface()
- }
- return vs
-}
-
-// getKey returns the value for key from a comma-separated key=value list, or "" if the key is not found.
-func getKey(s, key string) string {
- i := strings.Index(s, key)
- if i == -1 {
- return ""
- }
- if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
- return ""
- }
- j := strings.Index(s, ",")
- if j == -1 {
- j = len(s)
- }
- return s[i+len(key)+1 : j]
-}
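
A sketch of the Signal channel in use: an AddMatch rule asks the bus daemon to forward NameOwnerChanged signals, which inWorker then fans out to every registered channel (upstream import path assumed):

    package main

    import (
    	"fmt"

    	"github.com/guelfey/go.dbus"
    )

    func main() {
    	conn, err := dbus.SessionBus()
    	if err != nil {
    		panic(err)
    	}
    	// Ask the bus daemon to route matching signals to this connection.
    	conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
    		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")

    	ch := make(chan *dbus.Signal, 10) // must be buffered, see Signal above
    	conn.Signal(ch)
    	for sig := range ch {
    		fmt.Println(sig.Name, sig.Body)
    	}
    }
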
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_darwin.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_darwin.go
deleted file mode 100644
index b67bb1b..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_darwin.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package dbus
-
-import (
- "errors"
- "os/exec"
-)
-
-func sessionBusPlatform() (*Conn, error) {
- cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
- b, err := cmd.CombinedOutput()
-
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- return nil, errors.New("dbus: couldn't determine address of session bus")
- }
-
- return Dial("unix:path=" + string(b[:len(b)-1]))
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_other.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_other.go
deleted file mode 100644
index f74b875..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_other.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !darwin
-
-package dbus
-
-import (
- "bytes"
- "errors"
- "os/exec"
-)
-
-func sessionBusPlatform() (*Conn, error) {
- cmd := exec.Command("dbus-launch")
- b, err := cmd.CombinedOutput()
-
- if err != nil {
- return nil, err
- }
-
- i := bytes.IndexByte(b, '=')
- j := bytes.IndexByte(b, '\n')
-
- if i == -1 || j == -1 {
- return nil, errors.New("dbus: couldn't determine address of session bus")
- }
-
- return Dial(string(b[i+1 : j]))
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_test.go
deleted file mode 100644
index a2b14e8..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/conn_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package dbus
-
-import "testing"
-
-func TestSessionBus(t *testing.T) {
- _, err := SessionBus()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestSystemBus(t *testing.T) {
- _, err := SystemBus()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestSend(t *testing.T) {
- bus, err := SessionBus()
- if err != nil {
- t.Error(err)
- }
- ch := make(chan *Call, 1)
- msg := &Message{
- Type: TypeMethodCall,
- Flags: 0,
- Headers: map[HeaderField]Variant{
- FieldDestination: MakeVariant(bus.Names()[0]),
- FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")),
- FieldInterface: MakeVariant("org.freedesktop.DBus.Peer"),
- FieldMember: MakeVariant("Ping"),
- },
- }
- call := bus.Send(msg, ch)
- <-ch
- if call.Err != nil {
- t.Error(call.Err)
- }
-}
-
-type server struct{}
-
-func (server) Double(i int64) (int64, *Error) {
- return 2 * i, nil
-}
-
-func BenchmarkCall(b *testing.B) {
- b.StopTimer()
- var s string
- bus, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
- name := bus.Names()[0]
- obj := bus.BusObject()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&s)
- if err != nil {
- b.Fatal(err)
- }
- if s != name {
- b.Errorf("got %s, wanted %s", s, name)
- }
- }
-}
-
-func BenchmarkCallAsync(b *testing.B) {
- b.StopTimer()
- bus, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
- name := bus.Names()[0]
- obj := bus.BusObject()
- c := make(chan *Call, 50)
- done := make(chan struct{})
- go func() {
- for i := 0; i < b.N; i++ {
- v := <-c
- if v.Err != nil {
- b.Error(v.Err)
- }
- s := v.Body[0].(string)
- if s != name {
- b.Errorf("got %s, wanted %s", s, name)
- }
- }
- close(done)
- }()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name)
- }
- <-done
-}
-
-func BenchmarkServe(b *testing.B) {
- b.StopTimer()
- srv, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
- cli, err := SessionBusPrivate()
- if err != nil {
- b.Fatal(err)
- }
- if err = cli.Auth(nil); err != nil {
- b.Fatal(err)
- }
- if err = cli.Hello(); err != nil {
- b.Fatal(err)
- }
- benchmarkServe(b, srv, cli)
-}
-
-func BenchmarkServeAsync(b *testing.B) {
- b.StopTimer()
- srv, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
- cli, err := SessionBusPrivate()
- if err != nil {
- b.Fatal(err)
- }
- if err = cli.Auth(nil); err != nil {
- b.Fatal(err)
- }
- if err = cli.Hello(); err != nil {
- b.Fatal(err)
- }
- benchmarkServeAsync(b, srv, cli)
-}
-
-func BenchmarkServeSameConn(b *testing.B) {
- b.StopTimer()
- bus, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
-
- benchmarkServe(b, bus, bus)
-}
-
-func BenchmarkServeSameConnAsync(b *testing.B) {
- b.StopTimer()
- bus, err := SessionBus()
- if err != nil {
- b.Fatal(err)
- }
-
- benchmarkServeAsync(b, bus, bus)
-}
-
-func benchmarkServe(b *testing.B, srv, cli *Conn) {
- var r int64
- var err error
- dest := srv.Names()[0]
- srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test")
- obj := cli.Object(dest, "/org/guelfey/DBus/Test")
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- err = obj.Call("org.guelfey.DBus.Test.Double", 0, int64(i)).Store(&r)
- if err != nil {
- b.Fatal(err)
- }
- if r != 2*int64(i) {
- b.Errorf("got %d, wanted %d", r, 2*int64(i))
- }
- }
-}
-
-func benchmarkServeAsync(b *testing.B, srv, cli *Conn) {
- dest := srv.Names()[0]
- srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test")
- obj := cli.Object(dest, "/org/guelfey/DBus/Test")
- c := make(chan *Call, 50)
- done := make(chan struct{})
- go func() {
- for i := 0; i < b.N; i++ {
- v := <-c
- if v.Err != nil {
- b.Fatal(v.Err)
- }
- i, r := v.Args[0].(int64), v.Body[0].(int64)
- if 2*i != r {
- b.Errorf("got %d, wanted %d", r, 2*i)
- }
- }
- close(done)
- }()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- obj.Go("org.guelfey.DBus.Test.Double", 0, c, int64(i))
- }
- <-done
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/dbus.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/dbus.go
deleted file mode 100644
index 2ce6873..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/dbus.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package dbus
-
-import (
- "errors"
- "reflect"
- "strings"
-)
-
-var (
- byteType = reflect.TypeOf(byte(0))
- boolType = reflect.TypeOf(false)
- uint8Type = reflect.TypeOf(uint8(0))
- int16Type = reflect.TypeOf(int16(0))
- uint16Type = reflect.TypeOf(uint16(0))
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
- stringType = reflect.TypeOf("")
- signatureType = reflect.TypeOf(Signature{""})
- objectPathType = reflect.TypeOf(ObjectPath(""))
- variantType = reflect.TypeOf(Variant{Signature{""}, nil})
- interfacesType = reflect.TypeOf([]interface{}{})
- unixFDType = reflect.TypeOf(UnixFD(0))
- unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
-)
-
-// An InvalidTypeError signals that a value which cannot be represented in the
-// D-Bus wire format was passed to a function.
-type InvalidTypeError struct {
- Type reflect.Type
-}
-
-func (e InvalidTypeError) Error() string {
- return "dbus: invalid type " + e.Type.String()
-}
-
-// Store copies the values contained in src to dest, which must be a slice of
-// pointers. It converts slices of interfaces from src to corresponding structs
-// in dest. An error is returned if the lengths of src and dest or the types of
-// their elements don't match.
-func Store(src []interface{}, dest ...interface{}) error {
- if len(src) != len(dest) {
- return errors.New("dbus.Store: length mismatch")
- }
-
- for i := range src {
- if err := store(src[i], dest[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-func store(src, dest interface{}) error {
- if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
- reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
- return nil
- } else if hasStruct(dest) {
- rv := reflect.ValueOf(dest).Elem()
- switch rv.Kind() {
- case reflect.Struct:
- vs, ok := src.([]interface{})
- if !ok {
- return errors.New("dbus.Store: type mismatch")
- }
- t := rv.Type()
- ndest := make([]interface{}, 0, rv.NumField())
- for i := 0; i < rv.NumField(); i++ {
- field := t.Field(i)
- if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
- ndest = append(ndest, rv.Field(i).Addr().Interface())
- }
- }
- if len(vs) != len(ndest) {
- return errors.New("dbus.Store: type mismatch")
- }
- err := Store(vs, ndest...)
- if err != nil {
- return errors.New("dbus.Store: type mismatch")
- }
- case reflect.Slice:
- sv := reflect.ValueOf(src)
- if sv.Kind() != reflect.Slice {
- return errors.New("dbus.Store: type mismatch")
- }
- rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
- for i := 0; i < sv.Len(); i++ {
- if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
- return err
- }
- }
- case reflect.Map:
- sv := reflect.ValueOf(src)
- if sv.Kind() != reflect.Map {
- return errors.New("dbus.Store: type mismatch")
- }
- keys := sv.MapKeys()
- rv.Set(reflect.MakeMap(sv.Type()))
- for _, key := range keys {
- v := reflect.New(sv.Type().Elem())
- if err := store(v, sv.MapIndex(key).Interface()); err != nil {
- return err
- }
- rv.SetMapIndex(key, v.Elem())
- }
- default:
- return errors.New("dbus.Store: type mismatch")
- }
- return nil
- } else {
- return errors.New("dbus.Store: type mismatch")
- }
-}
-
-func hasStruct(v interface{}) bool {
- t := reflect.TypeOf(v)
- for {
- switch t.Kind() {
- case reflect.Struct:
- return true
- case reflect.Slice, reflect.Ptr, reflect.Map:
- t = t.Elem()
- default:
- return false
- }
- }
-}
-
-// An ObjectPath is an object path as defined by the D-Bus spec.
-type ObjectPath string
-
-// IsValid returns whether the object path is valid.
-func (o ObjectPath) IsValid() bool {
- s := string(o)
- if len(s) == 0 {
- return false
- }
- if s[0] != '/' {
- return false
- }
- if s[len(s)-1] == '/' && len(s) != 1 {
- return false
- }
- // probably not used, but technically possible
- if s == "/" {
- return true
- }
- split := strings.Split(s[1:], "/")
- for _, v := range split {
- if len(v) == 0 {
- return false
- }
- for _, c := range v {
- if !isMemberChar(c) {
- return false
- }
- }
- }
- return true
-}
-
-// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
-// documentation for more information about Unix file descriptor passing.
-type UnixFD int32
-
-// A UnixFDIndex is the representation of a Unix file descriptor in a message.
-type UnixFDIndex uint32
-
-// alignment returns the alignment of values of type t.
-func alignment(t reflect.Type) int {
- switch t {
- case variantType:
- return 1
- case objectPathType:
- return 4
- case signatureType:
- return 1
- case interfacesType: // sometimes used for structs
- return 8
- }
- switch t.Kind() {
- case reflect.Uint8:
- return 1
- case reflect.Uint16, reflect.Int16:
- return 2
- case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
- return 4
- case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
- return 8
- case reflect.Ptr:
- return alignment(t.Elem())
- }
- return 1
-}
-
-// isKeyType returns whether t is a valid type for a D-Bus dict.
-func isKeyType(t reflect.Type) bool {
- switch t.Kind() {
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
- reflect.String:
-
- return true
- }
- return false
-}
-
-// isValidInterface returns whether s is a valid name for an interface.
-func isValidInterface(s string) bool {
- if len(s) == 0 || len(s) > 255 || s[0] == '.' {
- return false
- }
- elem := strings.Split(s, ".")
- if len(elem) < 2 {
- return false
- }
- for _, v := range elem {
- if len(v) == 0 {
- return false
- }
- if v[0] >= '0' && v[0] <= '9' {
- return false
- }
- for _, c := range v {
- if !isMemberChar(c) {
- return false
- }
- }
- }
- return true
-}
-
-// isValidMember returns whether s is a valid name for a member.
-func isValidMember(s string) bool {
- if len(s) == 0 || len(s) > 255 {
- return false
- }
- i := strings.Index(s, ".")
- if i != -1 {
- return false
- }
- if s[0] >= '0' && s[0] <= '9' {
- return false
- }
- for _, c := range s {
- if !isMemberChar(c) {
- return false
- }
- }
- return true
-}
-
-func isMemberChar(c rune) bool {
- return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
- (c >= 'a' && c <= 'z') || c == '_'
-}
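
A sketch of the STRUCT handling in Store above: a decoded (ii) struct arrives as a nested []interface{} and is copied field by field into a Go struct (upstream import path assumed):

    package main

    import (
    	"fmt"

    	"github.com/guelfey/go.dbus"
    )

    type point struct {
    	X, Y int32
    }

    func main() {
    	// What a reply body containing one (ii) STRUCT looks like after decoding.
    	body := []interface{}{[]interface{}{int32(3), int32(4)}}

    	var p point
    	if err := dbus.Store(body, &p); err != nil {
    		panic(err)
    	}
    	fmt.Println(p) // {3 4}
    }
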
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/decoder.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/decoder.go
deleted file mode 100644
index ef50dca..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/decoder.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package dbus
-
-import (
- "encoding/binary"
- "io"
- "reflect"
-)
-
-type decoder struct {
- in io.Reader
- order binary.ByteOrder
- pos int
-}
-
-// newDecoder returns a new decoder that reads values from in. The input is
-// expected to be in the given byte order.
-func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
- dec := new(decoder)
- dec.in = in
- dec.order = order
- return dec
-}
-
-// align aligns the input to the given boundary and panics on error.
-func (dec *decoder) align(n int) {
- if dec.pos%n != 0 {
- newpos := (dec.pos + n - 1) & ^(n - 1)
- empty := make([]byte, newpos-dec.pos)
- if _, err := io.ReadFull(dec.in, empty); err != nil {
- panic(err)
- }
- dec.pos = newpos
- }
-}
-
-// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
-func (dec *decoder) binread(v interface{}) {
- if err := binary.Read(dec.in, dec.order, v); err != nil {
- panic(err)
- }
-}
-
-func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
- defer func() {
- var ok bool
- v := recover()
- if err, ok = v.(error); ok {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = FormatError("unexpected EOF")
- }
- }
- }()
- vs = make([]interface{}, 0)
- s := sig.str
- for s != "" {
- err, rem := validSingle(s, 0)
- if err != nil {
- return nil, err
- }
- v := dec.decode(s[:len(s)-len(rem)], 0)
- vs = append(vs, v)
- s = rem
- }
- return vs, nil
-}
-
-func (dec *decoder) decode(s string, depth int) interface{} {
- dec.align(alignment(typeFor(s)))
- switch s[0] {
- case 'y':
- var b [1]byte
- if _, err := dec.in.Read(b[:]); err != nil {
- panic(err)
- }
- dec.pos++
- return b[0]
- case 'b':
- i := dec.decode("u", depth).(uint32)
- switch {
- case i == 0:
- return false
- case i == 1:
- return true
- default:
- panic(FormatError("invalid value for boolean"))
- }
- case 'n':
- var i int16
- dec.binread(&i)
- dec.pos += 2
- return i
- case 'i':
- var i int32
- dec.binread(&i)
- dec.pos += 4
- return i
- case 'x':
- var i int64
- dec.binread(&i)
- dec.pos += 8
- return i
- case 'q':
- var i uint16
- dec.binread(&i)
- dec.pos += 2
- return i
- case 'u':
- var i uint32
- dec.binread(&i)
- dec.pos += 4
- return i
- case 't':
- var i uint64
- dec.binread(&i)
- dec.pos += 8
- return i
- case 'd':
- var f float64
- dec.binread(&f)
- dec.pos += 8
- return f
- case 's':
- length := dec.decode("u", depth).(uint32)
- b := make([]byte, int(length)+1)
- if _, err := io.ReadFull(dec.in, b); err != nil {
- panic(err)
- }
- dec.pos += int(length) + 1
- return string(b[:len(b)-1])
- case 'o':
- return ObjectPath(dec.decode("s", depth).(string))
- case 'g':
- length := dec.decode("y", depth).(byte)
- b := make([]byte, int(length)+1)
- if _, err := io.ReadFull(dec.in, b); err != nil {
- panic(err)
- }
- dec.pos += int(length) + 1
- sig, err := ParseSignature(string(b[:len(b)-1]))
- if err != nil {
- panic(err)
- }
- return sig
- case 'v':
- if depth >= 64 {
- panic(FormatError("input exceeds container depth limit"))
- }
- var variant Variant
- sig := dec.decode("g", depth).(Signature)
- if len(sig.str) == 0 {
- panic(FormatError("variant signature is empty"))
- }
- err, rem := validSingle(sig.str, 0)
- if err != nil {
- panic(err)
- }
- if rem != "" {
- panic(FormatError("variant signature has multiple types"))
- }
- variant.sig = sig
- variant.value = dec.decode(sig.str, depth+1)
- return variant
- case 'h':
- return UnixFDIndex(dec.decode("u", depth).(uint32))
- case 'a':
- if len(s) > 1 && s[1] == '{' {
- ksig := s[2:3]
- vsig := s[3 : len(s)-1]
- v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
- if depth >= 63 {
- panic(FormatError("input exceeds container depth limit"))
- }
- length := dec.decode("u", depth).(uint32)
- // Even for empty maps, the correct padding must be included
- dec.align(8)
- spos := dec.pos
- for dec.pos < spos+int(length) {
- dec.align(8)
- if !isKeyType(v.Type().Key()) {
- panic(InvalidTypeError{v.Type()})
- }
- kv := dec.decode(ksig, depth+2)
- vv := dec.decode(vsig, depth+2)
- v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
- }
- return v.Interface()
- }
- if depth >= 64 {
- panic(FormatError("input exceeds container depth limit"))
- }
- length := dec.decode("u", depth).(uint32)
- v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
- // Even for empty arrays, the correct padding must be included
- dec.align(alignment(typeFor(s[1:])))
- spos := dec.pos
- for dec.pos < spos+int(length) {
- ev := dec.decode(s[1:], depth+1)
- v = reflect.Append(v, reflect.ValueOf(ev))
- }
- return v.Interface()
- case '(':
- if depth >= 64 {
- panic(FormatError("input exceeds container depth limit"))
- }
- dec.align(8)
- v := make([]interface{}, 0)
- s = s[1 : len(s)-1]
- for s != "" {
- err, rem := validSingle(s, 0)
- if err != nil {
- panic(err)
- }
- ev := dec.decode(s[:len(s)-len(rem)], depth+1)
- v = append(v, ev)
- s = rem
- }
- return v
- default:
- panic(SignatureError{Sig: s})
- }
-}
-
-// A FormatError is an error in the wire format.
-type FormatError string
-
-func (e FormatError) Error() string {
- return "dbus: wire format error: " + string(e)
-}
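
The rounding in align above relies on D-Bus alignments being powers of two; a standalone check of the formula:

    package main

    import "fmt"

    // alignTo rounds pos up to the next multiple of n (n must be a power of two),
    // mirroring the expression used in (*decoder).align.
    func alignTo(pos, n int) int {
    	return (pos + n - 1) & ^(n - 1)
    }

    func main() {
    	fmt.Println(alignTo(3, 4)) // 4
    	fmt.Println(alignTo(8, 8)) // 8
    	fmt.Println(alignTo(9, 8)) // 16
    }
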
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/doc.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/doc.go
deleted file mode 100644
index deff554..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/doc.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Package dbus implements bindings to the D-Bus message bus system.
-
-To use the message bus API, you first need to connect to a bus (usually the
-session or system bus). The acquired connection then can be used to call methods
-on remote objects and emit or receive signals. Using the Export method, you can
-arrange D-Bus method calls to be directly translated to method calls on a Go
-value.
-
-Conversion Rules
-
-For outgoing messages, Go types are automatically converted to the
-corresponding D-Bus types. The following types are directly encoded as their
-respective D-Bus equivalents:
-
- Go type | D-Bus type
- ------------+-----------
- byte | BYTE
- bool | BOOLEAN
- int16 | INT16
- uint16 | UINT16
- int32 | INT32
- uint32 | UINT32
- int64 | INT64
- uint64 | UINT64
- float64 | DOUBLE
- string | STRING
- ObjectPath | OBJECT_PATH
- Signature | SIGNATURE
- Variant | VARIANT
- UnixFDIndex | UNIX_FD
-
-Slices and arrays encode as ARRAYs of their element type.
-
-Maps encode as DICTs, provided that their key type can be used as a key for
-a DICT.
-
-Structs other than Variant and Signature encode as a STRUCT containing their
-exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
-be skipped.
-
-Pointers encode as the value they're pointed to.
-
-Trying to encode any other type or a slice, map or struct containing an
-unsupported type will result in an InvalidTypeError.
-
-For incoming messages, the inverse of these rules is used, with the exception
-of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
-containing the struct fields in the correct order. The Store function can be
-used to convert such values to Go structs.
-
-Unix FD passing
-
-Handling Unix file descriptors deserves special mention. To use them, you should
-first check that they are supported on a connection by calling SupportsUnixFDs.
-If it returns true, all methods of Connection will translate messages containing
-UnixFDs to messages that are accompanied by the given file descriptors, with the
-UnixFD values being substituted by the correct indices. Similarly, the indices
-of incoming messages are automatically resolved. It shouldn't be necessary to use
-UnixFDIndex.
-
-*/
-package dbus
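
A sketch of these conversion rules in practice, using the package's SignatureOf helper to show the wire signature chosen for a few Go values; the struct type here is made up for illustration:

    package main

    import (
    	"fmt"

    	"github.com/guelfey/go.dbus"
    )

    type config struct {
    	Name  string
    	Flags uint32
    }

    func main() {
    	sig := dbus.SignatureOf(
    		uint32(7),                 // u
    		"hello",                   // s
    		[]string{"a", "b"},        // as
    		map[string]dbus.Variant{}, // a{sv}
    		config{"x", 1},            // (su)
    	)
    	fmt.Println(sig) // usasa{sv}(su)
    }
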
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/encoder.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/encoder.go
deleted file mode 100644
index f9d2f05..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/encoder.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "reflect"
-)
-
-// An encoder encodes values to the D-Bus wire format.
-type encoder struct {
- out io.Writer
- order binary.ByteOrder
- pos int
-}
-
-// NewEncoder returns a new encoder that writes to out in the given byte order.
-func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
- enc := new(encoder)
- enc.out = out
- enc.order = order
- return enc
-}
-
-// Aligns the next output to be on a multiple of n. Panics on write errors.
-func (enc *encoder) align(n int) {
- if enc.pos%n != 0 {
- newpos := (enc.pos + n - 1) & ^(n - 1)
- empty := make([]byte, newpos-enc.pos)
- if _, err := enc.out.Write(empty); err != nil {
- panic(err)
- }
- enc.pos = newpos
- }
-}
-
-// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
-func (enc *encoder) binwrite(v interface{}) {
- if err := binary.Write(enc.out, enc.order, v); err != nil {
- panic(err)
- }
-}
-
-// Encode encodes the given values to the underlying writer. All written values
-// are aligned properly as required by the D-Bus spec.
-func (enc *encoder) Encode(vs ...interface{}) (err error) {
- defer func() {
- err, _ = recover().(error)
- }()
- for _, v := range vs {
- enc.encode(reflect.ValueOf(v), 0)
- }
- return nil
-}
-
-// encode encodes the given value to the writer and panics on error. depth holds
-// the depth of the container nesting.
-func (enc *encoder) encode(v reflect.Value, depth int) {
- enc.align(alignment(v.Type()))
- switch v.Kind() {
- case reflect.Uint8:
- var b [1]byte
- b[0] = byte(v.Uint())
- if _, err := enc.out.Write(b[:]); err != nil {
- panic(err)
- }
- enc.pos++
- case reflect.Bool:
- if v.Bool() {
- enc.encode(reflect.ValueOf(uint32(1)), depth)
- } else {
- enc.encode(reflect.ValueOf(uint32(0)), depth)
- }
- case reflect.Int16:
- enc.binwrite(int16(v.Int()))
- enc.pos += 2
- case reflect.Uint16:
- enc.binwrite(uint16(v.Uint()))
- enc.pos += 2
- case reflect.Int32:
- enc.binwrite(int32(v.Int()))
- enc.pos += 4
- case reflect.Uint32:
- enc.binwrite(uint32(v.Uint()))
- enc.pos += 4
- case reflect.Int64:
- enc.binwrite(v.Int())
- enc.pos += 8
- case reflect.Uint64:
- enc.binwrite(v.Uint())
- enc.pos += 8
- case reflect.Float64:
- enc.binwrite(v.Float())
- enc.pos += 8
- case reflect.String:
- enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
- b := make([]byte, v.Len()+1)
- copy(b, v.String())
- b[len(b)-1] = 0
- n, err := enc.out.Write(b)
- if err != nil {
- panic(err)
- }
- enc.pos += n
- case reflect.Ptr:
- enc.encode(v.Elem(), depth)
- case reflect.Slice, reflect.Array:
- if depth >= 64 {
- panic(FormatError("input exceeds container depth limit"))
- }
- var buf bytes.Buffer
- bufenc := newEncoder(&buf, enc.order)
-
- for i := 0; i < v.Len(); i++ {
- bufenc.encode(v.Index(i), depth+1)
- }
- enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
- length := buf.Len()
- enc.align(alignment(v.Type().Elem()))
- if _, err := buf.WriteTo(enc.out); err != nil {
- panic(err)
- }
- enc.pos += length
- case reflect.Struct:
- if depth >= 64 && v.Type() != signatureType {
- panic(FormatError("input exceeds container depth limit"))
- }
- switch t := v.Type(); t {
- case signatureType:
- str := v.Field(0)
- enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
- b := make([]byte, str.Len()+1)
- copy(b, str.String())
- b[len(b)-1] = 0
- n, err := enc.out.Write(b)
- if err != nil {
- panic(err)
- }
- enc.pos += n
- case variantType:
- variant := v.Interface().(Variant)
- enc.encode(reflect.ValueOf(variant.sig), depth+1)
- enc.encode(reflect.ValueOf(variant.value), depth+1)
- default:
- for i := 0; i < v.Type().NumField(); i++ {
- field := t.Field(i)
- if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
- enc.encode(v.Field(i), depth+1)
- }
- }
- }
- case reflect.Map:
- // Maps are arrays of structures, so they actually increase the depth by
- // 2.
- if depth >= 63 {
- panic(FormatError("input exceeds container depth limit"))
- }
- if !isKeyType(v.Type().Key()) {
- panic(InvalidTypeError{v.Type()})
- }
- keys := v.MapKeys()
- var buf bytes.Buffer
- bufenc := newEncoder(&buf, enc.order)
- for _, k := range keys {
- bufenc.align(8)
- bufenc.encode(k, depth+2)
- bufenc.encode(v.MapIndex(k), depth+2)
- }
- enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
- length := buf.Len()
- enc.align(8)
- if _, err := buf.WriteTo(enc.out); err != nil {
- panic(err)
- }
- enc.pos += length
- default:
- panic(InvalidTypeError{v.Type()})
- }
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/examples_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/examples_test.go
deleted file mode 100644
index 0218ac5..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/examples_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package dbus
-
-import "fmt"
-
-func ExampleConn_Emit() {
- conn, err := SessionBus()
- if err != nil {
- panic(err)
- }
-
- conn.Emit("/foo/bar", "foo.bar.Baz", uint32(0xDAEDBEEF))
-}
-
-func ExampleObject_Call() {
- var list []string
-
- conn, err := SessionBus()
- if err != nil {
- panic(err)
- }
-
- err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&list)
- if err != nil {
- panic(err)
- }
- for _, v := range list {
- fmt.Println(v)
- }
-}
-
-func ExampleObject_Go() {
- conn, err := SessionBus()
- if err != nil {
- panic(err)
- }
-
- ch := make(chan *Call, 10)
- conn.BusObject().Go("org.freedesktop.DBus.ListActivatableNames", 0, ch)
- select {
- case call := <-ch:
- if call.Err != nil {
- panic(call.Err)
- }
- list := call.Body[0].([]string)
- for _, v := range list {
- fmt.Println(v)
- }
- // put some other cases here
- }
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/export.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/export.go
deleted file mode 100644
index d95b092..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/export.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package dbus
-
-import (
- "errors"
- "reflect"
- "strings"
- "unicode"
-)
-
-var (
- errmsgInvalidArg = Error{
- "org.freedesktop.DBus.Error.InvalidArgs",
- []interface{}{"Invalid type / number of args"},
- }
- errmsgNoObject = Error{
- "org.freedesktop.DBus.Error.NoSuchObject",
- []interface{}{"No such object"},
- }
- errmsgUnknownMethod = Error{
- "org.freedesktop.DBus.Error.UnknownMethod",
- []interface{}{"Unknown / invalid method"},
- }
-)
-
-// Sender is a type which can be used in exported methods to receive the message
-// sender.
-type Sender string
-
-func exportedMethod(v interface{}, name string) reflect.Value {
- if v == nil {
- return reflect.Value{}
- }
- m := reflect.ValueOf(v).MethodByName(name)
- if !m.IsValid() {
- return reflect.Value{}
- }
- t := m.Type()
- if t.NumOut() == 0 ||
- t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
-
- return reflect.Value{}
- }
- return m
-}
-
-// handleCall handles the given method call (i.e. checks whether it is one of
-// the pre-implemented ones and searches for a corresponding handler if not).
-func (conn *Conn) handleCall(msg *Message) {
- name := msg.Headers[FieldMember].value.(string)
- path := msg.Headers[FieldPath].value.(ObjectPath)
- ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
- sender := msg.Headers[FieldSender].value.(string)
- serial := msg.serial
- if ifaceName == "org.freedesktop.DBus.Peer" {
- switch name {
- case "Ping":
- conn.sendReply(sender, serial)
- case "GetMachineId":
- conn.sendReply(sender, serial, conn.uuid)
- default:
- conn.sendError(errmsgUnknownMethod, sender, serial)
- }
- return
- }
- if len(name) == 0 || unicode.IsLower([]rune(name)[0]) {
- conn.sendError(errmsgUnknownMethod, sender, serial)
- }
- var m reflect.Value
- if hasIface {
- conn.handlersLck.RLock()
- obj, ok := conn.handlers[path]
- if !ok {
- conn.sendError(errmsgNoObject, sender, serial)
- conn.handlersLck.RUnlock()
- return
- }
- iface := obj[ifaceName]
- conn.handlersLck.RUnlock()
- m = exportedMethod(iface, name)
- } else {
- conn.handlersLck.RLock()
- if _, ok := conn.handlers[path]; !ok {
- conn.sendError(errmsgNoObject, sender, serial)
- conn.handlersLck.RUnlock()
- return
- }
- for _, v := range conn.handlers[path] {
- m = exportedMethod(v, name)
- if m.IsValid() {
- break
- }
- }
- conn.handlersLck.RUnlock()
- }
- if !m.IsValid() {
- conn.sendError(errmsgUnknownMethod, sender, serial)
- return
- }
- t := m.Type()
- vs := msg.Body
- pointers := make([]interface{}, t.NumIn())
- decode := make([]interface{}, 0, len(vs))
- for i := 0; i < t.NumIn(); i++ {
- tp := t.In(i)
- val := reflect.New(tp)
- pointers[i] = val.Interface()
- if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
- val.Elem().SetString(sender)
- } else {
- decode = append(decode, pointers[i])
- }
- }
- if len(decode) != len(vs) {
- conn.sendError(errmsgInvalidArg, sender, serial)
- return
- }
- if err := Store(vs, decode...); err != nil {
- conn.sendError(errmsgInvalidArg, sender, serial)
- return
- }
- params := make([]reflect.Value, len(pointers))
- for i := 0; i < len(pointers); i++ {
- params[i] = reflect.ValueOf(pointers[i]).Elem()
- }
- ret := m.Call(params)
- if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
- conn.sendError(*em, sender, serial)
- return
- }
- if msg.Flags&FlagNoReplyExpected == 0 {
- reply := new(Message)
- reply.Type = TypeMethodReply
- reply.serial = conn.getSerial()
- reply.Headers = make(map[HeaderField]Variant)
- reply.Headers[FieldDestination] = msg.Headers[FieldSender]
- reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
- reply.Body = make([]interface{}, len(ret)-1)
- for i := 0; i < len(ret)-1; i++ {
- reply.Body[i] = ret[i].Interface()
- }
- if len(ret) != 1 {
- reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
- }
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- reply
- }
- conn.outLck.RUnlock()
- }
-}
-
-// Emit emits the given signal on the message bus. The name parameter must be
-// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
-func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
- if !path.IsValid() {
- return errors.New("dbus: invalid object path")
- }
- i := strings.LastIndex(name, ".")
- if i == -1 {
- return errors.New("dbus: invalid method name")
- }
- iface := name[:i]
- member := name[i+1:]
- if !isValidMember(member) {
- return errors.New("dbus: invalid method name")
- }
- if !isValidInterface(iface) {
- return errors.New("dbus: invalid interface name")
- }
- msg := new(Message)
- msg.Type = TypeSignal
- msg.serial = conn.getSerial()
- msg.Headers = make(map[HeaderField]Variant)
- msg.Headers[FieldInterface] = MakeVariant(iface)
- msg.Headers[FieldMember] = MakeVariant(member)
- msg.Headers[FieldPath] = MakeVariant(path)
- msg.Body = values
- if len(values) > 0 {
- msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
- }
- conn.outLck.RLock()
- defer conn.outLck.RUnlock()
- if conn.closed {
- return ErrClosed
- }
- conn.out <- msg
- return nil
-}
-
-// Export registers the given value to be exported as an object on the
-// message bus.
-//
-// If a method call on the given path and interface is received, an exported
-// method with the same name is called with v as the receiver if the
-// parameters match and the last return value is of type *Error. If this
-// *Error is not nil, it is sent back to the caller as an error.
-// Otherwise, a method reply is sent with the other return values as its body.
-//
-// Any parameters with the special type Sender are set to the sender of the
-// dbus message when the method is called. Parameters of this type do not
-// contribute to the dbus signature of the method (i.e. the method is exposed
-// as if the parameters of type Sender were not there).
-//
-// Every method call is executed in a new goroutine, so the method may be called
-// in multiple goroutines at once.
-//
-// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
-// handled for every object.
-//
-// Passing nil as the first parameter will cause conn to cease handling calls on
-// the given combination of path and interface.
-//
-// Export returns an error if path is not a valid path name.
-func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
- if !path.IsValid() {
- return errors.New("dbus: invalid path name")
- }
- conn.handlersLck.Lock()
- if v == nil {
- if _, ok := conn.handlers[path]; ok {
- delete(conn.handlers[path], iface)
- if len(conn.handlers[path]) == 0 {
- delete(conn.handlers, path)
- }
- }
- return nil
- }
- if _, ok := conn.handlers[path]; !ok {
- conn.handlers[path] = make(map[string]interface{})
- }
- conn.handlers[path][iface] = v
- conn.handlersLck.Unlock()
- return nil
-}
-
-// ReleaseName calls org.freedesktop.DBus.ReleaseName. You should use only this
-// method to release a name (see below).
-func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
- var r uint32
- err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
- if err != nil {
- return 0, err
- }
- if r == uint32(ReleaseNameReplyReleased) {
- conn.namesLck.Lock()
- for i, v := range conn.names {
- if v == name {
- copy(conn.names[i:], conn.names[i+1:])
- conn.names = conn.names[:len(conn.names)-1]
- }
- }
- conn.namesLck.Unlock()
- }
- return ReleaseNameReply(r), nil
-}
-
-// RequestName calls org.freedesktop.DBus.RequestName. You should use only this
-// method to request a name because package dbus needs to keep track of all
-// names that the connection has.
-func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
- var r uint32
- err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
- if err != nil {
- return 0, err
- }
- if r == uint32(RequestNameReplyPrimaryOwner) {
- conn.namesLck.Lock()
- conn.names = append(conn.names, name)
- conn.namesLck.Unlock()
- }
- return RequestNameReply(r), nil
-}
-
-// ReleaseNameReply is the reply to a ReleaseName call.
-type ReleaseNameReply uint32
-
-const (
- ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
- ReleaseNameReplyNonExistent
- ReleaseNameReplyNotOwner
-)
-
-// RequestNameFlags represents the possible flags for a RequestName call.
-type RequestNameFlags uint32
-
-const (
- NameFlagAllowReplacement RequestNameFlags = 1 << iota
- NameFlagReplaceExisting
- NameFlagDoNotQueue
-)
-
-// RequestNameReply is the reply to a RequestName call.
-type RequestNameReply uint32
-
-const (
- RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
- RequestNameReplyInQueue
- RequestNameReplyExists
- RequestNameReplyAlreadyOwner
-)
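
A hedged sketch of the usual request/release flow built on the flags and reply constants above; the well-known name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/guelfey/go.dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// Ask for a well-known name without queueing behind the current owner.
	reply, err := conn.RequestName("com.example.Demo", dbus.NameFlagDoNotQueue)
	if err != nil {
		panic(err)
	}
	if reply != dbus.RequestNameReplyPrimaryOwner {
		fmt.Println("name already taken")
		return
	}
	defer conn.ReleaseName("com.example.Demo")
	// ... export objects and serve method calls here ...
}
```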
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/call.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/call.go
deleted file mode 100644
index f511836..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/call.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package introspect
-
-import (
- "encoding/xml"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
- "strings"
-)
-
-// Call calls org.freedesktop.Introspectable.Introspect on a remote object
-// and returns the introspection data.
-func Call(o *dbus.Object) (*Node, error) {
- var xmldata string
- var node Node
-
- err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata)
- if err != nil {
- return nil, err
- }
- err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node)
- if err != nil {
- return nil, err
- }
- if node.Name == "" {
- node.Name = string(o.Path())
- }
- return &node, nil
-}
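
A minimal sketch of calling Introspect on a remote object through this helper; the bus daemon itself is used as the target because every bus implements org.freedesktop.DBus.

```go
package main

import (
	"fmt"

	"github.com/guelfey/go.dbus"
	"github.com/guelfey/go.dbus/introspect"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	node, err := introspect.Call(conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus"))
	if err != nil {
		panic(err)
	}
	// Print the interfaces the bus daemon reports in its introspection data.
	for _, iface := range node.Interfaces {
		fmt.Println(iface.Name)
	}
}
```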
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspect.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspect.go
deleted file mode 100644
index dafcdb8..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspect.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package introspect provides some utilities for dealing with the DBus
-// introspection format.
-package introspect
-
-import "encoding/xml"
-
-// The introspection data for the org.freedesktop.DBus.Introspectable interface.
-var IntrospectData = Interface{
- Name: "org.freedesktop.DBus.Introspectable",
- Methods: []Method{
- {
- Name: "Introspect",
- Args: []Arg{
- {"out", "s", "out"},
- },
- },
- },
-}
-
-// The introspection data for the org.freedesktop.DBus.Introspectable interface,
-// as a string.
-const IntrospectDataString = `
-<interface name="org.freedesktop.DBus.Introspectable">
- <method name="Introspect">
- <arg name="out" direction="out" type="s"/>
- </method>
-</interface>
-`
-
-// Node is the root element of an introspection.
-type Node struct {
- XMLName xml.Name `xml:"node"`
- Name string `xml:"name,attr,omitempty"`
- Interfaces []Interface `xml:"interface"`
- Children []Node `xml:"node,omitempty"`
-}
-
-// Interface describes a DBus interface that is available on the message bus.
-type Interface struct {
- Name string `xml:"name,attr"`
- Methods []Method `xml:"method"`
- Signals []Signal `xml:"signal"`
- Properties []Property `xml:"property"`
- Annotations []Annotation `xml:"annotation"`
-}
-
-// Method describes a Method on an Interface as returned by an introspection.
-type Method struct {
- Name string `xml:"name,attr"`
- Args []Arg `xml:"arg"`
- Annotations []Annotation `xml:"annotation"`
-}
-
-// Signal describes a Signal emitted on an Interface.
-type Signal struct {
- Name string `xml:"name,attr"`
- Args []Arg `xml:"arg"`
- Annotations []Annotation `xml:"annotation"`
-}
-
-// Property describes a property of an Interface.
-type Property struct {
- Name string `xml:"name,attr"`
- Type string `xml:"type,attr"`
- Access string `xml:"access,attr"`
- Annotations []Annotation `xml:"annotation"`
-}
-
-// Arg represents an argument of a method or a signal.
-type Arg struct {
- Name string `xml:"name,attr,omitempty"`
- Type string `xml:"type,attr"`
- Direction string `xml:"direction,attr,omitempty"`
-}
-
-// Annotation is an annotation in the introspection format.
-type Annotation struct {
- Name string `xml:"name,attr"`
- Value string `xml:"value,attr"`
-}
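
To show how these structs correspond to the introspection XML, a short sketch that marshals a hand-built Node with encoding/xml; the interface and method names are made up.

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/guelfey/go.dbus/introspect"
)

func main() {
	node := introspect.Node{
		Name: "/com/example/Demo",
		Interfaces: []introspect.Interface{
			introspect.IntrospectData, // org.freedesktop.DBus.Introspectable
			{
				Name: "com.example.Demo",
				Methods: []introspect.Method{
					{Name: "Greet", Args: []introspect.Arg{
						{"name", "s", "in"},
						{"greeting", "s", "out"},
					}},
				},
			},
		},
	}
	out, _ := xml.MarshalIndent(node, "", "  ")
	fmt.Println(string(out)) // <node name="/com/example/Demo"> ... </node>
}
```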
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspectable.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspectable.go
deleted file mode 100644
index 9cbda45..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect/introspectable.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package introspect
-
-import (
- "encoding/xml"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
- "reflect"
-)
-
-// Introspectable implements org.freedesktop.DBus.Introspectable.
-//
-// You can create it by converting the XML-formatted introspection data from a
-// string to an Introspectable or call NewIntrospectable with a Node. Then,
-// export it as org.freedesktop.DBus.Introspectable on your object.
-type Introspectable string
-
-// NewIntrospectable returns an Introspectable that returns the introspection
-// data that corresponds to the given Node. If n.Interfaces doesn't contain the
-// data for org.freedesktop.DBus.Introspectable, it is added automatically.
-func NewIntrospectable(n *Node) Introspectable {
- found := false
- for _, v := range n.Interfaces {
- if v.Name == "org.freedesktop.DBus.Introspectable" {
- found = true
- break
- }
- }
- if !found {
- n.Interfaces = append(n.Interfaces, IntrospectData)
- }
- b, err := xml.Marshal(n)
- if err != nil {
- panic(err)
- }
- return Introspectable(b)
-}
-
-// Introspect implements org.freedesktop.DBus.Introspectable.Introspect.
-func (i Introspectable) Introspect() (string, *dbus.Error) {
- return string(i), nil
-}
-
-// Methods returns the description of the methods of v. This can be used to
-// create a Node which can be passed to NewIntrospectable.
-func Methods(v interface{}) []Method {
- t := reflect.TypeOf(v)
- ms := make([]Method, 0, t.NumMethod())
- for i := 0; i < t.NumMethod(); i++ {
- if t.Method(i).PkgPath != "" {
- continue
- }
- mt := t.Method(i).Type
- if mt.NumOut() == 0 ||
- mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{"", nil}) {
-
- continue
- }
- var m Method
- m.Name = t.Method(i).Name
- m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2)
- for j := 1; j < mt.NumIn(); j++ {
- if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() {
- arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"}
- m.Args = append(m.Args, arg)
- }
- }
- for j := 0; j < mt.NumOut()-1; j++ {
- arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"}
- m.Args = append(m.Args, arg)
- }
- m.Annotations = make([]Annotation, 0)
- ms = append(ms, m)
- }
- return ms
-}
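
A sketch that ties Methods and NewIntrospectable together with Conn.Export so the exported object also answers Introspect; paths and interface names are illustrative.

```go
package main

import (
	"github.com/guelfey/go.dbus"
	"github.com/guelfey/go.dbus/introspect"
)

type demo struct{}

func (d demo) Greet(name string) (string, *dbus.Error) {
	return "hello, " + name, nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	d := demo{}
	conn.Export(d, "/com/example/Demo", "com.example.Demo")
	// Derive the method descriptions from the receiver and serve them as
	// org.freedesktop.DBus.Introspectable on the same path.
	node := &introspect.Node{
		Name: "/com/example/Demo",
		Interfaces: []introspect.Interface{
			introspect.IntrospectData,
			{Name: "com.example.Demo", Methods: introspect.Methods(d)},
		},
	}
	conn.Export(introspect.NewIntrospectable(node), "/com/example/Demo",
		"org.freedesktop.DBus.Introspectable")
	select {}
}
```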
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/message.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/message.go
deleted file mode 100644
index 075d6e3..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/message.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "reflect"
- "strconv"
-)
-
-const protoVersion byte = 1
-
-// Flags represents the possible flags of a D-Bus message.
-type Flags byte
-
-const (
- // FlagNoReplyExpected signals that the message is not expected to generate
- // a reply. If this flag is set on outgoing messages, any possible reply
- // will be discarded.
- FlagNoReplyExpected Flags = 1 << iota
- // FlagNoAutoStart signals that the message bus should not automatically
- // start an application when handling this message.
- FlagNoAutoStart
-)
-
-// Type represents the possible types of a D-Bus message.
-type Type byte
-
-const (
- TypeMethodCall Type = 1 + iota
- TypeMethodReply
- TypeError
- TypeSignal
- typeMax
-)
-
-func (t Type) String() string {
- switch t {
- case TypeMethodCall:
- return "method call"
- case TypeMethodReply:
- return "reply"
- case TypeError:
- return "error"
- case TypeSignal:
- return "signal"
- }
- return "invalid"
-}
-
-// HeaderField represents the possible byte codes for the headers
-// of a D-Bus message.
-type HeaderField byte
-
-const (
- FieldPath HeaderField = 1 + iota
- FieldInterface
- FieldMember
- FieldErrorName
- FieldReplySerial
- FieldDestination
- FieldSender
- FieldSignature
- FieldUnixFDs
- fieldMax
-)
-
-// An InvalidMessageError describes the reason why a D-Bus message is regarded as
-// invalid.
-type InvalidMessageError string
-
-func (e InvalidMessageError) Error() string {
- return "dbus: invalid message: " + string(e)
-}
-
-// fieldTypes are the value types of the various header fields.
-var fieldTypes = [fieldMax]reflect.Type{
- FieldPath: objectPathType,
- FieldInterface: stringType,
- FieldMember: stringType,
- FieldErrorName: stringType,
- FieldReplySerial: uint32Type,
- FieldDestination: stringType,
- FieldSender: stringType,
- FieldSignature: signatureType,
- FieldUnixFDs: uint32Type,
-}
-
-// requiredFields lists the header fields that are required by the different
-// message types.
-var requiredFields = [typeMax][]HeaderField{
- TypeMethodCall: {FieldPath, FieldMember},
- TypeMethodReply: {FieldReplySerial},
- TypeError: {FieldErrorName, FieldReplySerial},
- TypeSignal: {FieldPath, FieldInterface, FieldMember},
-}
-
-// Message represents a single D-Bus message.
-type Message struct {
- Type
- Flags
- Headers map[HeaderField]Variant
- Body []interface{}
-
- serial uint32
-}
-
-type header struct {
- Field byte
- Variant
-}
-
-// DecodeMessage tries to decode a single message in the D-Bus wire format
-// from the given reader. The byte order is figured out from the first byte.
-// The possibly returned error can be an error of the underlying reader, an
-// InvalidMessageError or a FormatError.
-func DecodeMessage(rd io.Reader) (msg *Message, err error) {
- var order binary.ByteOrder
- var hlength, length uint32
- var typ, flags, proto byte
- var headers []header
-
- b := make([]byte, 1)
- _, err = rd.Read(b)
- if err != nil {
- return
- }
- switch b[0] {
- case 'l':
- order = binary.LittleEndian
- case 'B':
- order = binary.BigEndian
- default:
- return nil, InvalidMessageError("invalid byte order")
- }
-
- dec := newDecoder(rd, order)
- dec.pos = 1
-
- msg = new(Message)
- vs, err := dec.Decode(Signature{"yyyuu"})
- if err != nil {
- return nil, err
- }
- if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
- return nil, err
- }
- msg.Type = Type(typ)
- msg.Flags = Flags(flags)
-
- // get the header length separately because we need it later
- b = make([]byte, 4)
- _, err = io.ReadFull(rd, b)
- if err != nil {
- return nil, err
- }
- binary.Read(bytes.NewBuffer(b), order, &hlength)
- if hlength+length+16 > 1<<27 {
- return nil, InvalidMessageError("message is too long")
- }
- dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
- dec.pos = 12
- vs, err = dec.Decode(Signature{"a(yv)"})
- if err != nil {
- return nil, err
- }
- if err = Store(vs, &headers); err != nil {
- return nil, err
- }
-
- msg.Headers = make(map[HeaderField]Variant)
- for _, v := range headers {
- msg.Headers[HeaderField(v.Field)] = v.Variant
- }
-
- dec.align(8)
- body := make([]byte, int(length))
- if length != 0 {
- _, err := io.ReadFull(rd, body)
- if err != nil {
- return nil, err
- }
- }
-
- if err = msg.IsValid(); err != nil {
- return nil, err
- }
- sig, _ := msg.Headers[FieldSignature].value.(Signature)
- if sig.str != "" {
- buf := bytes.NewBuffer(body)
- dec = newDecoder(buf, order)
- vs, err := dec.Decode(sig)
- if err != nil {
- return nil, err
- }
- msg.Body = vs
- }
-
- return
-}
-
-// EncodeTo encodes and sends a message to the given writer. The byte order must
-// be either binary.LittleEndian or binary.BigEndian. If the message is not
-// valid or an error occurs when writing, an error is returned.
-func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
- if err := msg.IsValid(); err != nil {
- return err
- }
- var vs [7]interface{}
- switch order {
- case binary.LittleEndian:
- vs[0] = byte('l')
- case binary.BigEndian:
- vs[0] = byte('B')
- default:
- return errors.New("dbus: invalid byte order")
- }
- body := new(bytes.Buffer)
- enc := newEncoder(body, order)
- if len(msg.Body) != 0 {
- enc.Encode(msg.Body...)
- }
- vs[1] = msg.Type
- vs[2] = msg.Flags
- vs[3] = protoVersion
- vs[4] = uint32(len(body.Bytes()))
- vs[5] = msg.serial
- headers := make([]header, 0, len(msg.Headers))
- for k, v := range msg.Headers {
- headers = append(headers, header{byte(k), v})
- }
- vs[6] = headers
- var buf bytes.Buffer
- enc = newEncoder(&buf, order)
- enc.Encode(vs[:]...)
- enc.align(8)
- body.WriteTo(&buf)
- if buf.Len() > 1<<27 {
- return InvalidMessageError("message is too long")
- }
- if _, err := buf.WriteTo(out); err != nil {
- return err
- }
- return nil
-}
-
-// IsValid checks whether msg is a valid message and returns an
-// InvalidMessageError if it is not.
-func (msg *Message) IsValid() error {
- if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
- return InvalidMessageError("invalid flags")
- }
- if msg.Type == 0 || msg.Type >= typeMax {
- return InvalidMessageError("invalid message type")
- }
- for k, v := range msg.Headers {
- if k == 0 || k >= fieldMax {
- return InvalidMessageError("invalid header")
- }
- if reflect.TypeOf(v.value) != fieldTypes[k] {
- return InvalidMessageError("invalid type of header field")
- }
- }
- for _, v := range requiredFields[msg.Type] {
- if _, ok := msg.Headers[v]; !ok {
- return InvalidMessageError("missing required header")
- }
- }
- if path, ok := msg.Headers[FieldPath]; ok {
- if !path.value.(ObjectPath).IsValid() {
- return InvalidMessageError("invalid path name")
- }
- }
- if iface, ok := msg.Headers[FieldInterface]; ok {
- if !isValidInterface(iface.value.(string)) {
- return InvalidMessageError("invalid interface name")
- }
- }
- if member, ok := msg.Headers[FieldMember]; ok {
- if !isValidMember(member.value.(string)) {
- return InvalidMessageError("invalid member name")
- }
- }
- if errname, ok := msg.Headers[FieldErrorName]; ok {
- if !isValidInterface(errname.value.(string)) {
- return InvalidMessageError("invalid error name")
- }
- }
- if len(msg.Body) != 0 {
- if _, ok := msg.Headers[FieldSignature]; !ok {
- return InvalidMessageError("missing signature")
- }
- }
- return nil
-}
-
-// Serial returns the message's serial number. The returned value is only valid
-// for messages received by eavesdropping.
-func (msg *Message) Serial() uint32 {
- return msg.serial
-}
-
-// String returns a string representation of a message similar to the format of
-// dbus-monitor.
-func (msg *Message) String() string {
- if err := msg.IsValid(); err != nil {
- return ""
- }
- s := msg.Type.String()
- if v, ok := msg.Headers[FieldSender]; ok {
- s += " from " + v.value.(string)
- }
- if v, ok := msg.Headers[FieldDestination]; ok {
- s += " to " + v.value.(string)
- }
- s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
- if v, ok := msg.Headers[FieldReplySerial]; ok {
- s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
- }
- if v, ok := msg.Headers[FieldUnixFDs]; ok {
- s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
- }
- if v, ok := msg.Headers[FieldPath]; ok {
- s += " path " + string(v.value.(ObjectPath))
- }
- if v, ok := msg.Headers[FieldInterface]; ok {
- s += " interface " + v.value.(string)
- }
- if v, ok := msg.Headers[FieldErrorName]; ok {
- s += " error " + v.value.(string)
- }
- if v, ok := msg.Headers[FieldMember]; ok {
- s += " member " + v.value.(string)
- }
- if len(msg.Body) != 0 {
- s += "\n"
- }
- for i, v := range msg.Body {
- s += " " + MakeVariant(v).String()
- if i != len(msg.Body)-1 {
- s += "\n"
- }
- }
- return s
-}
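
A sketch of the encode/decode round trip these functions perform, using only the exported Message fields; the path and interface are illustrative and the serial stays at its zero value because it is not settable from outside the package.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/guelfey/go.dbus"
)

func main() {
	msg := &dbus.Message{
		Type: dbus.TypeSignal,
		Headers: map[dbus.HeaderField]dbus.Variant{
			dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/com/example/Demo")),
			dbus.FieldInterface: dbus.MakeVariant("com.example.Demo"),
			dbus.FieldMember:    dbus.MakeVariant("Ping"),
		},
	}
	var buf bytes.Buffer
	// EncodeTo validates the message and writes the wire format into buf.
	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
		panic(err)
	}
	decoded, err := dbus.DecodeMessage(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // e.g. "signal serial 0 path /com/example/Demo interface com.example.Demo member Ping"
}
```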
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/prop/prop.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/prop/prop.go
deleted file mode 100644
index 092124a..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/prop/prop.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Package prop provides the Properties struct which can be used to implement
-// org.freedesktop.DBus.Properties.
-package prop
-
-import (
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/guelfey/go.dbus/introspect"
- "sync"
-)
-
-// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is
-// emitted for a property. If it is EmitFalse, no signal is emitted. If it is
-// EmitTrue, the signal is emitted with the new value. If it is EmitInvalidates,
-// the signal is also emitted, but the new value of the property is not
-// disclosed.
-type EmitType byte
-
-const (
- EmitFalse EmitType = iota
- EmitTrue
- EmitInvalidates
-)
-
-// ErrIfaceNotFound is the error returned to peers who try to access properties
-// on interfaces that aren't found.
-var ErrIfaceNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil}
-
-// ErrPropNotFound is the error returned to peers trying to access properties
-// that aren't found.
-var ErrPropNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil}
-
-// ErrReadOnly is the error returned to peers trying to set a read-only
-// property.
-var ErrReadOnly = &dbus.Error{"org.freedesktop.DBus.Properties.Error.ReadOnly", nil}
-
-// ErrInvalidArg is returned to peers if the type of the value passed to Set
-// does not match the type of the property that is being changed.
-var ErrInvalidArg = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InvalidArg", nil}
-
-// The introspection data for the org.freedesktop.DBus.Properties interface.
-var IntrospectData = introspect.Interface{
- Name: "org.freedesktop.DBus.Properties",
- Methods: []introspect.Method{
- {
- Name: "Get",
- Args: []introspect.Arg{
- {"interface", "s", "in"},
- {"property", "s", "in"},
- {"value", "v", "out"},
- },
- },
- {
- Name: "GetAll",
- Args: []introspect.Arg{
- {"interface", "s", "in"},
- {"props", "a{sv}", "out"},
- },
- },
- {
- Name: "Set",
- Args: []introspect.Arg{
- {"interface", "s", "in"},
- {"property", "s", "in"},
- {"value", "v", "in"},
- },
- },
- },
- Signals: []introspect.Signal{
- {
- Name: "PropertiesChanged",
- Args: []introspect.Arg{
- {"interface", "s", "out"},
- {"changed_properties", "a{sv}", "out"},
- {"invalidates_properties", "as", "out"},
- },
- },
- },
-}
-
-// The introspection data for the org.freedesktop.DBus.Properties interface, as
-// a string.
-const IntrospectDataString = `
-<interface name="org.freedesktop.DBus.Properties">
- <method name="Get">
- <arg name="interface" direction="in" type="s"/>
- <arg name="property" direction="in" type="s"/>
- <arg name="value" direction="out" type="v"/>
- </method>
- <method name="GetAll">
- <arg name="interface" direction="in" type="s"/>
- <arg name="props" direction="out" type="a{sv}"/>
- </method>
- <method name="Set">
- <arg name="interface" direction="in" type="s"/>
- <arg name="property" direction="in" type="s"/>
- <arg name="value" direction="in" type="v"/>
- </method>
- <signal name="PropertiesChanged">
- <arg name="interface" type="s"/>
- <arg name="changed_properties" type="a{sv}"/>
- <arg name="invalidates_properties" type="as"/>
- </signal>
-</interface>
-`
-
-// Prop represents a single property. It is used for creating a Properties
-// value.
-type Prop struct {
- // Initial value. Must be a DBus-representable type.
- Value interface{}
-
- // If true, the value can be modified by calls to Set.
- Writable bool
-
- // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is
- // emitted if this property changes.
- Emit EmitType
-
- // If not nil, anytime this property is changed by Set, this function is
- // called with an appropriate Change as its argument. If the returned error
- // is not nil, it is sent back to the caller of Set and the property is not
- // changed.
- Callback func(*Change) *dbus.Error
-}
-
-// Change represents a change of a property by a call to Set.
-type Change struct {
- Props *Properties
- Iface string
- Name string
- Value interface{}
-}
-
-// Properties is a set of values that can be made available to the message bus
-// using the org.freedesktop.DBus.Properties interface. It is safe for
-// concurrent use by multiple goroutines.
-type Properties struct {
- m map[string]map[string]*Prop
- mut sync.RWMutex
- conn *dbus.Conn
- path dbus.ObjectPath
-}
-
-// New returns a new Properties structure that manages the given properties.
-// The key for the first-level map of props is the name of the interface; the
-// second-level key is the name of the property. The returned structure will be
-// exported as org.freedesktop.DBus.Properties on path.
-func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties {
- p := &Properties{m: props, conn: conn, path: path}
- conn.Export(p, path, "org.freedesktop.DBus.Properties")
- return p
-}
-
-// Get implements org.freedesktop.DBus.Properties.Get.
-func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) {
- p.mut.RLock()
- defer p.mut.RUnlock()
- m, ok := p.m[iface]
- if !ok {
- return dbus.Variant{}, ErrIfaceNotFound
- }
- prop, ok := m[property]
- if !ok {
- return dbus.Variant{}, ErrPropNotFound
- }
- return dbus.MakeVariant(prop.Value), nil
-}
-
-// GetAll implements org.freedesktop.DBus.Properties.GetAll.
-func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) {
- p.mut.RLock()
- defer p.mut.RUnlock()
- m, ok := p.m[iface]
- if !ok {
- return nil, ErrIfaceNotFound
- }
- rm := make(map[string]dbus.Variant, len(m))
- for k, v := range m {
- rm[k] = dbus.MakeVariant(v.Value)
- }
- return rm, nil
-}
-
-// GetMust returns the value of the given property and panics if either the
-// interface or the property name is invalid.
-func (p *Properties) GetMust(iface, property string) interface{} {
- p.mut.RLock()
- defer p.mut.RUnlock()
- return p.m[iface][property].Value
-}
-
-// Introspection returns the introspection data that represents the properties
-// of iface.
-func (p *Properties) Introspection(iface string) []introspect.Property {
- p.mut.RLock()
- defer p.mut.RUnlock()
- m := p.m[iface]
- s := make([]introspect.Property, 0, len(m))
- for k, v := range m {
- p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()}
- if v.Writable {
- p.Access = "readwrite"
- } else {
- p.Access = "read"
- }
- s = append(s, p)
- }
- return s
-}
-
-// set sets the given property and emits PropertiesChanged if appropriate. p.mut
-// must already be locked.
-func (p *Properties) set(iface, property string, v interface{}) {
- prop := p.m[iface][property]
- prop.Value = v
- switch prop.Emit {
- case EmitFalse:
- // do nothing
- case EmitInvalidates:
- p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
- iface, map[string]dbus.Variant{}, []string{property})
- case EmitTrue:
- p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
- iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)},
- []string{})
- default:
- panic("invalid value for EmitType")
- }
-}
-
-// Set implements org.freedesktop.DBus.Properties.Set.
-func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error {
- p.mut.Lock()
- defer p.mut.Unlock()
- m, ok := p.m[iface]
- if !ok {
- return ErrIfaceNotFound
- }
- prop, ok := m[property]
- if !ok {
- return ErrPropNotFound
- }
- if !prop.Writable {
- return ErrReadOnly
- }
- if newv.Signature() != dbus.SignatureOf(prop.Value) {
- return ErrInvalidArg
- }
- if prop.Callback != nil {
- err := prop.Callback(&Change{p, iface, property, newv.Value()})
- if err != nil {
- return err
- }
- }
- p.set(iface, property, newv.Value())
- return nil
-}
-
-// SetMust sets the value of the given property and panics if the interface or
-// the property name is invalid.
-func (p *Properties) SetMust(iface, property string, v interface{}) {
- p.mut.Lock()
- p.set(iface, property, v)
- p.mut.Unlock()
-}
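
A hedged sketch of handing a property map to New; the interface name and property are illustrative, and a change made through SetMust is announced via PropertiesChanged because Emit is EmitTrue.

```go
package main

import (
	"github.com/guelfey/go.dbus"
	"github.com/guelfey/go.dbus/prop"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	props := map[string]map[string]*prop.Prop{
		"com.example.Demo": {
			"Volume": {
				Value:    uint32(50),
				Writable: true,
				Emit:     prop.EmitTrue,
			},
		},
	}
	// Exports org.freedesktop.DBus.Properties on the given path.
	p := prop.New(conn, "/com/example/Demo", props)
	// Updating the value locally also emits PropertiesChanged.
	p.SetMust("com.example.Demo", "Volume", uint32(75))
	select {}
}
```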
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/proto_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/proto_test.go
deleted file mode 100644
index 608a770..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/proto_test.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "encoding/binary"
- "io/ioutil"
- "math"
- "reflect"
- "testing"
-)
-
-var protoTests = []struct {
- vs []interface{}
- bigEndian []byte
- littleEndian []byte
-}{
- {
- []interface{}{int32(0)},
- []byte{0, 0, 0, 0},
- []byte{0, 0, 0, 0},
- },
- {
- []interface{}{true, false},
- []byte{0, 0, 0, 1, 0, 0, 0, 0},
- []byte{1, 0, 0, 0, 0, 0, 0, 0},
- },
- {
- []interface{}{byte(0), uint16(12), int16(32), uint32(43)},
- []byte{0, 0, 0, 12, 0, 32, 0, 0, 0, 0, 0, 43},
- []byte{0, 0, 12, 0, 32, 0, 0, 0, 43, 0, 0, 0},
- },
- {
- []interface{}{int64(-1), uint64(1<<64 - 1)},
- bytes.Repeat([]byte{255}, 16),
- bytes.Repeat([]byte{255}, 16),
- },
- {
- []interface{}{math.Inf(+1)},
- []byte{0x7f, 0xf0, 0, 0, 0, 0, 0, 0},
- []byte{0, 0, 0, 0, 0, 0, 0xf0, 0x7f},
- },
- {
- []interface{}{"foo"},
- []byte{0, 0, 0, 3, 'f', 'o', 'o', 0},
- []byte{3, 0, 0, 0, 'f', 'o', 'o', 0},
- },
- {
- []interface{}{Signature{"ai"}},
- []byte{2, 'a', 'i', 0},
- []byte{2, 'a', 'i', 0},
- },
- {
- []interface{}{[]int16{42, 256}},
- []byte{0, 0, 0, 4, 0, 42, 1, 0},
- []byte{4, 0, 0, 0, 42, 0, 0, 1},
- },
- {
- []interface{}{MakeVariant("foo")},
- []byte{1, 's', 0, 0, 0, 0, 0, 3, 'f', 'o', 'o', 0},
- []byte{1, 's', 0, 0, 3, 0, 0, 0, 'f', 'o', 'o', 0},
- },
- {
- []interface{}{MakeVariant(MakeVariant(Signature{"v"}))},
- []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0},
- []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0},
- },
- {
- []interface{}{map[int32]bool{42: true}},
- []byte{0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1},
- []byte{8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1, 0, 0, 0},
- },
- {
- []interface{}{map[string]Variant{}, byte(42)},
- []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
- []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
- },
- {
- []interface{}{[]uint64{}, byte(42)},
- []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
- []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
- },
-}
-
-func TestProto(t *testing.T) {
- for i, v := range protoTests {
- buf := new(bytes.Buffer)
- bigEnc := newEncoder(buf, binary.BigEndian)
- bigEnc.Encode(v.vs...)
- marshalled := buf.Bytes()
- if bytes.Compare(marshalled, v.bigEndian) != 0 {
- t.Errorf("test %d (marshal be): got '%v', but expected '%v'\n", i+1, marshalled,
- v.bigEndian)
- }
- buf.Reset()
- litEnc := newEncoder(buf, binary.LittleEndian)
- litEnc.Encode(v.vs...)
- marshalled = buf.Bytes()
- if bytes.Compare(marshalled, v.littleEndian) != 0 {
- t.Errorf("test %d (marshal le): got '%v', but expected '%v'\n", i+1, marshalled,
- v.littleEndian)
- }
- unmarshalled := reflect.MakeSlice(reflect.TypeOf(v.vs),
- 0, 0)
- for i := range v.vs {
- unmarshalled = reflect.Append(unmarshalled,
- reflect.New(reflect.TypeOf(v.vs[i])))
- }
- bigDec := newDecoder(bytes.NewReader(v.bigEndian), binary.BigEndian)
- vs, err := bigDec.Decode(SignatureOf(v.vs...))
- if err != nil {
- t.Errorf("test %d (unmarshal be): %s\n", i+1, err)
- continue
- }
- if !reflect.DeepEqual(vs, v.vs) {
- t.Errorf("test %d (unmarshal be): got %#v, but expected %#v\n", i+1, vs, v.vs)
- }
- litDec := newDecoder(bytes.NewReader(v.littleEndian), binary.LittleEndian)
- vs, err = litDec.Decode(SignatureOf(v.vs...))
- if err != nil {
- t.Errorf("test %d (unmarshal le): %s\n", i+1, err)
- continue
- }
- if !reflect.DeepEqual(vs, v.vs) {
- t.Errorf("test %d (unmarshal le): got %#v, but expected %#v\n", i+1, vs, v.vs)
- }
-
- }
-}
-
-func TestProtoMap(t *testing.T) {
- m := map[string]uint8{
- "foo": 23,
- "bar": 2,
- }
- var n map[string]uint8
- buf := new(bytes.Buffer)
- enc := newEncoder(buf, binary.LittleEndian)
- enc.Encode(m)
- dec := newDecoder(buf, binary.LittleEndian)
- vs, err := dec.Decode(Signature{"a{sy}"})
- if err != nil {
- t.Fatal(err)
- }
- if err = Store(vs, &n); err != nil {
- t.Fatal(err)
- }
- if len(n) != 2 || n["foo"] != 23 || n["bar"] != 2 {
- t.Error("got", n)
- }
-}
-
-func TestProtoVariantStruct(t *testing.T) {
- var variant Variant
- v := MakeVariant(struct {
- A int32
- B int16
- }{1, 2})
- buf := new(bytes.Buffer)
- enc := newEncoder(buf, binary.LittleEndian)
- enc.Encode(v)
- dec := newDecoder(buf, binary.LittleEndian)
- vs, err := dec.Decode(Signature{"v"})
- if err != nil {
- t.Fatal(err)
- }
- if err = Store(vs, &variant); err != nil {
- t.Fatal(err)
- }
- sl := variant.Value().([]interface{})
- v1, v2 := sl[0].(int32), sl[1].(int16)
- if v1 != int32(1) {
- t.Error("got", v1, "as first int")
- }
- if v2 != int16(2) {
- t.Error("got", v2, "as second int")
- }
-}
-
-func TestProtoStructTag(t *testing.T) {
- type Bar struct {
- A int32
- B chan interface{} `dbus:"-"`
- C int32
- }
- var bar1, bar2 Bar
- bar1.A = 234
- bar2.C = 345
- buf := new(bytes.Buffer)
- enc := newEncoder(buf, binary.LittleEndian)
- enc.Encode(bar1)
- dec := newDecoder(buf, binary.LittleEndian)
- vs, err := dec.Decode(Signature{"(ii)"})
- if err != nil {
- t.Fatal(err)
- }
- if err = Store(vs, &bar2); err != nil {
- t.Fatal(err)
- }
- if bar1 != bar2 {
- t.Error("struct tag test: got", bar2)
- }
-}
-
-func TestProtoStoreStruct(t *testing.T) {
- var foo struct {
- A int32
- B string
- c chan interface{}
- D interface{} `dbus:"-"`
- }
- src := []interface{}{[]interface{}{int32(42), "foo"}}
- err := Store(src, &foo)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestProtoStoreNestedStruct(t *testing.T) {
- var foo struct {
- A int32
- B struct {
- C string
- D float64
- }
- }
- src := []interface{}{
- []interface{}{
- int32(42),
- []interface{}{
- "foo",
- 3.14,
- },
- },
- }
- err := Store(src, &foo)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestMessage(t *testing.T) {
- buf := new(bytes.Buffer)
- message := new(Message)
- message.Type = TypeMethodCall
- message.serial = 32
- message.Headers = map[HeaderField]Variant{
- FieldPath: MakeVariant(ObjectPath("/org/foo/bar")),
- FieldMember: MakeVariant("baz"),
- }
- message.Body = make([]interface{}, 0)
- err := message.EncodeTo(buf, binary.LittleEndian)
- if err != nil {
- t.Error(err)
- }
- _, err = DecodeMessage(buf)
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestProtoStructInterfaces(t *testing.T) {
- b := []byte{42}
- vs, err := newDecoder(bytes.NewReader(b), binary.LittleEndian).Decode(Signature{"(y)"})
- if err != nil {
- t.Fatal(err)
- }
- if vs[0].([]interface{})[0].(byte) != 42 {
- t.Errorf("wrongs results (got %v)", vs)
- }
-}
-
-// ordinary org.freedesktop.DBus.Hello call
-var smallMessage = &Message{
- Type: TypeMethodCall,
- serial: 1,
- Headers: map[HeaderField]Variant{
- FieldDestination: MakeVariant("org.freedesktop.DBus"),
- FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")),
- FieldInterface: MakeVariant("org.freedesktop.DBus"),
- FieldMember: MakeVariant("Hello"),
- },
-}
-
-// org.freedesktop.Notifications.Notify
-var bigMessage = &Message{
- Type: TypeMethodCall,
- serial: 2,
- Headers: map[HeaderField]Variant{
- FieldDestination: MakeVariant("org.freedesktop.Notifications"),
- FieldPath: MakeVariant(ObjectPath("/org/freedesktop/Notifications")),
- FieldInterface: MakeVariant("org.freedesktop.Notifications"),
- FieldMember: MakeVariant("Notify"),
- FieldSignature: MakeVariant(Signature{"susssasa{sv}i"}),
- },
- Body: []interface{}{
- "app_name",
- uint32(0),
- "dialog-information",
- "Notification",
- "This is the body of a notification",
- []string{"ok", "Ok"},
- map[string]Variant{
- "sound-name": MakeVariant("dialog-information"),
- },
- int32(-1),
- },
-}
-
-func BenchmarkDecodeMessageSmall(b *testing.B) {
- var err error
- var rd *bytes.Reader
-
- b.StopTimer()
- buf := new(bytes.Buffer)
- err = smallMessage.EncodeTo(buf, binary.LittleEndian)
- if err != nil {
- b.Fatal(err)
- }
- decoded := buf.Bytes()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- rd = bytes.NewReader(decoded)
- _, err = DecodeMessage(rd)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkDecodeMessageBig(b *testing.B) {
- var err error
- var rd *bytes.Reader
-
- b.StopTimer()
- buf := new(bytes.Buffer)
- err = bigMessage.EncodeTo(buf, binary.LittleEndian)
- if err != nil {
- b.Fatal(err)
- }
- decoded := buf.Bytes()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- rd = bytes.NewReader(decoded)
- _, err = DecodeMessage(rd)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkEncodeMessageSmall(b *testing.B) {
- var err error
- for i := 0; i < b.N; i++ {
- err = smallMessage.EncodeTo(ioutil.Discard, binary.LittleEndian)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkEncodeMessageBig(b *testing.B) {
- var err error
- for i := 0; i < b.N; i++ {
- err = bigMessage.EncodeTo(ioutil.Discard, binary.LittleEndian)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig.go
deleted file mode 100644
index f45b53c..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package dbus
-
-import (
- "fmt"
- "reflect"
- "strings"
-)
-
-var sigToType = map[byte]reflect.Type{
- 'y': byteType,
- 'b': boolType,
- 'n': int16Type,
- 'q': uint16Type,
- 'i': int32Type,
- 'u': uint32Type,
- 'x': int64Type,
- 't': uint64Type,
- 'd': float64Type,
- 's': stringType,
- 'g': signatureType,
- 'o': objectPathType,
- 'v': variantType,
- 'h': unixFDIndexType,
-}
-
-// Signature represents a correct type signature as specified by the D-Bus
-// specification. The zero value represents the empty signature, "".
-type Signature struct {
- str string
-}
-
-// SignatureOf returns the concatenation of all the signatures of the given
-// values. It panics if one of them is not representable in D-Bus.
-func SignatureOf(vs ...interface{}) Signature {
- var s string
- for _, v := range vs {
- s += getSignature(reflect.TypeOf(v))
- }
- return Signature{s}
-}
-
-// SignatureOfType returns the signature of the given type. It panics if the
-// type is not representable in D-Bus.
-func SignatureOfType(t reflect.Type) Signature {
- return Signature{getSignature(t)}
-}
-
-// getSignature returns the signature of the given type and panics on unknown types.
-func getSignature(t reflect.Type) string {
- // handle simple types first
- switch t.Kind() {
- case reflect.Uint8:
- return "y"
- case reflect.Bool:
- return "b"
- case reflect.Int16:
- return "n"
- case reflect.Uint16:
- return "q"
- case reflect.Int32:
- if t == unixFDType {
- return "h"
- }
- return "i"
- case reflect.Uint32:
- if t == unixFDIndexType {
- return "h"
- }
- return "u"
- case reflect.Int64:
- return "x"
- case reflect.Uint64:
- return "t"
- case reflect.Float64:
- return "d"
- case reflect.Ptr:
- return getSignature(t.Elem())
- case reflect.String:
- if t == objectPathType {
- return "o"
- }
- return "s"
- case reflect.Struct:
- if t == variantType {
- return "v"
- } else if t == signatureType {
- return "g"
- }
- var s string
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
- if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
- s += getSignature(t.Field(i).Type)
- }
- }
- return "(" + s + ")"
- case reflect.Array, reflect.Slice:
- return "a" + getSignature(t.Elem())
- case reflect.Map:
- if !isKeyType(t.Key()) {
- panic(InvalidTypeError{t})
- }
- return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
- }
- panic(InvalidTypeError{t})
-}
-
-// ParseSignature returns the signature represented by this string, or a
-// SignatureError if the string is not a valid signature.
-func ParseSignature(s string) (sig Signature, err error) {
- if len(s) == 0 {
- return
- }
- if len(s) > 255 {
- return Signature{""}, SignatureError{s, "too long"}
- }
- sig.str = s
- for err == nil && len(s) != 0 {
- err, s = validSingle(s, 0)
- }
- if err != nil {
- sig = Signature{""}
- }
-
- return
-}
-
-// ParseSignatureMust behaves like ParseSignature, except that it panics if s
-// is not valid.
-func ParseSignatureMust(s string) Signature {
- sig, err := ParseSignature(s)
- if err != nil {
- panic(err)
- }
- return sig
-}
-
-// Empty returns whether the signature is the empty signature.
-func (s Signature) Empty() bool {
- return s.str == ""
-}
-
-// Single returns whether the signature represents a single, complete type.
-func (s Signature) Single() bool {
- err, r := validSingle(s.str, 0)
- return err == nil && r == ""
-}
-
-// String returns the signature's string representation.
-func (s Signature) String() string {
- return s.str
-}
-
-// A SignatureError indicates that a signature passed to a function or received
-// on a connection is not a valid signature.
-type SignatureError struct {
- Sig string
- Reason string
-}
-
-func (e SignatureError) Error() string {
- return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
-}
-
-// Try to read a single type from this string. If it was successful, err is nil
-// and rem is the remaining unparsed part. Otherwise, err is a non-nil
-// SignatureError and rem is "". depth is the current recursion depth which may
-// not be greater than 64 and should be given as 0 on the first call.
-func validSingle(s string, depth int) (err error, rem string) {
- if s == "" {
- return SignatureError{Sig: s, Reason: "empty signature"}, ""
- }
- if depth > 64 {
- return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
- }
- switch s[0] {
- case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
- return nil, s[1:]
- case 'a':
- if len(s) > 1 && s[1] == '{' {
- i := findMatching(s[1:], '{', '}')
- if i == -1 {
- return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
- }
- i++
- rem = s[i+1:]
- s = s[2:i]
- if err, _ = validSingle(s[:1], depth+1); err != nil {
- return err, ""
- }
- err, nr := validSingle(s[1:], depth+1)
- if err != nil {
- return err, ""
- }
- if nr != "" {
- return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
- }
- return nil, rem
- }
- return validSingle(s[1:], depth+1)
- case '(':
- i := findMatching(s, '(', ')')
- if i == -1 {
- return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
- }
- rem = s[i+1:]
- s = s[1:i]
- for err == nil && s != "" {
- err, s = validSingle(s, depth+1)
- }
- if err != nil {
- rem = ""
- }
- return
- }
- return SignatureError{Sig: s, Reason: "invalid type character"}, ""
-}
-
-func findMatching(s string, left, right rune) int {
- n := 0
- for i, v := range s {
- if v == left {
- n++
- } else if v == right {
- n--
- }
- if n == 0 {
- return i
- }
- }
- return -1
-}
-
-// typeFor returns the type of the given signature. It ignores any left over
-// characters and panics if s doesn't start with a valid type signature.
-func typeFor(s string) (t reflect.Type) {
- err, _ := validSingle(s, 0)
- if err != nil {
- panic(err)
- }
-
- if t, ok := sigToType[s[0]]; ok {
- return t
- }
- switch s[0] {
- case 'a':
- if s[1] == '{' {
- i := strings.LastIndex(s, "}")
- t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
- } else {
- t = reflect.SliceOf(typeFor(s[1:]))
- }
- case '(':
- t = interfacesType
- }
- return
-}
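
A few illustrative calls showing the Go-type-to-signature mapping implemented by getSignature, and the error reporting of ParseSignature.

```go
package main

import (
	"fmt"

	"github.com/guelfey/go.dbus"
)

func main() {
	// Basic and container types, following the switch in getSignature.
	fmt.Println(dbus.SignatureOf(uint32(0), "s"))            // us
	fmt.Println(dbus.SignatureOf(map[string]dbus.Variant{})) // a{sv}
	fmt.Println(dbus.SignatureOf([]struct {
		A int32
		B string
	}{})) // a(is)

	// Invalid signatures are rejected with a SignatureError.
	if _, err := dbus.ParseSignature("a{s"); err != nil {
		fmt.Println(err) // dbus: invalid signature: "a{s" (unmatched '{')
	}
}
```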
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig_test.go
deleted file mode 100644
index da37bc9..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/sig_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package dbus
-
-import (
- "testing"
-)
-
-var sigTests = []struct {
- vs []interface{}
- sig Signature
-}{
- {
- []interface{}{new(int32)},
- Signature{"i"},
- },
- {
- []interface{}{new(string)},
- Signature{"s"},
- },
- {
- []interface{}{new(Signature)},
- Signature{"g"},
- },
- {
- []interface{}{new([]int16)},
- Signature{"an"},
- },
- {
- []interface{}{new(int16), new(uint32)},
- Signature{"nu"},
- },
- {
- []interface{}{new(map[byte]Variant)},
- Signature{"a{yv}"},
- },
- {
- []interface{}{new(Variant), new([]map[int32]string)},
- Signature{"vaa{is}"},
- },
-}
-
-func TestSig(t *testing.T) {
- for i, v := range sigTests {
- sig := SignatureOf(v.vs...)
- if sig != v.sig {
- t.Errorf("test %d: got %q, expected %q", i+1, sig.str, v.sig.str)
- }
- }
-}
-
-var getSigTest = []interface{}{
- []struct {
- b byte
- i int32
- t uint64
- s string
- }{},
- map[string]Variant{},
-}
-
-func BenchmarkGetSignatureSimple(b *testing.B) {
- for i := 0; i < b.N; i++ {
- SignatureOf("", int32(0))
- }
-}
-
-func BenchmarkGetSignatureLong(b *testing.B) {
- for i := 0; i < b.N; i++ {
- SignatureOf(getSigTest...)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_darwin.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_darwin.go
deleted file mode 100644
index 1bba0d6..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_darwin.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package dbus
-
-func (t *unixTransport) SendNullByte() error {
- _, err := t.Write([]byte{0})
- return err
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_generic.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_generic.go
deleted file mode 100644
index 46f8f49..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_generic.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package dbus
-
-import (
- "encoding/binary"
- "errors"
- "io"
-)
-
-type genericTransport struct {
- io.ReadWriteCloser
-}
-
-func (t genericTransport) SendNullByte() error {
- _, err := t.Write([]byte{0})
- return err
-}
-
-func (t genericTransport) SupportsUnixFDs() bool {
- return false
-}
-
-func (t genericTransport) EnableUnixFDs() {}
-
-func (t genericTransport) ReadMessage() (*Message, error) {
- return DecodeMessage(t)
-}
-
-func (t genericTransport) SendMessage(msg *Message) error {
- for _, v := range msg.Body {
- if _, ok := v.(UnixFD); ok {
- return errors.New("dbus: unix fd passing not enabled")
- }
- }
- return msg.EncodeTo(t, binary.LittleEndian)
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix.go
deleted file mode 100644
index d16229b..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "net"
- "syscall"
-)
-
-type oobReader struct {
- conn *net.UnixConn
- oob []byte
- buf [4096]byte
-}
-
-func (o *oobReader) Read(b []byte) (n int, err error) {
- n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
- if err != nil {
- return n, err
- }
- if flags&syscall.MSG_CTRUNC != 0 {
- return n, errors.New("dbus: control data truncated (too many fds received)")
- }
- o.oob = append(o.oob, o.buf[:oobn]...)
- return n, nil
-}
-
-type unixTransport struct {
- *net.UnixConn
- hasUnixFDs bool
-}
-
-func newUnixTransport(keys string) (transport, error) {
- var err error
-
- t := new(unixTransport)
- abstract := getKey(keys, "abstract")
- path := getKey(keys, "path")
- switch {
- case abstract == "" && path == "":
- return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
- case abstract != "" && path == "":
- t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
- if err != nil {
- return nil, err
- }
- return t, nil
- case abstract == "" && path != "":
- t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
- if err != nil {
- return nil, err
- }
- return t, nil
- default:
- return nil, errors.New("dbus: invalid address (both path and abstract set)")
- }
-}
-
-func (t *unixTransport) EnableUnixFDs() {
- t.hasUnixFDs = true
-}
-
-func (t *unixTransport) ReadMessage() (*Message, error) {
- var (
- blen, hlen uint32
- csheader [16]byte
- headers []header
- order binary.ByteOrder
- unixfds uint32
- )
- // To be sure that all bytes of out-of-band data are read, we use a special
- // reader that uses ReadMsgUnix on the underlying connection instead of Read
- // and gathers the out-of-band data in a buffer.
- rd := &oobReader{conn: t.UnixConn}
- // read the first 16 bytes (the part of the header that has a constant size),
- // from which we can figure out the length of the rest of the message
- if _, err := io.ReadFull(rd, csheader[:]); err != nil {
- return nil, err
- }
- switch csheader[0] {
- case 'l':
- order = binary.LittleEndian
- case 'B':
- order = binary.BigEndian
- default:
- return nil, InvalidMessageError("invalid byte order")
- }
- // csheader[4:8] -> length of message body, csheader[12:16] -> length of
- // header fields (without alignment)
- binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
- binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
- if hlen%8 != 0 {
- hlen += 8 - (hlen % 8)
- }
-
- // decode headers and look for unix fds
- headerdata := make([]byte, hlen+4)
- copy(headerdata, csheader[12:])
- if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
- return nil, err
- }
- dec := newDecoder(bytes.NewBuffer(headerdata), order)
- dec.pos = 12
- vs, err := dec.Decode(Signature{"a(yv)"})
- if err != nil {
- return nil, err
- }
- Store(vs, &headers)
- for _, v := range headers {
- if v.Field == byte(FieldUnixFDs) {
- unixfds, _ = v.Variant.value.(uint32)
- }
- }
- all := make([]byte, 16+hlen+blen)
- copy(all, csheader[:])
- copy(all[16:], headerdata[4:])
- if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
- return nil, err
- }
- if unixfds != 0 {
- if !t.hasUnixFDs {
- return nil, errors.New("dbus: got unix fds on unsupported transport")
- }
- // read the fds from the OOB data
- scms, err := syscall.ParseSocketControlMessage(rd.oob)
- if err != nil {
- return nil, err
- }
- if len(scms) != 1 {
- return nil, errors.New("dbus: received more than one socket control message")
- }
- fds, err := syscall.ParseUnixRights(&scms[0])
- if err != nil {
- return nil, err
- }
- msg, err := DecodeMessage(bytes.NewBuffer(all))
- if err != nil {
- return nil, err
- }
- // substitute the values in the message body (which are indices for the
- // array of fds received via OOB) with the actual values
- for i, v := range msg.Body {
- if j, ok := v.(UnixFDIndex); ok {
- if uint32(j) >= unixfds {
- return nil, InvalidMessageError("invalid index for unix fd")
- }
- msg.Body[i] = UnixFD(fds[j])
- }
- }
- return msg, nil
- }
- return DecodeMessage(bytes.NewBuffer(all))
-}
-
-func (t *unixTransport) SendMessage(msg *Message) error {
- fds := make([]int, 0)
- for i, v := range msg.Body {
- if fd, ok := v.(UnixFD); ok {
- msg.Body[i] = UnixFDIndex(len(fds))
- fds = append(fds, int(fd))
- }
- }
- if len(fds) != 0 {
- if !t.hasUnixFDs {
- return errors.New("dbus: unix fd passing not enabled")
- }
- msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
- oob := syscall.UnixRights(fds...)
- buf := new(bytes.Buffer)
- msg.EncodeTo(buf, binary.LittleEndian)
- n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
- if err != nil {
- return err
- }
- if n != buf.Len() || oobn != len(oob) {
- return io.ErrShortWrite
- }
- } else {
- if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (t *unixTransport) SupportsUnixFDs() bool {
- return true
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix_test.go
deleted file mode 100644
index 302233f..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unix_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package dbus
-
-import (
- "os"
- "testing"
-)
-
-const testString = `This is a test!
-This text should be read from the file that is created by this test.`
-
-type unixFDTest struct{}
-
-func (t unixFDTest) Test(fd UnixFD) (string, *Error) {
- var b [4096]byte
- file := os.NewFile(uintptr(fd), "testfile")
- defer file.Close()
- n, err := file.Read(b[:])
- if err != nil {
- return "", &Error{"com.github.guelfey.test.Error", nil}
- }
- return string(b[:n]), nil
-}
-
-func TestUnixFDs(t *testing.T) {
- conn, err := SessionBus()
- if err != nil {
- t.Fatal(err)
- }
- r, w, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
- defer w.Close()
- if _, err := w.Write([]byte(testString)); err != nil {
- t.Fatal(err)
- }
- name := conn.Names()[0]
- test := unixFDTest{}
- conn.Export(test, "/com/github/guelfey/test", "com.github.guelfey.test")
- var s string
- obj := conn.Object(name, "/com/github/guelfey/test")
- err = obj.Call("com.github.guelfey.test.Test", 0, UnixFD(r.Fd())).Store(&s)
- if err != nil {
- t.Fatal(err)
- }
- if s != testString {
- t.Fatal("got", s, "wanted", testString)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unixcred.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unixcred.go
deleted file mode 100644
index 42a0e76..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/transport_unixcred.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !darwin
-
-package dbus
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-func (t *unixTransport) SendNullByte() error {
- ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
- b := syscall.UnixCredentials(ucred)
- _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
- if err != nil {
- return err
- }
- if oobn != len(b) {
- return io.ErrShortWrite
- }
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant.go
deleted file mode 100644
index b1b53ce..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// Variant represents the D-Bus variant type.
-type Variant struct {
- sig Signature
- value interface{}
-}
-
-// MakeVariant converts the given value to a Variant. It panics if v cannot be
-// represented as a D-Bus type.
-func MakeVariant(v interface{}) Variant {
- return Variant{SignatureOf(v), v}
-}
-
-// ParseVariant parses the given string as a variant as described at
-// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
-// empty, it is taken to be the expected signature for the variant.
-func ParseVariant(s string, sig Signature) (Variant, error) {
- tokens := varLex(s)
- p := &varParser{tokens: tokens}
- n, err := varMakeNode(p)
- if err != nil {
- return Variant{}, err
- }
- if sig.str == "" {
- sig, err = varInfer(n)
- if err != nil {
- return Variant{}, err
- }
- }
- v, err := n.Value(sig)
- if err != nil {
- return Variant{}, err
- }
- return MakeVariant(v), nil
-}
-
-// format returns a formatted version of v and whether this string can be parsed
-// unambiguously.
-func (v Variant) format() (string, bool) {
- switch v.sig.str[0] {
- case 'b', 'i':
- return fmt.Sprint(v.value), true
- case 'n', 'q', 'u', 'x', 't', 'd', 'h':
- return fmt.Sprint(v.value), false
- case 's':
- return strconv.Quote(v.value.(string)), true
- case 'o':
- return strconv.Quote(string(v.value.(ObjectPath))), false
- case 'g':
- return strconv.Quote(v.value.(Signature).str), false
- case 'v':
- s, unamb := v.value.(Variant).format()
- if !unamb {
- return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
- }
- return "<" + s + ">", true
- case 'y':
- return fmt.Sprintf("%#x", v.value.(byte)), false
- }
- rv := reflect.ValueOf(v.value)
- switch rv.Kind() {
- case reflect.Slice:
- if rv.Len() == 0 {
- return "[]", false
- }
- unamb := true
- buf := bytes.NewBuffer([]byte("["))
- for i := 0; i < rv.Len(); i++ {
- // TODO: slooow
- s, b := MakeVariant(rv.Index(i).Interface()).format()
- unamb = unamb && b
- buf.WriteString(s)
- if i != rv.Len()-1 {
- buf.WriteString(", ")
- }
- }
- buf.WriteByte(']')
- return buf.String(), unamb
- case reflect.Map:
- if rv.Len() == 0 {
- return "{}", false
- }
- unamb := true
- buf := bytes.NewBuffer([]byte("{"))
- for i, k := range rv.MapKeys() {
- s, b := MakeVariant(k.Interface()).format()
- unamb = unamb && b
- buf.WriteString(s)
- buf.WriteString(": ")
- s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
- unamb = unamb && b
- buf.WriteString(s)
- if i != rv.Len()-1 {
- buf.WriteString(", ")
- }
- }
- buf.WriteByte('}')
- return buf.String(), unamb
- }
- return `"INVALID"`, true
-}
-
-// Signature returns the D-Bus signature of the underlying value of v.
-func (v Variant) Signature() Signature {
- return v.sig
-}
-
-// String returns the string representation of the underlying value of v as
-// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
-func (v Variant) String() string {
- s, unamb := v.format()
- if !unamb {
- return "@" + v.sig.str + " " + s
- }
- return s
-}
-
-// Value returns the underlying value of v.
-func (v Variant) Value() interface{} {
- return v.value
-}
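
A short sketch of creating, printing and parsing variants; the outputs noted in the comments follow the format rules above and are indicative rather than authoritative.

```go
package main

import (
	"fmt"

	"github.com/guelfey/go.dbus"
)

func main() {
	v := dbus.MakeVariant(map[string]int32{"answer": 42})
	fmt.Println(v.Signature().String()) // a{si}
	fmt.Println(v.String())             // {"answer": 42}

	// Parse the GVariant text format; with an empty signature the type is
	// inferred from the text itself.
	parsed, err := dbus.ParseVariant("['red', 'green', 'blue']", dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Signature().String()) // as
	fmt.Println(parsed.Value())              // [red green blue]
}
```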
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_lexer.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_lexer.go
deleted file mode 100644
index 332007d..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_lexer.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package dbus
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// Heavily inspired by the lexer from text/template.
-
-type varToken struct {
- typ varTokenType
- val string
-}
-
-type varTokenType byte
-
-const (
- tokEOF varTokenType = iota
- tokError
- tokNumber
- tokString
- tokBool
- tokArrayStart
- tokArrayEnd
- tokDictStart
- tokDictEnd
- tokVariantStart
- tokVariantEnd
- tokComma
- tokColon
- tokType
- tokByteString
-)
-
-type varLexer struct {
- input string
- start int
- pos int
- width int
- tokens []varToken
-}
-
-type lexState func(*varLexer) lexState
-
-func varLex(s string) []varToken {
- l := &varLexer{input: s}
- l.run()
- return l.tokens
-}
-
-func (l *varLexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *varLexer) backup() {
- l.pos -= l.width
-}
-
-func (l *varLexer) emit(t varTokenType) {
- l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
- l.start = l.pos
-}
-
-func (l *varLexer) errorf(format string, v ...interface{}) lexState {
- l.tokens = append(l.tokens, varToken{
- tokError,
- fmt.Sprintf(format, v...),
- })
- return nil
-}
-
-func (l *varLexer) ignore() {
- l.start = l.pos
-}
-
-func (l *varLexer) next() rune {
- var r rune
-
- if l.pos >= len(l.input) {
- l.width = 0
- return -1
- }
- r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
- l.pos += l.width
- return r
-}
-
-func (l *varLexer) run() {
- for state := varLexNormal; state != nil; {
- state = state(l)
- }
-}
-
-func (l *varLexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func varLexNormal(l *varLexer) lexState {
- for {
- r := l.next()
- switch {
- case r == -1:
- l.emit(tokEOF)
- return nil
- case r == '[':
- l.emit(tokArrayStart)
- case r == ']':
- l.emit(tokArrayEnd)
- case r == '{':
- l.emit(tokDictStart)
- case r == '}':
- l.emit(tokDictEnd)
- case r == '<':
- l.emit(tokVariantStart)
- case r == '>':
- l.emit(tokVariantEnd)
- case r == ':':
- l.emit(tokColon)
- case r == ',':
- l.emit(tokComma)
- case r == '\'' || r == '"':
- l.backup()
- return varLexString
- case r == '@':
- l.backup()
- return varLexType
- case unicode.IsSpace(r):
- l.ignore()
- case unicode.IsNumber(r) || r == '+' || r == '-':
- l.backup()
- return varLexNumber
- case r == 'b':
- pos := l.start
- if n := l.peek(); n == '"' || n == '\'' {
- return varLexByteString
- }
- // not a byte string; try to parse it as a type or bool below
- l.pos = pos + 1
- l.width = 1
- fallthrough
- default:
- // either a bool or a type. Try bools first.
- l.backup()
- if l.pos+4 <= len(l.input) {
- if l.input[l.pos:l.pos+4] == "true" {
- l.pos += 4
- l.emit(tokBool)
- continue
- }
- }
- if l.pos+5 <= len(l.input) {
- if l.input[l.pos:l.pos+5] == "false" {
- l.pos += 5
- l.emit(tokBool)
- continue
- }
- }
- // must be a type.
- return varLexType
- }
- }
-}
-
-var varTypeMap = map[string]string{
- "boolean": "b",
- "byte": "y",
- "int16": "n",
- "uint16": "q",
- "int32": "i",
- "uint32": "u",
- "int64": "x",
- "uint64": "t",
- "double": "f",
- "string": "s",
- "objectpath": "o",
- "signature": "g",
-}
-
-func varLexByteString(l *varLexer) lexState {
- q := l.next()
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != -1 {
- break
- }
- fallthrough
- case -1:
- return l.errorf("unterminated bytestring")
- case q:
- break Loop
- }
- }
- l.emit(tokByteString)
- return varLexNormal
-}
-
-func varLexNumber(l *varLexer) lexState {
- l.accept("+-")
- digits := "0123456789"
- if l.accept("0") {
- if l.accept("x") {
- digits = "0123456789abcdefABCDEF"
- } else {
- digits = "01234567"
- }
- }
- for strings.IndexRune(digits, l.next()) >= 0 {
- }
- l.backup()
- if l.accept(".") {
- for strings.IndexRune(digits, l.next()) >= 0 {
- }
- l.backup()
- }
- if l.accept("eE") {
- l.accept("+-")
- for strings.IndexRune("0123456789", l.next()) >= 0 {
- }
- l.backup()
- }
- if r := l.peek(); unicode.IsLetter(r) {
- l.next()
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(tokNumber)
- return varLexNormal
-}
-
-func varLexString(l *varLexer) lexState {
- q := l.next()
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != -1 {
- break
- }
- fallthrough
- case -1:
- return l.errorf("unterminated string")
- case q:
- break Loop
- }
- }
- l.emit(tokString)
- return varLexNormal
-}
-
-func varLexType(l *varLexer) lexState {
- at := l.accept("@")
- for {
- r := l.next()
- if r == -1 {
- break
- }
- if unicode.IsSpace(r) {
- l.backup()
- break
- }
- }
- if at {
- if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
- return l.errorf("%s", err)
- }
- } else {
- if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
- l.emit(tokType)
- return varLexNormal
- }
- return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
- }
- l.emit(tokType)
- return varLexNormal
-}
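The lexer removed above follows the state-function pattern: each state is a function that consumes input and returns the next state, and run() loops until a state returns nil. A minimal, self-contained sketch of that pattern (hypothetical token names, not the D-Bus lexer itself):

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

type token struct {
	typ string
	val string
}

type lexer struct {
	input  string
	start  int
	pos    int
	width  int
	tokens []token
}

// stateFn mirrors the lexState idea: a state consumes input and
// returns the next state, or nil when lexing is finished.
type stateFn func(*lexer) stateFn

func (l *lexer) next() rune {
	if l.pos >= len(l.input) {
		l.width = 0
		return -1
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.pos += w
	l.width = w
	return r
}

func (l *lexer) backup() { l.pos -= l.width }

func (l *lexer) emit(typ string) {
	l.tokens = append(l.tokens, token{typ, l.input[l.start:l.pos]})
	l.start = l.pos
}

func lexAny(l *lexer) stateFn {
	switch r := l.next(); {
	case r == -1:
		l.emit("EOF")
		return nil
	case unicode.IsDigit(r):
		l.backup()
		return lexNumber
	default:
		l.emit("CHAR")
		return lexAny
	}
}

func lexNumber(l *lexer) stateFn {
	for r := l.next(); unicode.IsDigit(r); r = l.next() {
	}
	l.backup()
	l.emit("NUMBER")
	return lexAny
}

func main() {
	l := &lexer{input: "ab12c3"}
	for state := lexAny; state != nil; {
		state = state(l)
	}
	fmt.Println(l.tokens)
}
```

The pattern keeps per-token logic local to one function while the shared lexer struct tracks position and the emitted tokens.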
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_parser.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_parser.go
deleted file mode 100644
index d20f5da..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_parser.go
+++ /dev/null
@@ -1,817 +0,0 @@
-package dbus
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-type varParser struct {
- tokens []varToken
- i int
-}
-
-func (p *varParser) backup() {
- p.i--
-}
-
-func (p *varParser) next() varToken {
- if p.i < len(p.tokens) {
- t := p.tokens[p.i]
- p.i++
- return t
- }
- return varToken{typ: tokEOF}
-}
-
-type varNode interface {
- Infer() (Signature, error)
- String() string
- Sigs() sigSet
- Value(Signature) (interface{}, error)
-}
-
-func varMakeNode(p *varParser) (varNode, error) {
- var sig Signature
-
- for {
- t := p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- case tokNumber:
- return varMakeNumNode(t, sig)
- case tokString:
- return varMakeStringNode(t, sig)
- case tokBool:
- if sig.str != "" && sig.str != "b" {
- return nil, varTypeError{t.val, sig}
- }
- b, err := strconv.ParseBool(t.val)
- if err != nil {
- return nil, err
- }
- return boolNode(b), nil
- case tokArrayStart:
- return varMakeArrayNode(p, sig)
- case tokVariantStart:
- return varMakeVariantNode(p, sig)
- case tokDictStart:
- return varMakeDictNode(p, sig)
- case tokType:
- if sig.str != "" {
- return nil, errors.New("unexpected type annotation")
- }
- if t.val[0] == '@' {
- sig.str = t.val[1:]
- } else {
- sig.str = varTypeMap[t.val]
- }
- case tokByteString:
- if sig.str != "" && sig.str != "ay" {
- return nil, varTypeError{t.val, sig}
- }
- b, err := varParseByteString(t.val)
- if err != nil {
- return nil, err
- }
- return byteStringNode(b), nil
- default:
- return nil, fmt.Errorf("unexpected %q", t.val)
- }
- }
-}
-
-type varTypeError struct {
- val string
- sig Signature
-}
-
-func (e varTypeError) Error() string {
- return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
-}
-
-type sigSet map[Signature]bool
-
-func (s sigSet) Empty() bool {
- return len(s) == 0
-}
-
-func (s sigSet) Intersect(s2 sigSet) sigSet {
- r := make(sigSet)
- for k := range s {
- if s2[k] {
- r[k] = true
- }
- }
- return r
-}
-
-func (s sigSet) Single() (Signature, bool) {
- if len(s) == 1 {
- for k := range s {
- return k, true
- }
- }
- return Signature{}, false
-}
-
-func (s sigSet) ToArray() sigSet {
- r := make(sigSet, len(s))
- for k := range s {
- r[Signature{"a" + k.str}] = true
- }
- return r
-}
-
-type numNode struct {
- sig Signature
- str string
- val interface{}
-}
-
-var numSigSet = sigSet{
- Signature{"y"}: true,
- Signature{"n"}: true,
- Signature{"q"}: true,
- Signature{"i"}: true,
- Signature{"u"}: true,
- Signature{"x"}: true,
- Signature{"t"}: true,
- Signature{"d"}: true,
-}
-
-func (n numNode) Infer() (Signature, error) {
- if strings.ContainsAny(n.str, ".e") {
- return Signature{"d"}, nil
- }
- return Signature{"i"}, nil
-}
-
-func (n numNode) String() string {
- return n.str
-}
-
-func (n numNode) Sigs() sigSet {
- if n.sig.str != "" {
- return sigSet{n.sig: true}
- }
- if strings.ContainsAny(n.str, ".e") {
- return sigSet{Signature{"d"}: true}
- }
- return numSigSet
-}
-
-func (n numNode) Value(sig Signature) (interface{}, error) {
- if n.sig.str != "" && n.sig != sig {
- return nil, varTypeError{n.str, sig}
- }
- if n.val != nil {
- return n.val, nil
- }
- return varNumAs(n.str, sig)
-}
-
-func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
- if sig.str == "" {
- return numNode{str: tok.val}, nil
- }
- num, err := varNumAs(tok.val, sig)
- if err != nil {
- return nil, err
- }
- return numNode{sig: sig, val: num}, nil
-}
-
-func varNumAs(s string, sig Signature) (interface{}, error) {
- isUnsigned := false
- size := 32
- switch sig.str {
- case "n":
- size = 16
- case "i":
- case "x":
- size = 64
- case "y":
- size = 8
- isUnsigned = true
- case "q":
- size = 16
- isUnsigned = true
- case "u":
- isUnsigned = true
- case "t":
- size = 64
- isUnsigned = true
- case "d":
- d, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return nil, err
- }
- return d, nil
- default:
- return nil, varTypeError{s, sig}
- }
- base := 10
- if strings.HasPrefix(s, "0x") {
- base = 16
- s = s[2:]
- }
- if strings.HasPrefix(s, "0") && len(s) != 1 {
- base = 8
- s = s[1:]
- }
- if isUnsigned {
- i, err := strconv.ParseUint(s, base, size)
- if err != nil {
- return nil, err
- }
- var v interface{} = i
- switch sig.str {
- case "y":
- v = byte(i)
- case "q":
- v = uint16(i)
- case "u":
- v = uint32(i)
- }
- return v, nil
- }
- i, err := strconv.ParseInt(s, base, size)
- if err != nil {
- return nil, err
- }
- var v interface{} = i
- switch sig.str {
- case "n":
- v = int16(i)
- case "i":
- v = int32(i)
- }
- return v, nil
-}
-
-type stringNode struct {
- sig Signature
- str string // parsed
- val interface{} // has correct type
-}
-
-var stringSigSet = sigSet{
- Signature{"s"}: true,
- Signature{"g"}: true,
- Signature{"o"}: true,
-}
-
-func (n stringNode) Infer() (Signature, error) {
- return Signature{"s"}, nil
-}
-
-func (n stringNode) String() string {
- return n.str
-}
-
-func (n stringNode) Sigs() sigSet {
- if n.sig.str != "" {
- return sigSet{n.sig: true}
- }
- return stringSigSet
-}
-
-func (n stringNode) Value(sig Signature) (interface{}, error) {
- if n.sig.str != "" && n.sig != sig {
- return nil, varTypeError{n.str, sig}
- }
- if n.val != nil {
- return n.val, nil
- }
- switch {
- case sig.str == "g":
- return Signature{n.str}, nil
- case sig.str == "o":
- return ObjectPath(n.str), nil
- case sig.str == "s":
- return n.str, nil
- default:
- return nil, varTypeError{n.str, sig}
- }
-}
-
-func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
- if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
- return nil, fmt.Errorf("invalid type %q for string", sig.str)
- }
- s, err := varParseString(tok.val)
- if err != nil {
- return nil, err
- }
- n := stringNode{str: s}
- if sig.str == "" {
- return stringNode{str: s}, nil
- }
- n.sig = sig
- switch sig.str {
- case "o":
- n.val = ObjectPath(s)
- case "g":
- n.val = Signature{s}
- case "s":
- n.val = s
- }
- return n, nil
-}
-
-func varParseString(s string) (string, error) {
- // quotes are guaranteed to be there
- s = s[1 : len(s)-1]
- buf := new(bytes.Buffer)
- for len(s) != 0 {
- r, size := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && size == 1 {
- return "", errors.New("invalid UTF-8")
- }
- s = s[size:]
- if r != '\\' {
- buf.WriteRune(r)
- continue
- }
- r, size = utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && size == 1 {
- return "", errors.New("invalid UTF-8")
- }
- s = s[size:]
- switch r {
- case 'a':
- buf.WriteRune(0x7)
- case 'b':
- buf.WriteRune(0x8)
- case 'f':
- buf.WriteRune(0xc)
- case 'n':
- buf.WriteRune('\n')
- case 'r':
- buf.WriteRune('\r')
- case 't':
- buf.WriteRune('\t')
- case '\n':
- case 'u':
- if len(s) < 4 {
- return "", errors.New("short unicode escape")
- }
- r, err := strconv.ParseUint(s[:4], 16, 32)
- if err != nil {
- return "", err
- }
- buf.WriteRune(rune(r))
- s = s[4:]
- case 'U':
- if len(s) < 8 {
- return "", errors.New("short unicode escape")
- }
- r, err := strconv.ParseUint(s[:8], 16, 32)
- if err != nil {
- return "", err
- }
- buf.WriteRune(rune(r))
- s = s[8:]
- default:
- buf.WriteRune(r)
- }
- }
- return buf.String(), nil
-}
-
-var boolSigSet = sigSet{Signature{"b"}: true}
-
-type boolNode bool
-
-func (boolNode) Infer() (Signature, error) {
- return Signature{"b"}, nil
-}
-
-func (b boolNode) String() string {
- if b {
- return "true"
- }
- return "false"
-}
-
-func (boolNode) Sigs() sigSet {
- return boolSigSet
-}
-
-func (b boolNode) Value(sig Signature) (interface{}, error) {
- if sig.str != "b" {
- return nil, varTypeError{b.String(), sig}
- }
- return bool(b), nil
-}
-
-type arrayNode struct {
- set sigSet
- children []varNode
- val interface{}
-}
-
-func (n arrayNode) Infer() (Signature, error) {
- for _, v := range n.children {
- csig, err := varInfer(v)
- if err != nil {
- continue
- }
- return Signature{"a" + csig.str}, nil
- }
- return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
-}
-
-func (n arrayNode) String() string {
- s := "["
- for i, v := range n.children {
- s += v.String()
- if i != len(n.children)-1 {
- s += ", "
- }
- }
- return s + "]"
-}
-
-func (n arrayNode) Sigs() sigSet {
- return n.set
-}
-
-func (n arrayNode) Value(sig Signature) (interface{}, error) {
- if n.set.Empty() {
- // no type information whatsoever, so this must be an empty slice
- return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
- }
- if !n.set[sig] {
- return nil, varTypeError{n.String(), sig}
- }
- s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
- for i, v := range n.children {
- rv, err := v.Value(Signature{sig.str[1:]})
- if err != nil {
- return nil, err
- }
- s.Index(i).Set(reflect.ValueOf(rv))
- }
- return s.Interface(), nil
-}
-
-func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
- var n arrayNode
- if sig.str != "" {
- n.set = sigSet{sig: true}
- }
- if t := p.next(); t.typ == tokArrayEnd {
- return n, nil
- } else {
- p.backup()
- }
-Loop:
- for {
- t := p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- }
- p.backup()
- cn, err := varMakeNode(p)
- if err != nil {
- return nil, err
- }
- if cset := cn.Sigs(); !cset.Empty() {
- if n.set.Empty() {
- n.set = cset.ToArray()
- } else {
- nset := cset.ToArray().Intersect(n.set)
- if nset.Empty() {
- return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
- }
- n.set = nset
- }
- }
- n.children = append(n.children, cn)
- switch t := p.next(); t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- case tokArrayEnd:
- break Loop
- case tokComma:
- continue
- default:
- return nil, fmt.Errorf("unexpected %q", t.val)
- }
- }
- return n, nil
-}
-
-type variantNode struct {
- n varNode
-}
-
-var variantSet = sigSet{
- Signature{"v"}: true,
-}
-
-func (variantNode) Infer() (Signature, error) {
- return Signature{"v"}, nil
-}
-
-func (n variantNode) String() string {
- return "<" + n.n.String() + ">"
-}
-
-func (variantNode) Sigs() sigSet {
- return variantSet
-}
-
-func (n variantNode) Value(sig Signature) (interface{}, error) {
- if sig.str != "v" {
- return nil, varTypeError{n.String(), sig}
- }
- sig, err := varInfer(n.n)
- if err != nil {
- return nil, err
- }
- v, err := n.n.Value(sig)
- if err != nil {
- return nil, err
- }
- return MakeVariant(v), nil
-}
-
-func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
- n, err := varMakeNode(p)
- if err != nil {
- return nil, err
- }
- if t := p.next(); t.typ != tokVariantEnd {
- return nil, fmt.Errorf("unexpected %q", t.val)
- }
- vn := variantNode{n}
- if sig.str != "" && sig.str != "v" {
- return nil, varTypeError{vn.String(), sig}
- }
- return variantNode{n}, nil
-}
-
-type dictEntry struct {
- key, val varNode
-}
-
-type dictNode struct {
- kset, vset sigSet
- children []dictEntry
- val interface{}
-}
-
-func (n dictNode) Infer() (Signature, error) {
- for _, v := range n.children {
- ksig, err := varInfer(v.key)
- if err != nil {
- continue
- }
- vsig, err := varInfer(v.val)
- if err != nil {
- continue
- }
- return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
- }
- return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
-}
-
-func (n dictNode) String() string {
- s := "{"
- for i, v := range n.children {
- s += v.key.String() + ": " + v.val.String()
- if i != len(n.children)-1 {
- s += ", "
- }
- }
- return s + "}"
-}
-
-func (n dictNode) Sigs() sigSet {
- r := sigSet{}
- for k := range n.kset {
- for v := range n.vset {
- sig := "a{" + k.str + v.str + "}"
- r[Signature{sig}] = true
- }
- }
- return r
-}
-
-func (n dictNode) Value(sig Signature) (interface{}, error) {
- set := n.Sigs()
- if set.Empty() {
- // no type information -> empty dict
- return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
- }
- if !set[sig] {
- return nil, varTypeError{n.String(), sig}
- }
- m := reflect.MakeMap(typeFor(sig.str))
- ksig := Signature{sig.str[2:3]}
- vsig := Signature{sig.str[3 : len(sig.str)-1]}
- for _, v := range n.children {
- kv, err := v.key.Value(ksig)
- if err != nil {
- return nil, err
- }
- vv, err := v.val.Value(vsig)
- if err != nil {
- return nil, err
- }
- m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
- }
- return m.Interface(), nil
-}
-
-func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
- var n dictNode
-
- if sig.str != "" {
- if len(sig.str) < 5 {
- return nil, fmt.Errorf("invalid signature %q for dict type", sig)
- }
- ksig := Signature{string(sig.str[2])}
- vsig := Signature{sig.str[3 : len(sig.str)-1]}
- n.kset = sigSet{ksig: true}
- n.vset = sigSet{vsig: true}
- }
- if t := p.next(); t.typ == tokDictEnd {
- return n, nil
- } else {
- p.backup()
- }
-Loop:
- for {
- t := p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- }
- p.backup()
- kn, err := varMakeNode(p)
- if err != nil {
- return nil, err
- }
- if kset := kn.Sigs(); !kset.Empty() {
- if n.kset.Empty() {
- n.kset = kset
- } else {
- n.kset = kset.Intersect(n.kset)
- if n.kset.Empty() {
- return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
- }
- }
- }
- t = p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- case tokColon:
- default:
- return nil, fmt.Errorf("unexpected %q", t.val)
- }
- t = p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- }
- p.backup()
- vn, err := varMakeNode(p)
- if err != nil {
- return nil, err
- }
- if vset := vn.Sigs(); !vset.Empty() {
- if n.vset.Empty() {
- n.vset = vset
- } else {
- n.vset = n.vset.Intersect(vset)
- if n.vset.Empty() {
- return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
- }
- }
- }
- n.children = append(n.children, dictEntry{kn, vn})
- t = p.next()
- switch t.typ {
- case tokEOF:
- return nil, io.ErrUnexpectedEOF
- case tokError:
- return nil, errors.New(t.val)
- case tokDictEnd:
- break Loop
- case tokComma:
- continue
- default:
- return nil, fmt.Errorf("unexpected %q", t.val)
- }
- }
- return n, nil
-}
-
-type byteStringNode []byte
-
-var byteStringSet = sigSet{
- Signature{"ay"}: true,
-}
-
-func (byteStringNode) Infer() (Signature, error) {
- return Signature{"ay"}, nil
-}
-
-func (b byteStringNode) String() string {
- return string(b)
-}
-
-func (b byteStringNode) Sigs() sigSet {
- return byteStringSet
-}
-
-func (b byteStringNode) Value(sig Signature) (interface{}, error) {
- if sig.str != "ay" {
- return nil, varTypeError{b.String(), sig}
- }
- return []byte(b), nil
-}
-
-func varParseByteString(s string) ([]byte, error) {
- // quotes and b at start are guaranteed to be there
- b := make([]byte, 0, 1)
- s = s[2 : len(s)-1]
- for len(s) != 0 {
- c := s[0]
- s = s[1:]
- if c != '\\' {
- b = append(b, c)
- continue
- }
- c = s[0]
- s = s[1:]
- switch c {
- case 'a':
- b = append(b, 0x7)
- case 'b':
- b = append(b, 0x8)
- case 'f':
- b = append(b, 0xc)
- case 'n':
- b = append(b, '\n')
- case 'r':
- b = append(b, '\r')
- case 't':
- b = append(b, '\t')
- case 'x':
- if len(s) < 2 {
- return nil, errors.New("short escape")
- }
- n, err := strconv.ParseUint(s[:2], 16, 8)
- if err != nil {
- return nil, err
- }
- b = append(b, byte(n))
- s = s[2:]
- case '0':
- if len(s) < 3 {
- return nil, errors.New("short escape")
- }
- n, err := strconv.ParseUint(s[:3], 8, 8)
- if err != nil {
- return nil, err
- }
- b = append(b, byte(n))
- s = s[3:]
- default:
- b = append(b, c)
- }
- }
- return append(b, 0), nil
-}
-
-func varInfer(n varNode) (Signature, error) {
- if sig, ok := n.Sigs().Single(); ok {
- return sig, nil
- }
- return n.Infer()
-}
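Array and dict nodes in the parser above narrow the possible signatures of a container by intersecting the candidate sets of its children (sigSet.Intersect, sigSet.ToArray). A standalone sketch of that narrowing step, using plain string sets instead of the package's Signature type:

```go
package main

import "fmt"

type sigSet map[string]bool

func (s sigSet) intersect(o sigSet) sigSet {
	r := sigSet{}
	for k := range s {
		if o[k] {
			r[k] = true
		}
	}
	return r
}

// toArray wraps every candidate element signature in an array signature,
// mirroring how the parser lifts element candidates to array candidates.
func (s sigSet) toArray() sigSet {
	r := sigSet{}
	for k := range s {
		r["a"+k] = true
	}
	return r
}

func main() {
	// An untyped integer literal could be any numeric D-Bus type...
	first := sigSet{"y": true, "n": true, "q": true, "i": true, "u": true, "x": true, "t": true, "d": true}
	// ...but a literal containing "." or "e" can only be a double.
	second := sigSet{"d": true}

	set := first.toArray().intersect(second.toArray())
	fmt.Println(set) // map[ad:true] -> the array's signature must be "ad"
}
```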
diff --git a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_test.go b/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_test.go
deleted file mode 100644
index da917c8..0000000
--- a/Godeps/_workspace/src/github.com/guelfey/go.dbus/variant_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package dbus
-
-import "reflect"
-import "testing"
-
-var variantFormatTests = []struct {
- v interface{}
- s string
-}{
- {int32(1), `1`},
- {"foo", `"foo"`},
- {ObjectPath("/org/foo"), `@o "/org/foo"`},
- {Signature{"i"}, `@g "i"`},
- {[]byte{}, `@ay []`},
- {[]int32{1, 2}, `[1, 2]`},
- {[]int64{1, 2}, `@ax [1, 2]`},
- {[][]int32{{3, 4}, {5, 6}}, `[[3, 4], [5, 6]]`},
- {[]Variant{MakeVariant(int32(1)), MakeVariant(1.0)}, `[<1>, <@d 1>]`},
- {map[string]int32{"one": 1, "two": 2}, `{"one": 1, "two": 2}`},
- {map[int32]ObjectPath{1: "/org/foo"}, `@a{io} {1: "/org/foo"}`},
- {map[string]Variant{}, `@a{sv} {}`},
-}
-
-func TestFormatVariant(t *testing.T) {
- for i, v := range variantFormatTests {
- if s := MakeVariant(v.v).String(); s != v.s {
- t.Errorf("test %d: got %q, wanted %q", i+1, s, v.s)
- }
- }
-}
-
-var variantParseTests = []struct {
- s string
- v interface{}
-}{
- {"1", int32(1)},
- {"true", true},
- {"false", false},
- {"1.0", float64(1.0)},
- {"0x10", int32(16)},
- {"1e1", float64(10)},
- {`"foo"`, "foo"},
- {`"\a\b\f\n\r\t"`, "\x07\x08\x0c\n\r\t"},
- {`"\u00e4\U0001f603"`, "\u00e4\U0001f603"},
- {"[1]", []int32{1}},
- {"[1, 2, 3]", []int32{1, 2, 3}},
- {"@ai []", []int32{}},
- {"[1, 5.0]", []float64{1, 5.0}},
- {"[[1, 2], [3, 4.0]]", [][]float64{{1, 2}, {3, 4}}},
- {`[@o "/org/foo", "/org/bar"]`, []ObjectPath{"/org/foo", "/org/bar"}},
- {"<1>", MakeVariant(int32(1))},
- {"[<1>, <2.0>]", []Variant{MakeVariant(int32(1)), MakeVariant(2.0)}},
- {`[[], [""]]`, [][]string{{}, {""}}},
- {`@a{ss} {}`, map[string]string{}},
- {`{"foo": 1}`, map[string]int32{"foo": 1}},
- {`[{}, {"foo": "bar"}]`, []map[string]string{{}, {"foo": "bar"}}},
- {`{"a": <1>, "b": <"foo">}`,
- map[string]Variant{"a": MakeVariant(int32(1)), "b": MakeVariant("foo")}},
- {`b''`, []byte{0}},
- {`b"abc"`, []byte{'a', 'b', 'c', 0}},
- {`b"\x01\0002\a\b\f\n\r\t"`, []byte{1, 2, 0x7, 0x8, 0xc, '\n', '\r', '\t', 0}},
- {`[[0], b""]`, [][]byte{{0}, {0}}},
- {"int16 0", int16(0)},
- {"byte 0", byte(0)},
-}
-
-func TestParseVariant(t *testing.T) {
- for i, v := range variantParseTests {
- nv, err := ParseVariant(v.s, Signature{})
- if err != nil {
- t.Errorf("test %d: parsing failed: %s", i+1, err)
- continue
- }
- if !reflect.DeepEqual(nv.value, v.v) {
- t.Errorf("test %d: got %q, wanted %q", i+1, nv, v.v)
- }
- }
-}
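The deleted table drives ParseVariant with an empty signature so the parser has to infer the type from the literal alone. A minimal usage sketch of that entry point, assuming the upstream github.com/guelfey/go.dbus import path that this vendored copy mirrors:

```go
package main

import (
	"fmt"

	dbus "github.com/guelfey/go.dbus" // assumed upstream path of the removed vendored copy
)

func main() {
	// Empty signature: the parser infers a{sv} from the literal itself.
	v, err := dbus.ParseVariant(`{"a": <1>, "b": <"foo">}`, dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %v\n", v.Signature(), v.Value())
}
```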
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/LICENSE b/Godeps/_workspace/src/github.com/tarm/goserial/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/README.md b/Godeps/_workspace/src/github.com/tarm/goserial/README.md
deleted file mode 100644
index becec66..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-GoSerial
-========
-A simple go package to allow you to read and write from the
-serial port as a stream of bytes.
-
-Details
--------
-It aims to have the same API on all platforms, including windows. As
-an added bonus, the windows package does not use cgo, so you can cross
-compile for windows from another platform. Unfortunately goinstall
-does not currently let you cross compile so you will have to do it
-manually:
-
- GOOS=windows make clean install
-
-Currently there is very little in the way of configurability. You can
-set the baud rate. Then you can Read(), Write(), or Close() the
-connection. Read() will block until at least one byte is returned.
-Write is the same. There is currently no exposed way to set the
-timeouts, though patches are welcome.
-
-Currently all ports are opened with 8 data bits, 1 stop bit, no
-parity, no hardware flow control, and no software flow control. This
-works fine for many real devices and many faux serial devices
-including usb-to-serial converters and bluetooth serial ports.
-
-You may Read() and Write() simultaneously on the same connection (from
-different goroutines).
-
-Usage
------
-```go
-package main
-
-import (
- "github.com/tarm/goserial"
- "log"
-)
-
-func main() {
- c := &serial.Config{Name: "COM45", Baud: 115200}
- s, err := serial.OpenPort(c)
- if err != nil {
- log.Fatal(err)
- }
-
- n, err := s.Write([]byte("test"))
- if err != nil {
- log.Fatal(err)
- }
-
- buf := make([]byte, 128)
- n, err = s.Read(buf)
- if err != nil {
- log.Fatal(err)
- }
- log.Printf("%q", buf[:n])
-}
-```
-
-Possible Future Work
---------------------
-- better tests (loopback etc)
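The README notes that Read() and Write() may be used simultaneously from different goroutines. A short sketch of that pattern, assuming a hypothetical device path:

```go
package main

import (
	"log"

	"github.com/tarm/goserial"
)

func main() {
	c := &serial.Config{Name: "/dev/ttyUSB0", Baud: 115200} // hypothetical device
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Reader goroutine: Read blocks until at least one byte arrives.
	go func() {
		buf := make([]byte, 128)
		for {
			n, err := s.Read(buf)
			if err != nil {
				log.Println("read:", err)
				return
			}
			log.Printf("got %q", buf[:n])
		}
	}()

	// Writer: the same connection can be written to concurrently.
	if _, err := s.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
	select {} // keep reading until interrupted
}
```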
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go b/Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go
deleted file mode 100644
index 3a72224..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/basic_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package serial
-
-import (
- "testing"
- "time"
-)
-
-func TestConnection(t *testing.T) {
- c0 := &Config{Name: "/dev/ttyUSB0", Baud: 115200}
- c1 := &Config{Name: "/dev/ttyUSB1", Baud: 115200}
-
- s1, err := OpenPort(c0)
- if err != nil {
- t.Fatal(err)
- }
-
- s2, err := OpenPort(c1)
- if err != nil {
- t.Fatal(err)
- }
-
- ch := make(chan int, 1)
- go func() {
- buf := make([]byte, 128)
- var readCount int
- for {
- n, err := s2.Read(buf)
- if err != nil {
- t.Fatal(err)
- }
- readCount++
- t.Logf("Read %v %v bytes: % 02x %s", readCount, n, buf[:n], buf[:n])
- select {
- case <-ch:
- ch <- readCount
- close(ch)
- default:
- }
- }
- }()
-
- if _, err = s1.Write([]byte("hello")); err != nil {
- t.Fatal(err)
- }
- if _, err = s1.Write([]byte(" ")); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
- if _, err = s1.Write([]byte("world")); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second / 10)
-
- ch <- 0
- s1.Write([]byte(" ")) // We could be blocked in the read without this
- c := <-ch
- exp := 5
- if c >= exp {
- t.Fatalf("Expected less than %v read, got %v", exp, c)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/serial.go b/Godeps/_workspace/src/github.com/tarm/goserial/serial.go
deleted file mode 100644
index 7a404be..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/serial.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Goserial is a simple go package to allow you to read and write from
-the serial port as a stream of bytes.
-
-It aims to have the same API on all platforms, including windows. As
-an added bonus, the windows package does not use cgo, so you can cross
-compile for windows from another platform. Unfortunately goinstall
-does not currently let you cross compile so you will have to do it
-manually:
-
- GOOS=windows make clean install
-
-Currently there is very little in the way of configurability. You can
-set the baud rate. Then you can Read(), Write(), or Close() the
-connection. Read() will block until at least one byte is returned.
-Write is the same. There is currently no exposed way to set the
-timeouts, though patches are welcome.
-
-Currently all ports are opened with 8 data bits, 1 stop bit, no
-parity, no hardware flow control, and no software flow control. This
-works fine for many real devices and many faux serial devices
-including usb-to-serial converters and bluetooth serial ports.
-
-You may Read() and Write() simultaneously on the same connection (from
-different goroutines).
-
-Example usage:
-
- package main
-
- import (
- "github.com/tarm/goserial"
- "log"
- )
-
- func main() {
- c := &serial.Config{Name: "COM5", Baud: 115200}
- s, err := serial.OpenPort(c)
- if err != nil {
- log.Fatal(err)
- }
-
- n, err := s.Write([]byte("test"))
- if err != nil {
- log.Fatal(err)
- }
-
- buf := make([]byte, 128)
- n, err = s.Read(buf)
- if err != nil {
- log.Fatal(err)
- }
- log.Printf("%q", buf[:n])
- }
-*/
-package serial
-
-import "io"
-
-// Config contains the information needed to open a serial port.
-//
-// Currently few options are implemented, but more may be added in the
-// future (patches welcome), so it is recommended that you create a
-// new config addressing the fields by name rather than by order.
-//
-// For example:
-//
-// c0 := &serial.Config{Name: "COM45", Baud: 115200}
-// or
-// c1 := new(serial.Config)
-// c1.Name = "/dev/tty.usbserial"
-// c1.Baud = 115200
-//
-type Config struct {
- Name string
- Baud int
-
- // Size int // 0 get translated to 8
- // Parity SomeNewTypeToGetCorrectDefaultOf_None
- // StopBits SomeNewTypeToGetCorrectDefaultOf_1
-
- // RTSFlowControl bool
- // DTRFlowControl bool
- // XONFlowControl bool
-
- // CRLFTranslate bool
- // TimeoutStuff int
-}
-
-// OpenPort opens a serial port with the specified configuration
-func OpenPort(c *Config) (io.ReadWriteCloser, error) {
- return openPort(c.Name, c.Baud)
-}
-
-// func Flush()
-
-// func SendBreak()
-
-// func RegisterBreakHandler(func())
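Since OpenPort returns a plain io.ReadWriteCloser, the port composes directly with standard-library helpers. A small sketch reading newline-delimited data through bufio (hypothetical device path):

```go
package main

import (
	"bufio"
	"log"

	"github.com/tarm/goserial"
)

func main() {
	s, err := serial.OpenPort(&serial.Config{Name: "/dev/ttyUSB0", Baud: 9600}) // hypothetical device
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// io.ReadWriteCloser satisfies io.Reader, so bufio.Scanner works directly.
	sc := bufio.NewScanner(s)
	for sc.Scan() {
		log.Printf("line: %s", sc.Text())
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```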
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go b/Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go
deleted file mode 100644
index 2366f2c..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/serial_linux.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// +build linux,!cgo
-
-package serial
-
-import (
- "io"
- "os"
- "syscall"
- "unsafe"
-)
-
-func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
-
- var bauds = map[int]uint32{
- 50: syscall.B50,
- 75: syscall.B75,
- 110: syscall.B110,
- 134: syscall.B134,
- 150: syscall.B150,
- 200: syscall.B200,
- 300: syscall.B300,
- 600: syscall.B600,
- 1200: syscall.B1200,
- 1800: syscall.B1800,
- 2400: syscall.B2400,
- 4800: syscall.B4800,
- 9600: syscall.B9600,
- 19200: syscall.B19200,
- 38400: syscall.B38400,
- 57600: syscall.B57600,
- 115200: syscall.B115200,
- 230400: syscall.B230400,
- 460800: syscall.B460800,
- 500000: syscall.B500000,
- 576000: syscall.B576000,
- 921600: syscall.B921600,
- 1000000: syscall.B1000000,
- 1152000: syscall.B1152000,
- 1500000: syscall.B1500000,
- 2000000: syscall.B2000000,
- 2500000: syscall.B2500000,
- 3000000: syscall.B3000000,
- 3500000: syscall.B3500000,
- 4000000: syscall.B4000000,
- }
-
- rate := bauds[baud]
-
- if rate == 0 {
- return
- }
-
- f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- if err != nil && f != nil {
- f.Close()
- }
- }()
-
- fd := f.Fd()
- t := syscall.Termios{
- Iflag: syscall.IGNPAR,
- Cflag: syscall.CS8 | syscall.CREAD | syscall.CLOCAL | rate,
- Cc: [32]uint8{syscall.VMIN: 1},
- Ispeed: rate,
- Ospeed: rate,
- }
-
- if _, _, errno := syscall.Syscall6(
- syscall.SYS_IOCTL,
- uintptr(fd),
- uintptr(syscall.TCSETS),
- uintptr(unsafe.Pointer(&t)),
- 0,
- 0,
- 0,
- ); errno != 0 {
- return nil, errno
- }
-
- if err = syscall.SetNonblock(int(fd), false); err != nil {
- return
- }
-
- return f, nil
-}
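The Linux implementation above resolves the requested baud rate through a lookup table of termios speed constants and simply returns when the rate is unknown. A minimal sketch of the same lookup with an explicit error (illustrative constant values standing in for syscall.B*):

```go
package main

import (
	"fmt"
	"log"
)

// Illustrative termios speed values; real code uses syscall.B9600 etc.
var bauds = map[int]uint32{
	9600:   0x0D,
	19200:  0x0E,
	115200: 0x1002,
}

// lookupBaud returns the termios constant for a baud rate, or an error
// if the rate is not in the table.
func lookupBaud(baud int) (uint32, error) {
	rate, ok := bauds[baud]
	if !ok {
		return 0, fmt.Errorf("unsupported baud rate %d", baud)
	}
	return rate, nil
}

func main() {
	rate, err := lookupBaud(115200)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("termios speed constant: %#x\n", rate)
}
```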
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go b/Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go
deleted file mode 100644
index 2eeb608..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/serial_posix.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// +build !windows,cgo
-
-package serial
-
- // #include <termios.h>
- // #include <unistd.h>
-import "C"
-
-// TODO: Maybe change to using syscall package + ioctl instead of cgo
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "syscall"
- //"unsafe"
-)
-
-func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
- f, err := os.OpenFile(name, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK, 0666)
- if err != nil {
- return
- }
-
- fd := C.int(f.Fd())
- if C.isatty(fd) != 1 {
- f.Close()
- return nil, errors.New("File is not a tty")
- }
-
- var st C.struct_termios
- _, err = C.tcgetattr(fd, &st)
- if err != nil {
- f.Close()
- return nil, err
- }
- var speed C.speed_t
- switch baud {
- case 115200:
- speed = C.B115200
- case 57600:
- speed = C.B57600
- case 38400:
- speed = C.B38400
- case 19200:
- speed = C.B19200
- case 9600:
- speed = C.B9600
- case 4800:
- speed = C.B4800
- case 2400:
- speed = C.B2400
- default:
- f.Close()
- return nil, fmt.Errorf("Unknown baud rate %v", baud)
- }
-
- _, err = C.cfsetispeed(&st, speed)
- if err != nil {
- f.Close()
- return nil, err
- }
- _, err = C.cfsetospeed(&st, speed)
- if err != nil {
- f.Close()
- return nil, err
- }
-
- // Select local mode
- st.c_cflag |= (C.CLOCAL | C.CREAD)
-
- // Select raw mode
- st.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ISIG)
- st.c_oflag &= ^C.tcflag_t(C.OPOST)
-
- _, err = C.tcsetattr(fd, C.TCSANOW, &st)
- if err != nil {
- f.Close()
- return nil, err
- }
-
- //fmt.Println("Tweaking", name)
- r1, _, e := syscall.Syscall(syscall.SYS_FCNTL,
- uintptr(f.Fd()),
- uintptr(syscall.F_SETFL),
- uintptr(0))
- if e != 0 || r1 != 0 {
- s := fmt.Sprint("Clearing NONBLOCK syscall error:", e, r1)
- f.Close()
- return nil, errors.New(s)
- }
-
- /*
- r1, _, e = syscall.Syscall(syscall.SYS_IOCTL,
- uintptr(f.Fd()),
- uintptr(0x80045402), // IOSSIOSPEED
- uintptr(unsafe.Pointer(&baud)));
- if e != 0 || r1 != 0 {
- s := fmt.Sprint("Baudrate syscall error:", e, r1)
- f.Close()
- return nil, os.NewError(s)
- }
- */
-
- return f, nil
-}
diff --git a/Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go b/Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go
deleted file mode 100644
index 76e0a30..0000000
--- a/Godeps/_workspace/src/github.com/tarm/goserial/serial_windows.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// +build windows
-
-package serial
-
-import (
- "fmt"
- "io"
- "os"
- "sync"
- "syscall"
- "unsafe"
-)
-
-type serialPort struct {
- f *os.File
- fd syscall.Handle
- rl sync.Mutex
- wl sync.Mutex
- ro *syscall.Overlapped
- wo *syscall.Overlapped
-}
-
-type structDCB struct {
- DCBlength, BaudRate uint32
- flags [4]byte
- wReserved, XonLim, XoffLim uint16
- ByteSize, Parity, StopBits byte
- XonChar, XoffChar, ErrorChar, EofChar, EvtChar byte
- wReserved1 uint16
-}
-
-type structTimeouts struct {
- ReadIntervalTimeout uint32
- ReadTotalTimeoutMultiplier uint32
- ReadTotalTimeoutConstant uint32
- WriteTotalTimeoutMultiplier uint32
- WriteTotalTimeoutConstant uint32
-}
-
-func openPort(name string, baud int) (rwc io.ReadWriteCloser, err error) {
- if len(name) > 0 && name[0] != '\\' {
- name = "\\\\.\\" + name
- }
-
- h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
- syscall.GENERIC_READ|syscall.GENERIC_WRITE,
- 0,
- nil,
- syscall.OPEN_EXISTING,
- syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
- 0)
- if err != nil {
- return nil, err
- }
- f := os.NewFile(uintptr(h), name)
- defer func() {
- if err != nil {
- f.Close()
- }
- }()
-
- if err = setCommState(h, baud); err != nil {
- return
- }
- if err = setupComm(h, 64, 64); err != nil {
- return
- }
- if err = setCommTimeouts(h); err != nil {
- return
- }
- if err = setCommMask(h); err != nil {
- return
- }
-
- ro, err := newOverlapped()
- if err != nil {
- return
- }
- wo, err := newOverlapped()
- if err != nil {
- return
- }
- port := new(serialPort)
- port.f = f
- port.fd = h
- port.ro = ro
- port.wo = wo
-
- return port, nil
-}
-
-func (p *serialPort) Close() error {
- return p.f.Close()
-}
-
-func (p *serialPort) Write(buf []byte) (int, error) {
- p.wl.Lock()
- defer p.wl.Unlock()
-
- if err := resetEvent(p.wo.HEvent); err != nil {
- return 0, err
- }
- var n uint32
- err := syscall.WriteFile(p.fd, buf, &n, p.wo)
- if err != nil && err != syscall.ERROR_IO_PENDING {
- return int(n), err
- }
- return getOverlappedResult(p.fd, p.wo)
-}
-
-func (p *serialPort) Read(buf []byte) (int, error) {
- if p == nil || p.f == nil {
- return 0, fmt.Errorf("Invalid port on read %v %v", p, p.f)
- }
-
- p.rl.Lock()
- defer p.rl.Unlock()
-
- if err := resetEvent(p.ro.HEvent); err != nil {
- return 0, err
- }
- var done uint32
- err := syscall.ReadFile(p.fd, buf, &done, p.ro)
- if err != nil && err != syscall.ERROR_IO_PENDING {
- return int(done), err
- }
- return getOverlappedResult(p.fd, p.ro)
-}
-
-var (
- nSetCommState,
- nSetCommTimeouts,
- nSetCommMask,
- nSetupComm,
- nGetOverlappedResult,
- nCreateEvent,
- nResetEvent uintptr
-)
-
-func init() {
- k32, err := syscall.LoadLibrary("kernel32.dll")
- if err != nil {
- panic("LoadLibrary " + err.Error())
- }
- defer syscall.FreeLibrary(k32)
-
- nSetCommState = getProcAddr(k32, "SetCommState")
- nSetCommTimeouts = getProcAddr(k32, "SetCommTimeouts")
- nSetCommMask = getProcAddr(k32, "SetCommMask")
- nSetupComm = getProcAddr(k32, "SetupComm")
- nGetOverlappedResult = getProcAddr(k32, "GetOverlappedResult")
- nCreateEvent = getProcAddr(k32, "CreateEventW")
- nResetEvent = getProcAddr(k32, "ResetEvent")
-}
-
-func getProcAddr(lib syscall.Handle, name string) uintptr {
- addr, err := syscall.GetProcAddress(lib, name)
- if err != nil {
- panic(name + " " + err.Error())
- }
- return addr
-}
-
-func setCommState(h syscall.Handle, baud int) error {
- var params structDCB
- params.DCBlength = uint32(unsafe.Sizeof(params))
-
- params.flags[0] = 0x01 // fBinary
- params.flags[0] |= 0x10 // Assert DSR
-
- params.BaudRate = uint32(baud)
- params.ByteSize = 8
-
- r, _, err := syscall.Syscall(nSetCommState, 2, uintptr(h), uintptr(unsafe.Pointer(¶ms)), 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-func setCommTimeouts(h syscall.Handle) error {
- var timeouts structTimeouts
- const MAXDWORD = 1<<32 - 1
- timeouts.ReadIntervalTimeout = MAXDWORD
- timeouts.ReadTotalTimeoutMultiplier = MAXDWORD
- timeouts.ReadTotalTimeoutConstant = MAXDWORD - 1
-
- /* From http://msdn.microsoft.com/en-us/library/aa363190(v=VS.85).aspx
-
- For blocking I/O see below:
-
- Remarks:
-
- If an application sets ReadIntervalTimeout and
- ReadTotalTimeoutMultiplier to MAXDWORD and sets
- ReadTotalTimeoutConstant to a value greater than zero and
- less than MAXDWORD, one of the following occurs when the
- ReadFile function is called:
-
- If there are any bytes in the input buffer, ReadFile returns
- immediately with the bytes in the buffer.
-
- If there are no bytes in the input buffer, ReadFile waits
- until a byte arrives and then returns immediately.
-
- If no bytes arrive within the time specified by
- ReadTotalTimeoutConstant, ReadFile times out.
- */
-
- r, _, err := syscall.Syscall(nSetCommTimeouts, 2, uintptr(h), uintptr(unsafe.Pointer(&timeouts)), 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-func setupComm(h syscall.Handle, in, out int) error {
- r, _, err := syscall.Syscall(nSetupComm, 3, uintptr(h), uintptr(in), uintptr(out))
- if r == 0 {
- return err
- }
- return nil
-}
-
-func setCommMask(h syscall.Handle) error {
- const EV_RXCHAR = 0x0001
- r, _, err := syscall.Syscall(nSetCommMask, 2, uintptr(h), EV_RXCHAR, 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-func resetEvent(h syscall.Handle) error {
- r, _, err := syscall.Syscall(nResetEvent, 1, uintptr(h), 0, 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-func newOverlapped() (*syscall.Overlapped, error) {
- var overlapped syscall.Overlapped
- r, _, err := syscall.Syscall6(nCreateEvent, 4, 0, 1, 0, 0, 0, 0)
- if r == 0 {
- return nil, err
- }
- overlapped.HEvent = syscall.Handle(r)
- return &overlapped, nil
-}
-
-func getOverlappedResult(h syscall.Handle, overlapped *syscall.Overlapped) (int, error) {
- var n int
- r, _, err := syscall.Syscall6(nGetOverlappedResult, 4,
- uintptr(h),
- uintptr(unsafe.Pointer(overlapped)),
- uintptr(unsafe.Pointer(&n)), 1, 0, 0)
- if r == 0 {
- return n, err
- }
-
- return n, nil
-}
diff --git a/build b/build
index aaf66f4..48a6667 100755
--- a/build
+++ b/build
@@ -5,12 +5,14 @@ REPO_PATH="${ORG_PATH}/coreos-cloudinit"
if [ ! -h gopath/src/${REPO_PATH} ]; then
mkdir -p gopath/src/${ORG_PATH}
- ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
+ ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
fi
export GOBIN=${PWD}/bin
-export GOPATH=${PWD}/gopath
+export GOPATH=${PWD}/gopath:${PWD}/third_party
export CGO_ENABLED=0
-go build -a -installsuffix cgo -o bin/cloudinit-x86_64 ${REPO_PATH}
-GOARCH=386 go build -a -installsuffix cgo -o bin/cloudinit-x86_32 ${REPO_PATH}
+for os in linux freebsd; do
+ GOOS=${os} go build -a -installsuffix cgo -o bin/cloudinit-${os}-x86_64 ${REPO_PATH}
+ GOOS=${os} GOARCH=386 go build -a -installsuffix cgo -o bin/cloudinit-${os}-x86_32 ${REPO_PATH}
+done
diff --git a/coreos-cloudinit.go b/cloudinit.go
similarity index 90%
rename from coreos-cloudinit.go
rename to cloudinit.go
index d062d31..f8d24de 100644
--- a/coreos-cloudinit.go
+++ b/cloudinit.go
@@ -26,7 +26,6 @@ import (
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
"github.com/coreos/coreos-cloudinit/datasource/file"
- "github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
@@ -40,10 +39,8 @@ import (
)
const (
- version = "1.3.3+git"
- datasourceInterval = 100 * time.Millisecond
- datasourceMaxInterval = 30 * time.Second
- datasourceTimeout = 5 * time.Minute
+ version = "1.3.3+git"
+ datasourceInterval = 100 * time.Millisecond
)
var (
@@ -51,12 +48,12 @@ var (
printVersion bool
ignoreFailure bool
sources struct {
- file string
- configDrive string
- waagent string
- metadataService bool
- ec2MetadataService string
- cloudSigmaMetadataService bool
+ file string
+ configDrive string
+ waagent string
+ metadataService bool
+ ec2MetadataService string
+ // cloudSigmaMetadataService bool
digitalOceanMetadataService string
openstackMetadataService string
url string
@@ -67,7 +64,11 @@ var (
sshKeyName string
oem string
validate bool
+ timeout string
+ dstimeout string
}{}
+ datasourceTimeout time.Duration
+ datasourceMaxInterval time.Duration
)
func init() {
@@ -78,7 +79,7 @@ func init() {
flag.StringVar(&flags.sources.waagent, "from-waagent", "", "Read data from provided waagent directory")
flag.BoolVar(&flags.sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service")
flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url")
- flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
+ // flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url")
flag.StringVar(&flags.sources.openstackMetadataService, "from-openstack-metadata", "", "Download OpenStack data from the provided url")
flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url")
@@ -88,6 +89,9 @@ func init() {
flag.StringVar(&flags.workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
flag.StringVar(&flags.sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
flag.BoolVar(&flags.validate, "validate", false, "[EXPERIMENTAL] Validate the user-data but do not apply it to the system")
+ flag.StringVar(&flags.timeout, "timeout", "60s", "Timeout to wait for all datasource metadata")
+ flag.StringVar(&flags.dstimeout, "dstimeout", "10s", "Timeout to wait for single datasource metadata")
+
}
type oemConfig map[string]string
@@ -113,13 +117,14 @@ var (
"azure": oemConfig{
"from-waagent": "/var/lib/waagent",
},
- "cloudsigma": oemConfig{
- "from-cloudsigma-metadata": "true",
- },
+ // "cloudsigma": oemConfig{
+ // "from-cloudsigma-metadata": "true",
+ // },
}
)
func main() {
+ var err error
failure := false
flag.Parse()
@@ -142,6 +147,17 @@ func main() {
os.Exit(0)
}
+ datasourceTimeout, err = time.ParseDuration(flags.timeout)
+ if err != nil {
+ fmt.Printf("Invalid value for --timeout: %q\n", err)
+ os.Exit(1)
+ }
+ datasourceMaxInterval, err = time.ParseDuration(flags.dstimeout)
+ if err != nil {
+ fmt.Printf("Invalid value for --dstimeout: %q\n", err)
+ os.Exit(1)
+ }
+
switch flags.convertNetconf {
case "":
case "debian":
@@ -288,9 +304,9 @@ func getDatasources() []datasource.Datasource {
if flags.sources.ec2MetadataService != "" {
dss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService))
}
- if flags.sources.cloudSigmaMetadataService {
- dss = append(dss, cloudsigma.NewServerContextService())
- }
+ // if flags.sources.cloudSigmaMetadataService {
+ // dss = append(dss, cloudsigma.NewServerContextService())
+ // }
if flags.sources.digitalOceanMetadataService != "" {
dss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService))
}
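The new -timeout and -dstimeout flags are read as strings and converted with time.ParseDuration after flag parsing. A standalone sketch of that validation pattern (hypothetical flag name):

```go
package main

import (
	"flag"
	"fmt"
	"os"
	"time"
)

func main() {
	timeout := flag.String("timeout", "60s", "Timeout to wait for all datasource metadata")
	flag.Parse()

	// Parse after flag.Parse so an invalid duration can be reported cleanly.
	d, err := time.ParseDuration(*timeout)
	if err != nil {
		fmt.Printf("Invalid value for --timeout: %q\n", err)
		os.Exit(1)
	}
	fmt.Println("using timeout", d)
}
```

flag.Duration would fold the parse into flag handling; the string-plus-ParseDuration form shown here matches what the diff does.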
diff --git a/coreos-cloudinit_test.go b/cloudinit_test.go
similarity index 100%
rename from coreos-cloudinit_test.go
rename to cloudinit_test.go
diff --git a/config/config.go b/config/config.go
index f25261c..3a14c06 100644
--- a/config/config.go
+++ b/config/config.go
@@ -20,7 +20,7 @@ import (
"regexp"
"strings"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
+ yaml "gopkg.in/yaml.v2"
)
// CloudConfig encapsulates the entire cloud-config configuration file and maps
diff --git a/config/validate/validate.go b/config/validate/validate.go
index e1b7a9f..3ad9764 100644
--- a/config/validate/validate.go
+++ b/config/validate/validate.go
@@ -23,7 +23,7 @@ import (
"github.com/coreos/coreos-cloudinit/config"
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
+ yaml "gopkg.in/yaml.v2"
)
var (
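Both files now import gopkg.in/yaml.v2 in place of the vendored coreos/yaml fork. A minimal sketch of unmarshalling a cloud-config-style snippet with that package (simplified struct, not the project's CloudConfig type):

```go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

type config struct {
	Hostname string   `yaml:"hostname"`
	SSHKeys  []string `yaml:"ssh_authorized_keys"`
}

func main() {
	data := []byte("hostname: coreos1\nssh_authorized_keys:\n  - ssh-rsa AAAA... user@host\n")

	var c config
	if err := yaml.Unmarshal(data, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}
```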
diff --git a/datasource/metadata/cloudsigma/server_context.go b/datasource/metadata/cloudsigma/server_context.go
deleted file mode 100644
index 855ae97..0000000
--- a/datasource/metadata/cloudsigma/server_context.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsigma
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "errors"
- "io/ioutil"
- "net"
- "os"
- "strings"
-
- "github.com/coreos/coreos-cloudinit/datasource"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/cloudsigma/cepgo"
-)
-
-const (
- userDataFieldName = "cloudinit-user-data"
-)
-
-type serverContextService struct {
- client interface {
- All() (interface{}, error)
- Key(string) (interface{}, error)
- Meta() (map[string]string, error)
- FetchRaw(string) ([]byte, error)
- }
-}
-
-func NewServerContextService() *serverContextService {
- return &serverContextService{
- client: cepgo.NewCepgo(),
- }
-}
-
-func (_ *serverContextService) IsAvailable() bool {
- productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
- if err != nil {
- return false
- }
- productName := make([]byte, 10)
- _, err = productNameFile.Read(productName)
-
- return err == nil && string(productName) == "CloudSigma" && hasDHCPLeases()
-}
-
-func (_ *serverContextService) AvailabilityChanges() bool {
- return true
-}
-
-func (_ *serverContextService) ConfigRoot() string {
- return ""
-}
-
-func (_ *serverContextService) Type() string {
- return "server-context"
-}
-
-func (scs *serverContextService) FetchMetadata() (metadata datasource.Metadata, err error) {
- var (
- inputMetadata struct {
- Name string `json:"name"`
- UUID string `json:"uuid"`
- Meta map[string]string `json:"meta"`
- Nics []struct {
- Mac string `json:"mac"`
- IPv4Conf struct {
- InterfaceType string `json:"interface_type"`
- IP struct {
- UUID string `json:"uuid"`
- } `json:"ip"`
- } `json:"ip_v4_conf"`
- VLAN struct {
- UUID string `json:"uuid"`
- } `json:"vlan"`
- } `json:"nics"`
- }
- rawMetadata []byte
- )
-
- if rawMetadata, err = scs.client.FetchRaw(""); err != nil {
- return
- }
-
- if err = json.Unmarshal(rawMetadata, &inputMetadata); err != nil {
- return
- }
-
- if inputMetadata.Name != "" {
- metadata.Hostname = inputMetadata.Name
- } else {
- metadata.Hostname = inputMetadata.UUID
- }
-
- metadata.SSHPublicKeys = map[string]string{}
- // CloudSigma uses an empty string, rather than no string,
- // to represent the lack of a SSH key
- if key, _ := inputMetadata.Meta["ssh_public_key"]; len(key) > 0 {
- splitted := strings.Split(key, " ")
- metadata.SSHPublicKeys[splitted[len(splitted)-1]] = key
- }
-
- for _, nic := range inputMetadata.Nics {
- if nic.IPv4Conf.IP.UUID != "" {
- metadata.PublicIPv4 = net.ParseIP(nic.IPv4Conf.IP.UUID)
- }
- if nic.VLAN.UUID != "" {
- if localIP, err := scs.findLocalIP(nic.Mac); err == nil {
- metadata.PrivateIPv4 = localIP
- }
- }
- }
-
- return
-}
-
-func (scs *serverContextService) FetchUserdata() ([]byte, error) {
- metadata, err := scs.client.Meta()
- if err != nil {
- return []byte{}, err
- }
-
- userData, ok := metadata[userDataFieldName]
- if ok && isBase64Encoded(userDataFieldName, metadata) {
- if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
- return decodedUserData, nil
- } else {
- return []byte{}, nil
- }
- }
-
- return []byte(userData), nil
-}
-
-func (scs *serverContextService) findLocalIP(mac string) (net.IP, error) {
- ifaces, err := net.Interfaces()
- if err != nil {
- return nil, err
- }
- ifaceMac, err := net.ParseMAC(mac)
- if err != nil {
- return nil, err
- }
- for _, iface := range ifaces {
- if !bytes.Equal(iface.HardwareAddr, ifaceMac) {
- continue
- }
- addrs, err := iface.Addrs()
- if err != nil {
- continue
- }
-
- for _, addr := range addrs {
- switch ip := addr.(type) {
- case *net.IPNet:
- if ip.IP.To4() != nil {
- return ip.IP.To4(), nil
- }
- }
- }
- }
- return nil, errors.New("Local IP not found")
-}
-
-func isBase64Encoded(field string, userdata map[string]string) bool {
- base64Fields, ok := userdata["base64_fields"]
- if !ok {
- return false
- }
-
- for _, base64Field := range strings.Split(base64Fields, ",") {
- if field == base64Field {
- return true
- }
- }
- return false
-}
-
-func hasDHCPLeases() bool {
- files, err := ioutil.ReadDir("/run/systemd/netif/leases/")
- return err == nil && len(files) > 0
-}
diff --git a/datasource/metadata/cloudsigma/server_context_test.go b/datasource/metadata/cloudsigma/server_context_test.go
deleted file mode 100644
index 4f29d7f..0000000
--- a/datasource/metadata/cloudsigma/server_context_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsigma
-
-import (
- "net"
- "reflect"
- "testing"
-)
-
-type fakeCepgoClient struct {
- raw []byte
- meta map[string]string
- keys map[string]interface{}
- err error
-}
-
-func (f *fakeCepgoClient) All() (interface{}, error) {
- return f.keys, f.err
-}
-
-func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
- return f.keys[key], f.err
-}
-
-func (f *fakeCepgoClient) Meta() (map[string]string, error) {
- return f.meta, f.err
-}
-
-func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
- return f.raw, f.err
-}
-
-func TestServerContextWithEmptyPublicSSHKey(t *testing.T) {
- client := new(fakeCepgoClient)
- scs := NewServerContextService()
- scs.client = client
- client.raw = []byte(`{
- "meta": {
- "base64_fields": "cloudinit-user-data",
- "cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
- "ssh_public_key": ""
- }
- }`)
- metadata, err := scs.FetchMetadata()
- if err != nil {
- t.Error(err.Error())
- }
-
- if len(metadata.SSHPublicKeys) != 0 {
- t.Error("There should be no Public SSH Keys provided")
- }
-}
-
-func TestServerContextFetchMetadata(t *testing.T) {
- client := new(fakeCepgoClient)
- scs := NewServerContextService()
- scs.client = client
- client.raw = []byte(`{
- "context": true,
- "cpu": 4000,
- "cpu_model": null,
- "cpus_instead_of_cores": false,
- "enable_numa": false,
- "grantees": [],
- "hv_relaxed": false,
- "hv_tsc": false,
- "jobs": [],
- "mem": 4294967296,
- "meta": {
- "base64_fields": "cloudinit-user-data",
- "cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
- "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
- },
- "name": "coreos",
- "nics": [
- {
- "boot_order": null,
- "ip_v4_conf": {
- "conf": "dhcp",
- "ip": {
- "gateway": "31.171.244.1",
- "meta": {},
- "nameservers": [
- "178.22.66.167",
- "178.22.71.56",
- "8.8.8.8"
- ],
- "netmask": 22,
- "tags": [],
- "uuid": "31.171.251.74"
- }
- },
- "ip_v6_conf": null,
- "mac": "22:3d:09:6b:90:f3",
- "model": "virtio",
- "vlan": null
- },
- {
- "boot_order": null,
- "ip_v4_conf": null,
- "ip_v6_conf": null,
- "mac": "22:ae:4a:fb:8f:31",
- "model": "virtio",
- "vlan": {
- "meta": {
- "description": "",
- "name": "CoreOS"
- },
- "tags": [],
- "uuid": "5dec030e-25b8-4621-a5a4-a3302c9d9619"
- }
- }
- ],
- "smp": 2,
- "status": "running",
- "uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
- }`)
-
- metadata, err := scs.FetchMetadata()
- if err != nil {
- t.Error(err.Error())
- }
-
- if metadata.Hostname != "coreos" {
- t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
- }
-
- if metadata.SSHPublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
- t.Error("Public SSH Keys are not being read properly")
- }
-
- if !metadata.PublicIPv4.Equal(net.ParseIP("31.171.251.74")) {
- t.Errorf("Public IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
- }
-}
-
-func TestServerContextFetchUserdata(t *testing.T) {
- client := new(fakeCepgoClient)
- scs := NewServerContextService()
- scs.client = client
- userdataSets := []struct {
- in map[string]string
- err bool
- out []byte
- }{
- {map[string]string{
- "base64_fields": "cloudinit-user-data",
- "cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
- }, false, []byte("hostname: coreos_test")},
- {map[string]string{
- "cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
- }, false, []byte("#cloud-config\\nhostname: coreos1")},
- {map[string]string{}, false, []byte{}},
- }
-
- for i, set := range userdataSets {
- client.meta = set.in
- got, err := scs.FetchUserdata()
- if (err != nil) != set.err {
- t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
- }
-
- if !reflect.DeepEqual(got, set.out) {
- t.Errorf("case %d: got %s, want %s", i, got, set.out)
- }
- }
-}
-
-func TestServerContextDecodingBase64UserData(t *testing.T) {
- base64Sets := []struct {
- in string
- out bool
- }{
- {"cloudinit-user-data,foo,bar", true},
- {"bar,cloudinit-user-data,foo,bar", true},
- {"cloudinit-user-data", true},
- {"", false},
- {"foo", false},
- }
-
- for _, set := range base64Sets {
- userdata := map[string]string{"base64_fields": set.in}
- if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
- t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
- }
- }
-}
diff --git a/system/networkd.go b/system/networkd.go
index c9e06ab..877eccd 100644
--- a/system/networkd.go
+++ b/system/networkd.go
@@ -23,8 +23,7 @@ import (
"github.com/coreos/coreos-cloudinit/config"
"github.com/coreos/coreos-cloudinit/network"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink"
+ "github.com/vishvananda/netlink"
)
func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) {
diff --git a/system/ssh_key.go b/system/ssh_key.go
index 85a66f6..f897308 100644
--- a/system/ssh_key.go
+++ b/system/ssh_key.go
@@ -31,14 +31,12 @@ func AuthorizeSSHKeys(user string, keysName string, keys []string) error {
// also ends with a newline
joined := fmt.Sprintf("%s\n", strings.Join(keys, "\n"))
- authorized_file := ""
- switch user {
- case "root":
- authorized_file = "/root/.ssh/authorized_keys"
- default:
- authorized_file = fmt.Sprintf("/home/%s/.ssh/authorized_keys", user)
+ home, err := UserHome(user)
+ if err != nil {
+ return err
}
+ authorized_file := fmt.Sprintf("%s/.ssh/authorized_keys", home)
f, err := os.OpenFile(authorized_file, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
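The updated AuthorizeSSHKeys derives the authorized_keys path from the user's home directory via a UserHome helper that is not shown in this hunk. A sketch of one way to resolve that path, assuming an os/user lookup rather than the project's helper:

```go
package main

import (
	"fmt"
	"log"
	"os/user"
	"path/filepath"
)

// authorizedKeysPath resolves ~user/.ssh/authorized_keys for the given user.
// It stands in for the UserHome helper referenced by the diff.
func authorizedKeysPath(username string) (string, error) {
	u, err := user.Lookup(username)
	if err != nil {
		return "", err
	}
	return filepath.Join(u.HomeDir, ".ssh", "authorized_keys"), nil
}

func main() {
	p, err := authorizedKeysPath("root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // typically /root/.ssh/authorized_keys
}
```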
diff --git a/system/systemd.go b/system/systemd.go
deleted file mode 100644
index e5fb792..0000000
--- a/system/systemd.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package system
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path"
- "strings"
-
- "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus"
- "github.com/coreos/coreos-cloudinit/config"
-)
-
-func NewUnitManager(root string) UnitManager {
- return &systemd{root}
-}
-
-type systemd struct {
- root string
-}
-
-// fakeMachineID is placed on non-usr CoreOS images and should
-// never be used as a true MachineID
-const fakeMachineID = "42000000000000000000000000000042"
-
-// PlaceUnit writes a unit file at its desired destination, creating parent
-// directories as necessary.
-func (s *systemd) PlaceUnit(u Unit) error {
- file := File{config.File{
- Path: u.Destination(s.root),
- Content: u.Content,
- RawFilePermissions: "0644",
- }}
-
- _, err := WriteFile(&file, "/")
- return err
-}
-
-// PlaceUnitDropIn writes a unit drop-in file at its desired destination,
-// creating parent directories as necessary.
-func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error {
- file := File{config.File{
- Path: u.DropInDestination(s.root, d),
- Content: d.Content,
- RawFilePermissions: "0644",
- }}
-
- _, err := WriteFile(&file, "/")
- return err
-}
-
-func (s *systemd) EnableUnitFile(u Unit) error {
- conn, err := dbus.New()
- if err != nil {
- return err
- }
-
- units := []string{u.Name}
- _, _, err = conn.EnableUnitFiles(units, u.Runtime, true)
- return err
-}
-
-func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) {
- conn, err := dbus.New()
- if err != nil {
- return "", err
- }
-
- var fn func(string, string) (string, error)
- switch c {
- case "start":
- fn = conn.StartUnit
- case "stop":
- fn = conn.StopUnit
- case "restart":
- fn = conn.RestartUnit
- case "reload":
- fn = conn.ReloadUnit
- case "try-restart":
- fn = conn.TryRestartUnit
- case "reload-or-restart":
- fn = conn.ReloadOrRestartUnit
- case "reload-or-try-restart":
- fn = conn.ReloadOrTryRestartUnit
- default:
- return "", fmt.Errorf("Unsupported systemd command %q", c)
- }
-
- return fn(u.Name, "replace")
-}
-
-func (s *systemd) DaemonReload() error {
- conn, err := dbus.New()
- if err != nil {
- return err
- }
-
- return conn.Reload()
-}
-
-// MaskUnit masks the given Unit by symlinking its unit file to
-// /dev/null, analogous to `systemctl mask`.
-// N.B.: Unlike `systemctl mask`, this function will *remove any existing unit
-// file at the location*, to ensure that the mask will succeed.
-func (s *systemd) MaskUnit(u Unit) error {
- masked := u.Destination(s.root)
- if _, err := os.Stat(masked); os.IsNotExist(err) {
- if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil {
- return err
- }
- } else if err := os.Remove(masked); err != nil {
- return err
- }
- return os.Symlink("/dev/null", masked)
-}
-
-// UnmaskUnit is analogous to systemd's unit_file_unmask. If the file
-// associated with the given Unit is empty or appears to be a symlink to
-// /dev/null, it is removed.
-func (s *systemd) UnmaskUnit(u Unit) error {
- masked := u.Destination(s.root)
- ne, err := nullOrEmpty(masked)
- if os.IsNotExist(err) {
- return nil
- } else if err != nil {
- return err
- }
- if !ne {
- log.Printf("%s is not null or empty, refusing to unmask", masked)
- return nil
- }
- return os.Remove(masked)
-}
-
-// nullOrEmpty checks whether a given path appears to be an empty regular file
-// or a symlink to /dev/null
-func nullOrEmpty(path string) (bool, error) {
- fi, err := os.Stat(path)
- if err != nil {
- return false, err
- }
- m := fi.Mode()
- if m.IsRegular() && fi.Size() <= 0 {
- return true, nil
- }
- if m&os.ModeCharDevice > 0 {
- return true, nil
- }
- return false, nil
-}
-
-func ExecuteScript(scriptPath string) (string, error) {
- props := []dbus.Property{
- dbus.PropDescription("Unit generated and executed by coreos-cloudinit on behalf of user"),
- dbus.PropExecStart([]string{"/bin/bash", scriptPath}, false),
- }
-
- base := path.Base(scriptPath)
- name := fmt.Sprintf("coreos-cloudinit-%s.service", base)
-
- log.Printf("Creating transient systemd unit '%s'", name)
-
- conn, err := dbus.New()
- if err != nil {
- return "", err
- }
-
- _, err = conn.StartTransientUnit(name, "replace", props...)
- return name, err
-}
-
-func SetHostname(hostname string) (err error) {
- for _, name := range []string{"hostnamectl", "hostname"} {
- if _, err = exec.LookPath(name); err != nil {
- continue
- }
- switch name {
- case "hostname":
- err = exec.Command(name, hostname).Run()
- case "hostnamectl":
- err = exec.Command(name, "set-hostname", hostname).Run()
- }
- }
- if err != nil {
- return
- }
- return ioutil.WriteFile("/etc/hostname", []byte(hostname+"\n"), 0644)
-}
-
-func Hostname() (string, error) {
- return os.Hostname()
-}
-
-func MachineID(root string) string {
- contents, _ := ioutil.ReadFile(path.Join(root, "etc", "machine-id"))
- id := strings.TrimSpace(string(contents))
-
- if id == fakeMachineID {
- id = ""
- }
-
- return id
-}
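
Editorial note (not part of the patch): the deleted systemd.go implements unit masking by symlinking the unit path to /dev/null, removing any pre-existing file first. A standalone sketch of that idea, with an illustrative path rather than a real unit location:

    package main

    import (
    	"fmt"
    	"os"
    	"path"
    )

    // maskUnit symlinks unitPath to /dev/null, creating parent directories if
    // the path does not exist and removing any existing file so the symlink
    // can be created, mirroring the behaviour of the removed MaskUnit.
    func maskUnit(unitPath string) error {
    	if _, err := os.Stat(unitPath); os.IsNotExist(err) {
    		if err := os.MkdirAll(path.Dir(unitPath), 0755); err != nil {
    			return err
    		}
    	} else if err := os.Remove(unitPath); err != nil {
    		return err
    	}
    	return os.Symlink("/dev/null", unitPath)
    }

    func main() {
    	// Illustrative path only; a real caller would pass a unit destination.
    	if err := maskUnit("/tmp/example-unit-dir/example.service"); err != nil {
    		fmt.Println("mask failed:", err)
    	}
    }
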
diff --git a/system/user.go b/system/user.go
index 523f0cf..ce4ea8e 100644
--- a/system/user.go
+++ b/system/user.go
@@ -18,15 +18,22 @@ import (
"fmt"
"log"
"os/exec"
- "os/user"
"strings"
"github.com/coreos/coreos-cloudinit/config"
)
+func UserHome(name string) (string, error) {
+ output, err := exec.Command("getent", "passwd", name).CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+ passwd := strings.Split(string(output), ":")
+ return passwd[5], nil
+}
+
func UserExists(u *config.User) bool {
- _, err := user.Lookup(u.Name)
- return err == nil
+ return exec.Command("getent", "shadow", u.Name).Run() == nil
}
func CreateUser(u *config.User) error {
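
Editorial note (not part of the patch): the new UserHome above shells out to getent passwd and returns field index 5 (the home directory). A defensive sketch of the same lookup; the trailing-newline trim and field-count check are extra safeguards not present in the patch itself:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // userHome resolves a user's home directory from getent passwd output,
    // which has the form name:x:uid:gid:gecos:home:shell.
    func userHome(name string) (string, error) {
    	out, err := exec.Command("getent", "passwd", name).CombinedOutput()
    	if err != nil {
    		return "", err
    	}
    	fields := strings.Split(strings.TrimSpace(string(out)), ":")
    	if len(fields) < 6 {
    		return "", fmt.Errorf("unexpected getent output for %q", name)
    	}
    	return fields[5], nil
    }

    func main() {
    	if home, err := userHome("root"); err == nil {
    		fmt.Println(home) // typically /root
    	}
    }
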