From d0d888c16029c44626867a5f75da35f150aea0af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josef=20Fr=C3=B6hle?= Date: Thu, 13 Dec 2018 20:33:29 +0100 Subject: [PATCH] vendor --- .gitignore | 1 - go.mod | 1 - vendor/github.com/beevik/etree/.travis.yml | 19 + vendor/github.com/beevik/etree/CONTRIBUTORS | 9 + vendor/github.com/beevik/etree/LICENSE | 24 + vendor/github.com/beevik/etree/README.md | 203 ++++ .../github.com/beevik/etree/RELEASE_NOTES.md | 27 + vendor/github.com/beevik/etree/etree.go | 1034 +++++++++++++++++ vendor/github.com/beevik/etree/helpers.go | 188 +++ vendor/github.com/beevik/etree/path.go | 533 +++++++++ vendor/github.com/crewjam/errset/.travis.yml | 5 + vendor/github.com/crewjam/errset/LICENSE | 24 + vendor/github.com/crewjam/errset/README.md | 21 + vendor/github.com/crewjam/errset/errset.go | 48 + vendor/github.com/kr/pretty/.gitignore | 4 + vendor/github.com/kr/pretty/License | 21 + vendor/github.com/kr/pretty/Readme | 9 + vendor/github.com/kr/pretty/diff.go | 265 +++++ vendor/github.com/kr/pretty/formatter.go | 328 ++++++ vendor/github.com/kr/pretty/go.mod | 3 + vendor/github.com/kr/pretty/pretty.go | 108 ++ vendor/github.com/kr/pretty/zero.go | 41 + vendor/github.com/kr/text/License | 19 + vendor/github.com/kr/text/Readme | 3 + vendor/github.com/kr/text/doc.go | 3 + vendor/github.com/kr/text/go.mod | 3 + vendor/github.com/kr/text/indent.go | 74 ++ vendor/github.com/kr/text/wrap.go | 86 ++ .../ma314smith/signedxml/.gitignore | 24 + .../ma314smith/signedxml/.travis.yml | 16 + .../ma314smith/signedxml/LICENSE.md | 21 + .../github.com/ma314smith/signedxml/README.md | 98 ++ .../signedxml/envelopedsignature.go | 39 + .../signedxml/exclusivecanonicalization.go | 302 +++++ .../ma314smith/signedxml/signedxml.go | 319 +++++ .../github.com/ma314smith/signedxml/signer.go | 171 +++ .../ma314smith/signedxml/validator.go | 200 ++++ vendor/github.com/pkg/errors/.gitignore | 24 + vendor/github.com/pkg/errors/.travis.yml | 11 + vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/README.md | 52 + vendor/github.com/pkg/errors/appveyor.yml | 32 + vendor/github.com/pkg/errors/errors.go | 269 +++++ vendor/github.com/pkg/errors/stack.go | 178 +++ vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + .../x/crypto/ripemd160/ripemd160.go | 120 ++ .../x/crypto/ripemd160/ripemd160block.go | 165 +++ vendor/gopkg.in/check.v1/.gitignore | 4 + vendor/gopkg.in/check.v1/.travis.yml | 3 + vendor/gopkg.in/check.v1/LICENSE | 25 + vendor/gopkg.in/check.v1/README.md | 20 + vendor/gopkg.in/check.v1/TODO | 2 + vendor/gopkg.in/check.v1/benchmark.go | 187 +++ vendor/gopkg.in/check.v1/check.go | 882 ++++++++++++++ vendor/gopkg.in/check.v1/checkers.go | 524 +++++++++ vendor/gopkg.in/check.v1/helpers.go | 231 ++++ vendor/gopkg.in/check.v1/printer.go | 168 +++ vendor/gopkg.in/check.v1/reporter.go | 88 ++ vendor/gopkg.in/check.v1/run.go | 175 +++ vendor/modules.txt | 16 + 63 files changed, 7546 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/beevik/etree/.travis.yml create mode 100644 vendor/github.com/beevik/etree/CONTRIBUTORS create mode 100644 vendor/github.com/beevik/etree/LICENSE create mode 100644 vendor/github.com/beevik/etree/README.md create mode 100644 vendor/github.com/beevik/etree/RELEASE_NOTES.md create mode 100644 vendor/github.com/beevik/etree/etree.go create mode 100644 vendor/github.com/beevik/etree/helpers.go create mode 100644 
vendor/github.com/beevik/etree/path.go create mode 100644 vendor/github.com/crewjam/errset/.travis.yml create mode 100644 vendor/github.com/crewjam/errset/LICENSE create mode 100644 vendor/github.com/crewjam/errset/README.md create mode 100644 vendor/github.com/crewjam/errset/errset.go create mode 100644 vendor/github.com/kr/pretty/.gitignore create mode 100644 vendor/github.com/kr/pretty/License create mode 100644 vendor/github.com/kr/pretty/Readme create mode 100644 vendor/github.com/kr/pretty/diff.go create mode 100644 vendor/github.com/kr/pretty/formatter.go create mode 100644 vendor/github.com/kr/pretty/go.mod create mode 100644 vendor/github.com/kr/pretty/pretty.go create mode 100644 vendor/github.com/kr/pretty/zero.go create mode 100644 vendor/github.com/kr/text/License create mode 100644 vendor/github.com/kr/text/Readme create mode 100644 vendor/github.com/kr/text/doc.go create mode 100644 vendor/github.com/kr/text/go.mod create mode 100644 vendor/github.com/kr/text/indent.go create mode 100644 vendor/github.com/kr/text/wrap.go create mode 100644 vendor/github.com/ma314smith/signedxml/.gitignore create mode 100644 vendor/github.com/ma314smith/signedxml/.travis.yml create mode 100644 vendor/github.com/ma314smith/signedxml/LICENSE.md create mode 100644 vendor/github.com/ma314smith/signedxml/README.md create mode 100644 vendor/github.com/ma314smith/signedxml/envelopedsignature.go create mode 100644 vendor/github.com/ma314smith/signedxml/exclusivecanonicalization.go create mode 100644 vendor/github.com/ma314smith/signedxml/signedxml.go create mode 100644 vendor/github.com/ma314smith/signedxml/signer.go create mode 100644 vendor/github.com/ma314smith/signedxml/validator.go create mode 100644 vendor/github.com/pkg/errors/.gitignore create mode 100644 vendor/github.com/pkg/errors/.travis.yml create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/appveyor.yml create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go create mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go create mode 100644 vendor/gopkg.in/check.v1/.gitignore create mode 100644 vendor/gopkg.in/check.v1/.travis.yml create mode 100644 vendor/gopkg.in/check.v1/LICENSE create mode 100644 vendor/gopkg.in/check.v1/README.md create mode 100644 vendor/gopkg.in/check.v1/TODO create mode 100644 vendor/gopkg.in/check.v1/benchmark.go create mode 100644 vendor/gopkg.in/check.v1/check.go create mode 100644 vendor/gopkg.in/check.v1/checkers.go create mode 100644 vendor/gopkg.in/check.v1/helpers.go create mode 100644 vendor/gopkg.in/check.v1/printer.go create mode 100644 vendor/gopkg.in/check.v1/reporter.go create mode 100644 vendor/gopkg.in/check.v1/run.go create mode 100644 vendor/modules.txt diff --git a/.gitignore b/.gitignore index eb58055..c450615 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,2 @@ target -vendor /Dockerfile diff --git a/go.mod b/go.mod index 4bece88..b30a8bd 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,6 @@ module git.deineagentur.com/DeineAgenturUG/go-xmlsec require ( - git.deineagentur.com/DeineAgenturUG/go-xmlsec 
v0.0.0-20181213183304-bc74cdd9f06f github.com/beevik/etree v1.0.1 // indirect github.com/crewjam/errset v0.0.0-20160219153700-f78d65de925c github.com/kr/pretty v0.1.0 // indirect diff --git a/vendor/github.com/beevik/etree/.travis.yml b/vendor/github.com/beevik/etree/.travis.yml new file mode 100644 index 0000000..c47175e --- /dev/null +++ b/vendor/github.com/beevik/etree/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: false + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - tip + +matrix: + allow_failures: + - go: tip + +script: + - go vet ./... + - go test -v ./... diff --git a/vendor/github.com/beevik/etree/CONTRIBUTORS b/vendor/github.com/beevik/etree/CONTRIBUTORS new file mode 100644 index 0000000..45a5395 --- /dev/null +++ b/vendor/github.com/beevik/etree/CONTRIBUTORS @@ -0,0 +1,9 @@ +Brett Vickers (beevik) +Felix Geisendörfer (felixge) +Kamil Kisiel (kisielk) +Graham King (grahamking) +Matt Smith (ma314smith) +Michal Jemala (michaljemala) +Nicolas Piganeau (npiganeau) +Chris Brown (ccbrown) +Earncef Sequeira (earncef) \ No newline at end of file diff --git a/vendor/github.com/beevik/etree/LICENSE b/vendor/github.com/beevik/etree/LICENSE new file mode 100644 index 0000000..e14ad68 --- /dev/null +++ b/vendor/github.com/beevik/etree/LICENSE @@ -0,0 +1,24 @@ +Copyright 2015 Brett Vickers. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/etree/README.md b/vendor/github.com/beevik/etree/README.md new file mode 100644 index 0000000..2855843 --- /dev/null +++ b/vendor/github.com/beevik/etree/README.md @@ -0,0 +1,203 @@ +[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree) +[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree) + +etree +===== + +The etree package is a lightweight, pure go package that expresses XML in +the form of an element tree. Its design was inspired by the Python +[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html) +module. Some of the package's features include: + +* Represents XML documents as trees of elements for easy traversal. +* Imports, serializes, modifies or creates XML documents from scratch. +* Writes and reads XML to/from files, byte slices, strings and io interfaces. 
+* Performs simple or complex searches with lightweight XPath-like query APIs.
+* Auto-indents XML using spaces or tabs for better readability.
+* Implemented in pure go; depends only on standard go libraries.
+* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
+  package.
+
+### Creating an XML document
+
+The following example creates an XML document from scratch using the etree
+package and outputs its indented contents to stdout.
+```go
+doc := etree.NewDocument()
+doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
+doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
+
+people := doc.CreateElement("People")
+people.CreateComment("These are all known people")
+
+jon := people.CreateElement("Person")
+jon.CreateAttr("name", "Jon")
+
+sally := people.CreateElement("Person")
+sally.CreateAttr("name", "Sally")
+
+doc.Indent(2)
+doc.WriteTo(os.Stdout)
+```
+
+Output:
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="style.xsl"?>
+<People>
+  <!--These are all known people-->
+  <Person name="Jon"/>
+  <Person name="Sally"/>
+</People>
+```
+
+### Reading an XML file
+
+Suppose you have a file on disk called `bookstore.xml` containing the
+following data:
+
+```xml
+<bookstore xmlns:p="urn:books-com:prices">
+
+  <book category="COOKING">
+    <title lang="en">Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <p:price>30.00</p:price>
+  </book>
+
+  <book category="CHILDREN">
+    <title lang="en">Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <p:price>29.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">XQuery Kick Start</title>
+    <author>James McGovern</author>
+    <author>Per Bothner</author>
+    <author>Kurt Cagle</author>
+    <author>James Linn</author>
+    <author>Vaidyanathan Nagarajan</author>
+    <year>2003</year>
+    <p:price>49.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <p:price>39.95</p:price>
+  </book>
+
+</bookstore>
+```
+
+This code reads the file's contents into an etree document.
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromFile("bookstore.xml"); err != nil {
+    panic(err)
+}
+```
+
+You can also read XML from a string, a byte slice, or an `io.Reader`.
+
+### Processing elements and attributes
+
+This example illustrates several ways to access elements and attributes using
+etree selection queries.
+```go
+root := doc.SelectElement("bookstore")
+fmt.Println("ROOT element:", root.Tag)
+
+for _, book := range root.SelectElements("book") {
+    fmt.Println("CHILD element:", book.Tag)
+    if title := book.SelectElement("title"); title != nil {
+        lang := title.SelectAttrValue("lang", "unknown")
+        fmt.Printf("  TITLE: %s (%s)\n", title.Text(), lang)
+    }
+    for _, attr := range book.Attr {
+        fmt.Printf("  ATTR: %s=%s\n", attr.Key, attr.Value)
+    }
+}
+```
+Output:
+```
+ROOT element: bookstore
+CHILD element: book
+  TITLE: Everyday Italian (en)
+  ATTR: category=COOKING
+CHILD element: book
+  TITLE: Harry Potter (en)
+  ATTR: category=CHILDREN
+CHILD element: book
+  TITLE: XQuery Kick Start (en)
+  ATTR: category=WEB
+CHILD element: book
+  TITLE: Learning XML (en)
+  ATTR: category=WEB
+```
+
+### Path queries
+
+This example uses etree's path functions to select all book titles that fall
+into the category of 'WEB'. The double-slash prefix in the path causes the
+search for book elements to occur recursively; book elements may appear at any
+level of the XML hierarchy.
+```go
+for _, t := range doc.FindElements("//book[@category='WEB']/title") {
+    fmt.Println("Title:", t.Text())
+}
+```
+
+Output:
+```
+Title: XQuery Kick Start
+Title: Learning XML
+```
+
+This example finds the first book element under the root bookstore element and
+outputs the tag and text of each of its child elements.
+```go
+for _, e := range doc.FindElements("./bookstore/book[1]/*") {
+    fmt.Printf("%s: %s\n", e.Tag, e.Text())
+}
+```
+
+Output:
+```
+title: Everyday Italian
+author: Giada De Laurentiis
+year: 2005
+price: 30.00
+```
+
+This example finds all books with a price of 49.99 and outputs their titles.
+```go +path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title") +for _, e := range doc.FindElementsPath(path) { + fmt.Println(e.Text()) +} +``` + +Output: +``` +XQuery Kick Start +``` + +Note that this example uses the FindElementsPath function, which takes as an +argument a pre-compiled path object. Use precompiled paths when you plan to +search with the same path more than once. + +### Other features + +These are just a few examples of the things the etree package can do. See the +[documentation](http://godoc.org/github.com/beevik/etree) for a complete +description of its capabilities. + +### Contributing + +This project accepts contributions. Just fork the repo and submit a pull +request! diff --git a/vendor/github.com/beevik/etree/RELEASE_NOTES.md b/vendor/github.com/beevik/etree/RELEASE_NOTES.md new file mode 100644 index 0000000..b3a39bd --- /dev/null +++ b/vendor/github.com/beevik/etree/RELEASE_NOTES.md @@ -0,0 +1,27 @@ +Release v1.0.1 +============== + +**Changes** + +* Added support for absolute etree Path queries. An absolute path begins with + `/` or `//` and begins its search from the element's document root. +* Added [`GetPath`](https://godoc.org/github.com/beevik/etree#Element.GetPath) + and [`GetRelativePath`](https://godoc.org/github.com/beevik/etree#Element.GetRelativePath) + functions to the [`Element`](https://godoc.org/github.com/beevik/etree#Element) + type. + +**Breaking changes** + +* A path starting with `//` is now interpreted as an absolute path. + Previously, it was interpreted as a relative path starting from the element + whose + [`FindElement`](https://godoc.org/github.com/beevik/etree#Element.FindElement) + method was called. To remain compatible with this release, all paths + prefixed with `//` should be prefixed with `.//` when called from any + element other than the document's root. + + +Release v1.0.0 +============== + +Initial release. diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go new file mode 100644 index 0000000..461a7aa --- /dev/null +++ b/vendor/github.com/beevik/etree/etree.go @@ -0,0 +1,1034 @@ +// Copyright 2015 Brett Vickers. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package etree provides XML services through an Element Tree +// abstraction. +package etree + +import ( + "bufio" + "bytes" + "encoding/xml" + "errors" + "io" + "os" + "strings" +) + +const ( + // NoIndent is used with Indent to disable all indenting. + NoIndent = -1 +) + +// ErrXML is returned when XML parsing fails due to incorrect formatting. +var ErrXML = errors.New("etree: invalid XML format") + +// ReadSettings allow for changing the default behavior of the ReadFrom* +// methods. +type ReadSettings struct { + // CharsetReader to be passed to standard xml.Decoder. Default: nil. + CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // Permissive allows input containing common mistakes such as missing tags + // or attribute values. Default: false. + Permissive bool +} + +// newReadSettings creates a default ReadSettings record. +func newReadSettings() ReadSettings { + return ReadSettings{} +} + +// WriteSettings allow for changing the serialization behavior of the WriteTo* +// methods. +type WriteSettings struct { + // CanonicalEndTags forces the production of XML end tags, even for + // elements that have no child elements. Default: false. 
+ CanonicalEndTags bool + + // CanonicalText forces the production of XML character references for + // text data characters &, <, and >. If false, XML character references + // are also produced for " and '. Default: false. + CanonicalText bool + + // CanonicalAttrVal forces the production of XML character references for + // attribute value characters &, < and ". If false, XML character + // references are also produced for > and '. Default: false. + CanonicalAttrVal bool +} + +// newWriteSettings creates a default WriteSettings record. +func newWriteSettings() WriteSettings { + return WriteSettings{ + CanonicalEndTags: false, + CanonicalText: false, + CanonicalAttrVal: false, + } +} + +// A Token is an empty interface that represents an Element, CharData, +// Comment, Directive, or ProcInst. +type Token interface { + Parent() *Element + dup(parent *Element) Token + setParent(parent *Element) + writeTo(w *bufio.Writer, s *WriteSettings) +} + +// A Document is a container holding a complete XML hierarchy. Its embedded +// element contains zero or more children, one of which is usually the root +// element. The embedded element may include other children such as +// processing instructions or BOM CharData tokens. +type Document struct { + Element + ReadSettings ReadSettings + WriteSettings WriteSettings +} + +// An Element represents an XML element, its attributes, and its child tokens. +type Element struct { + Space, Tag string // namespace and tag + Attr []Attr // key-value attribute pairs + Child []Token // child tokens (elements, comments, etc.) + parent *Element // parent element +} + +// An Attr represents a key-value attribute of an XML element. +type Attr struct { + Space, Key string // The attribute's namespace and key + Value string // The attribute value string +} + +// CharData represents character data within XML. +type CharData struct { + Data string + parent *Element + whitespace bool +} + +// A Comment represents an XML comment. +type Comment struct { + Data string + parent *Element +} + +// A Directive represents an XML directive. +type Directive struct { + Data string + parent *Element +} + +// A ProcInst represents an XML processing instruction. +type ProcInst struct { + Target string + Inst string + parent *Element +} + +// NewDocument creates an XML document without a root element. +func NewDocument() *Document { + return &Document{ + Element{Child: make([]Token, 0)}, + newReadSettings(), + newWriteSettings(), + } +} + +// Copy returns a recursive, deep copy of the document. +func (d *Document) Copy() *Document { + return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings} +} + +// Root returns the root element of the document, or nil if there is no root +// element. +func (d *Document) Root() *Element { + for _, t := range d.Child { + if c, ok := t.(*Element); ok { + return c + } + } + return nil +} + +// SetRoot replaces the document's root element with e. If the document +// already has a root when this function is called, then the document's +// original root is unbound first. If the element e is bound to another +// document (or to another element within a document), then it is unbound +// first. +func (d *Document) SetRoot(e *Element) { + if e.parent != nil { + e.parent.RemoveChild(e) + } + e.setParent(&d.Element) + + for i, t := range d.Child { + if _, ok := t.(*Element); ok { + t.setParent(nil) + d.Child[i] = e + return + } + } + d.Child = append(d.Child, e) +} + +// ReadFrom reads XML from the reader r into the document d. 
It returns the +// number of bytes read and any error encountered. +func (d *Document) ReadFrom(r io.Reader) (n int64, err error) { + return d.Element.readFrom(r, d.ReadSettings) +} + +// ReadFromFile reads XML from the string s into the document d. +func (d *Document) ReadFromFile(filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + _, err = d.ReadFrom(f) + return err +} + +// ReadFromBytes reads XML from the byte slice b into the document d. +func (d *Document) ReadFromBytes(b []byte) error { + _, err := d.ReadFrom(bytes.NewReader(b)) + return err +} + +// ReadFromString reads XML from the string s into the document d. +func (d *Document) ReadFromString(s string) error { + _, err := d.ReadFrom(strings.NewReader(s)) + return err +} + +// WriteTo serializes an XML document into the writer w. It +// returns the number of bytes written and any error encountered. +func (d *Document) WriteTo(w io.Writer) (n int64, err error) { + cw := newCountWriter(w) + b := bufio.NewWriter(cw) + for _, c := range d.Child { + c.writeTo(b, &d.WriteSettings) + } + err, n = b.Flush(), cw.bytes + return +} + +// WriteToFile serializes an XML document into the file named +// filename. +func (d *Document) WriteToFile(filename string) error { + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + _, err = d.WriteTo(f) + return err +} + +// WriteToBytes serializes the XML document into a slice of +// bytes. +func (d *Document) WriteToBytes() (b []byte, err error) { + var buf bytes.Buffer + if _, err = d.WriteTo(&buf); err != nil { + return + } + return buf.Bytes(), nil +} + +// WriteToString serializes the XML document into a string. +func (d *Document) WriteToString() (s string, err error) { + var b []byte + if b, err = d.WriteToBytes(); err != nil { + return + } + return string(b), nil +} + +type indentFunc func(depth int) string + +// Indent modifies the document's element tree by inserting CharData entities +// containing carriage returns and indentation. The amount of indentation per +// depth level is given as spaces. Pass etree.NoIndent for spaces if you want +// no indentation at all. +func (d *Document) Indent(spaces int) { + var indent indentFunc + switch { + case spaces < 0: + indent = func(depth int) string { return "" } + default: + indent = func(depth int) string { return crIndent(depth*spaces, crsp) } + } + d.Element.indent(0, indent) +} + +// IndentTabs modifies the document's element tree by inserting CharData +// entities containing carriage returns and tabs for indentation. One tab is +// used per indentation level. +func (d *Document) IndentTabs() { + indent := func(depth int) string { return crIndent(depth, crtab) } + d.Element.indent(0, indent) +} + +// NewElement creates an unparented element with the specified tag. The tag +// may be prefixed by a namespace and a colon. +func NewElement(tag string) *Element { + space, stag := spaceDecompose(tag) + return newElement(space, stag, nil) +} + +// newElement is a helper function that creates an element and binds it to +// a parent element if possible. +func newElement(space, tag string, parent *Element) *Element { + e := &Element{ + Space: space, + Tag: tag, + Attr: make([]Attr, 0), + Child: make([]Token, 0), + parent: parent, + } + if parent != nil { + parent.addChild(e) + } + return e +} + +// Copy creates a recursive, deep copy of the element and all its attributes +// and children. 
The returned element has no parent but can be parented to a +// another element using AddElement, or to a document using SetRoot. +func (e *Element) Copy() *Element { + var parent *Element + return e.dup(parent).(*Element) +} + +// Text returns the characters immediately following the element's +// opening tag. +func (e *Element) Text() string { + if len(e.Child) == 0 { + return "" + } + if cd, ok := e.Child[0].(*CharData); ok { + return cd.Data + } + return "" +} + +// SetText replaces an element's subsidiary CharData text with a new string. +func (e *Element) SetText(text string) { + if len(e.Child) > 0 { + if cd, ok := e.Child[0].(*CharData); ok { + cd.Data = text + return + } + } + cd := newCharData(text, false, e) + copy(e.Child[1:], e.Child[0:]) + e.Child[0] = cd +} + +// CreateElement creates an element with the specified tag and adds it as the +// last child element of the element e. The tag may be prefixed by a namespace +// and a colon. +func (e *Element) CreateElement(tag string) *Element { + space, stag := spaceDecompose(tag) + return newElement(space, stag, e) +} + +// AddChild adds the token t as the last child of element e. If token t was +// already the child of another element, it is first removed from its current +// parent element. +func (e *Element) AddChild(t Token) { + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + t.setParent(e) + e.addChild(t) +} + +// InsertChild inserts the token t before e's existing child token ex. If ex +// is nil (or if ex is not a child of e), then t is added to the end of e's +// child token list. If token t was already the child of another element, it +// is first removed from its current parent element. +func (e *Element) InsertChild(ex Token, t Token) { + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + t.setParent(e) + + for i, c := range e.Child { + if c == ex { + e.Child = append(e.Child, nil) + copy(e.Child[i+1:], e.Child[i:]) + e.Child[i] = t + return + } + } + e.addChild(t) +} + +// RemoveChild attempts to remove the token t from element e's list of +// children. If the token t is a child of e, then it is returned. Otherwise, +// nil is returned. +func (e *Element) RemoveChild(t Token) Token { + for i, c := range e.Child { + if c == t { + e.Child = append(e.Child[:i], e.Child[i+1:]...) + c.setParent(nil) + return t + } + } + return nil +} + +// ReadFrom reads XML from the reader r and stores the result as a new child +// of element e. +func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) { + r := newCountReader(ri) + dec := xml.NewDecoder(r) + dec.CharsetReader = settings.CharsetReader + dec.Strict = !settings.Permissive + var stack stack + stack.push(e) + for { + t, err := dec.RawToken() + switch { + case err == io.EOF: + return r.bytes, nil + case err != nil: + return r.bytes, err + case stack.empty(): + return r.bytes, ErrXML + } + + top := stack.peek().(*Element) + + switch t := t.(type) { + case xml.StartElement: + e := newElement(t.Name.Space, t.Name.Local, top) + for _, a := range t.Attr { + e.createAttr(a.Name.Space, a.Name.Local, a.Value) + } + stack.push(e) + case xml.EndElement: + stack.pop() + case xml.CharData: + data := string(t) + newCharData(data, isWhitespace(data), top) + case xml.Comment: + newComment(string(t), top) + case xml.Directive: + newDirective(string(t), top) + case xml.ProcInst: + newProcInst(t.Target, string(t.Inst), top) + } + } +} + +// SelectAttr finds an element attribute matching the requested key and +// returns it if found. 
Returns nil if no matching attribute is found. The key +// may be prefixed by a namespace and a colon. +func (e *Element) SelectAttr(key string) *Attr { + space, skey := spaceDecompose(key) + for i, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return &e.Attr[i] + } + } + return nil +} + +// SelectAttrValue finds an element attribute matching the requested key and +// returns its value if found. The key may be prefixed by a namespace and a +// colon. If the key is not found, the dflt value is returned instead. +func (e *Element) SelectAttrValue(key, dflt string) string { + space, skey := spaceDecompose(key) + for _, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return a.Value + } + } + return dflt +} + +// ChildElements returns all elements that are children of element e. +func (e *Element) ChildElements() []*Element { + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok { + elements = append(elements, c) + } + } + return elements +} + +// SelectElement returns the first child element with the given tag. The tag +// may be prefixed by a namespace and a colon. Returns nil if no element with +// a matching tag was found. +func (e *Element) SelectElement(tag string) *Element { + space, stag := spaceDecompose(tag) + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + return c + } + } + return nil +} + +// SelectElements returns a slice of all child elements with the given tag. +// The tag may be prefixed by a namespace and a colon. +func (e *Element) SelectElements(tag string) []*Element { + space, stag := spaceDecompose(tag) + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + elements = append(elements, c) + } + } + return elements +} + +// FindElement returns the first element matched by the XPath-like path +// string. Returns nil if no element is found using the path. Panics if an +// invalid path string is supplied. +func (e *Element) FindElement(path string) *Element { + return e.FindElementPath(MustCompilePath(path)) +} + +// FindElementPath returns the first element matched by the XPath-like path +// string. Returns nil if no element is found using the path. +func (e *Element) FindElementPath(path Path) *Element { + p := newPather() + elements := p.traverse(e, path) + switch { + case len(elements) > 0: + return elements[0] + default: + return nil + } +} + +// FindElements returns a slice of elements matched by the XPath-like path +// string. Panics if an invalid path string is supplied. +func (e *Element) FindElements(path string) []*Element { + return e.FindElementsPath(MustCompilePath(path)) +} + +// FindElementsPath returns a slice of elements matched by the Path object. +func (e *Element) FindElementsPath(path Path) []*Element { + p := newPather() + return p.traverse(e, path) +} + +// GetPath returns the absolute path of the element. +func (e *Element) GetPath() string { + path := []string{} + for seg := e; seg != nil; seg = seg.Parent() { + if seg.Tag != "" { + path = append(path, seg.Tag) + } + } + + // Reverse the path. + for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + + return "/" + strings.Join(path, "/") +} + +// GetRelativePath returns the path of the element relative to the source +// element. 
If the two elements are not part of the same element tree, then +// GetRelativePath returns the empty string. +func (e *Element) GetRelativePath(source *Element) string { + var path []*Element + + if source == nil { + return "" + } + + // Build a reverse path from the element toward the root. Stop if the + // source element is encountered. + var seg *Element + for seg = e; seg != nil && seg != source; seg = seg.Parent() { + path = append(path, seg) + } + + // If we found the source element, reverse the path and compose the + // string. + if seg == source { + if len(path) == 0 { + return "." + } + parts := []string{} + for i := len(path) - 1; i >= 0; i-- { + parts = append(parts, path[i].Tag) + } + return "./" + strings.Join(parts, "/") + } + + // The source wasn't encountered, so climb from the source element toward + // the root of the tree until an element in the reversed path is + // encountered. + + findPathIndex := func(e *Element, path []*Element) int { + for i, ee := range path { + if e == ee { + return i + } + } + return -1 + } + + climb := 0 + for seg = source; seg != nil; seg = seg.Parent() { + i := findPathIndex(seg, path) + if i >= 0 { + path = path[:i] // truncate at found segment + break + } + climb++ + } + + // No element in the reversed path was encountered, so the two elements + // must not be part of the same tree. + if seg == nil { + return "" + } + + // Reverse the (possibly truncated) path and prepend ".." segments to + // climb. + parts := []string{} + for i := 0; i < climb; i++ { + parts = append(parts, "..") + } + for i := len(path) - 1; i >= 0; i-- { + parts = append(parts, path[i].Tag) + } + return strings.Join(parts, "/") +} + +// indent recursively inserts proper indentation between an +// XML element's child tokens. +func (e *Element) indent(depth int, indent indentFunc) { + e.stripIndent() + n := len(e.Child) + if n == 0 { + return + } + + oldChild := e.Child + e.Child = make([]Token, 0, n*2+1) + isCharData, firstNonCharData := false, true + for _, c := range oldChild { + + // Insert CR+indent before child if it's not character data. + // Exceptions: when it's the first non-character-data child, or when + // the child is at root depth. + _, isCharData = c.(*CharData) + if !isCharData { + if !firstNonCharData || depth > 0 { + newCharData(indent(depth), true, e) + } + firstNonCharData = false + } + + e.addChild(c) + + // Recursively process child elements. + if ce, ok := c.(*Element); ok { + ce.indent(depth+1, indent) + } + } + + // Insert CR+indent before the last child. + if !isCharData { + if !firstNonCharData || depth > 0 { + newCharData(indent(depth-1), true, e) + } + } +} + +// stripIndent removes any previously inserted indentation. +func (e *Element) stripIndent() { + // Count the number of non-indent child tokens + n := len(e.Child) + for _, c := range e.Child { + if cd, ok := c.(*CharData); ok && cd.whitespace { + n-- + } + } + if n == len(e.Child) { + return + } + + // Strip out indent CharData + newChild := make([]Token, n) + j := 0 + for _, c := range e.Child { + if cd, ok := c.(*CharData); ok && cd.whitespace { + continue + } + newChild[j] = c + j++ + } + e.Child = newChild +} + +// dup duplicates the element. 
+func (e *Element) dup(parent *Element) Token {
+	ne := &Element{
+		Space:  e.Space,
+		Tag:    e.Tag,
+		Attr:   make([]Attr, len(e.Attr)),
+		Child:  make([]Token, len(e.Child)),
+		parent: parent,
+	}
+	for i, t := range e.Child {
+		ne.Child[i] = t.dup(ne)
+	}
+	for i, a := range e.Attr {
+		ne.Attr[i] = a
+	}
+	return ne
+}
+
+// Parent returns the element token's parent element, or nil if it has no
+// parent.
+func (e *Element) Parent() *Element {
+	return e.parent
+}
+
+// setParent replaces the element token's parent.
+func (e *Element) setParent(parent *Element) {
+	e.parent = parent
+}
+
+// writeTo serializes the element to the writer w.
+func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteByte('<')
+	if e.Space != "" {
+		w.WriteString(e.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(e.Tag)
+	for _, a := range e.Attr {
+		w.WriteByte(' ')
+		a.writeTo(w, s)
+	}
+	if len(e.Child) > 0 {
+		w.WriteString(">")
+		for _, c := range e.Child {
+			c.writeTo(w, s)
+		}
+		w.Write([]byte{'<', '/'})
+		if e.Space != "" {
+			w.WriteString(e.Space)
+			w.WriteByte(':')
+		}
+		w.WriteString(e.Tag)
+		w.WriteByte('>')
+	} else {
+		if s.CanonicalEndTags {
+			w.Write([]byte{'>', '<', '/'})
+			if e.Space != "" {
+				w.WriteString(e.Space)
+				w.WriteByte(':')
+			}
+			w.WriteString(e.Tag)
+			w.WriteByte('>')
+		} else {
+			w.Write([]byte{'/', '>'})
+		}
+	}
+}
+
+// addChild adds a child token to the element e.
+func (e *Element) addChild(t Token) {
+	e.Child = append(e.Child, t)
+}
+
+// CreateAttr creates an attribute and adds it to element e. The key may be
+// prefixed by a namespace and a colon. If an attribute with the key already
+// exists, its value is replaced.
+func (e *Element) CreateAttr(key, value string) *Attr {
+	space, skey := spaceDecompose(key)
+	return e.createAttr(space, skey, value)
+}
+
+// createAttr is a helper function that creates attributes.
+func (e *Element) createAttr(space, key, value string) *Attr {
+	for i, a := range e.Attr {
+		if space == a.Space && key == a.Key {
+			e.Attr[i].Value = value
+			return &e.Attr[i]
+		}
+	}
+	a := Attr{space, key, value}
+	e.Attr = append(e.Attr, a)
+	return &e.Attr[len(e.Attr)-1]
+}
+
+// RemoveAttr removes and returns the first attribute of the element whose key
+// matches the given key. The key may be prefixed by a namespace and a colon.
+// If an equal attribute does not exist, nil is returned.
+func (e *Element) RemoveAttr(key string) *Attr {
+	space, skey := spaceDecompose(key)
+	for i, a := range e.Attr {
+		if space == a.Space && skey == a.Key {
+			e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
+			return &a
+		}
+	}
+	return nil
+}
+
+var xmlReplacerNormal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"'", "&apos;",
+	`"`, "&quot;",
+)
+
+var xmlReplacerCanonicalText = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"\r", "&#xD;",
+)
+
+var xmlReplacerCanonicalAttrVal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	`"`, "&quot;",
+	"\t", "&#x9;",
+	"\n", "&#xA;",
+	"\r", "&#xD;",
+)
+
+// writeTo serializes the attribute to the writer.
+func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) {
+	if a.Space != "" {
+		w.WriteString(a.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(a.Key)
+	w.WriteString(`="`)
+	var r *strings.Replacer
+	if s.CanonicalAttrVal {
+		r = xmlReplacerCanonicalAttrVal
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(a.Value))
+	w.WriteByte('"')
+}
+
+// NewCharData creates a parentless XML character data entity.
+func NewCharData(data string) *CharData {
+	return newCharData(data, false, nil)
+}
+
+// newCharData creates an XML character data entity and binds it to a parent
+// element. If parent is nil, the CharData token remains unbound.
+func newCharData(data string, whitespace bool, parent *Element) *CharData {
+	c := &CharData{
+		Data:       data,
+		whitespace: whitespace,
+		parent:     parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateCharData creates an XML character data entity and adds it as a child
+// of element e.
+func (e *Element) CreateCharData(data string) *CharData {
+	return newCharData(data, false, e)
+}
+
+// dup duplicates the character data.
+func (c *CharData) dup(parent *Element) Token {
+	return &CharData{
+		Data:       c.Data,
+		whitespace: c.whitespace,
+		parent:     parent,
+	}
+}
+
+// Parent returns the character data token's parent element, or nil if it has
+// no parent.
+func (c *CharData) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the character data token's parent.
+func (c *CharData) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the character data entity to the writer.
+func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) {
+	var r *strings.Replacer
+	if s.CanonicalText {
+		r = xmlReplacerCanonicalText
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(c.Data))
+}
+
+// NewComment creates a parentless XML comment.
+func NewComment(comment string) *Comment {
+	return newComment(comment, nil)
+}
+
+// newComment creates an XML comment and binds it to a parent element. If
+// parent is nil, the Comment remains unbound.
+func newComment(comment string, parent *Element) *Comment {
+	c := &Comment{
+		Data:   comment,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateComment creates an XML comment and adds it as a child of element e.
+func (e *Element) CreateComment(comment string) *Comment {
+	return newComment(comment, e)
+}
+
+// dup duplicates the comment.
+func (c *Comment) dup(parent *Element) Token {
+	return &Comment{
+		Data:   c.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns comment token's parent element, or nil if it has no parent.
+func (c *Comment) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the comment token's parent.
+func (c *Comment) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the comment to the writer.
+func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!--")
+	w.WriteString(c.Data)
+	w.WriteString("-->")
+}
+
+// NewDirective creates a parentless XML directive.
+func NewDirective(data string) *Directive {
+	return newDirective(data, nil)
+}
+
+// newDirective creates an XML directive and binds it to a parent element. If
+// parent is nil, the Directive remains unbound.
+func newDirective(data string, parent *Element) *Directive {
+	d := &Directive{
+		Data:   data,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(d)
+	}
+	return d
+}
+
+// CreateDirective creates an XML directive and adds it as the last child of
+// element e.
+func (e *Element) CreateDirective(data string) *Directive {
+	return newDirective(data, e)
+}
+
+// dup duplicates the directive.
+func (d *Directive) dup(parent *Element) Token {
+	return &Directive{
+		Data:   d.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns directive token's parent element, or nil if it has no
+// parent.
+func (d *Directive) Parent() *Element {
+	return d.parent
+}
+
+// setParent replaces the directive token's parent.
+func (d *Directive) setParent(parent *Element) {
+	d.parent = parent
+}
+
+// writeTo serializes the XML directive to the writer.
+func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!")
+	w.WriteString(d.Data)
+	w.WriteString(">")
+}
+
+// NewProcInst creates a parentless XML processing instruction.
+func NewProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, nil)
+}
+
+// newProcInst creates an XML processing instruction and binds it to a parent
+// element. If parent is nil, the ProcInst remains unbound.
+func newProcInst(target, inst string, parent *Element) *ProcInst {
+	p := &ProcInst{
+		Target: target,
+		Inst:   inst,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(p)
+	}
+	return p
+}
+
+// CreateProcInst creates a processing instruction and adds it as a child of
+// element e.
+func (e *Element) CreateProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, e)
+}
+
+// dup duplicates the procinst.
+func (p *ProcInst) dup(parent *Element) Token {
+	return &ProcInst{
+		Target: p.Target,
+		Inst:   p.Inst,
+		parent: parent,
+	}
+}
+
+// Parent returns processing instruction token's parent element, or nil if it
+// has no parent.
+func (p *ProcInst) Parent() *Element {
+	return p.parent
+}
+
+// setParent replaces the processing instruction token's parent.
+func (p *ProcInst) setParent(parent *Element) {
+	p.parent = parent
+}
+
+// writeTo serializes the processing instruction to the writer.
+func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<?")
+	w.WriteString(p.Target)
+	w.WriteString(" ")
+	w.WriteString(p.Inst)
+	w.WriteString("?>")
+}
diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go
new file mode 100644
index 0000000..4f8350e
--- /dev/null
+++ b/vendor/github.com/beevik/etree/helpers.go
@@ -0,0 +1,188 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"io"
+	"strings"
+)
+
+// A simple stack
+type stack struct {
+	data []interface{}
+}
+
+func (s *stack) empty() bool {
+	return len(s.data) == 0
+}
+
+func (s *stack) push(value interface{}) {
+	s.data = append(s.data, value)
+}
+
+func (s *stack) pop() interface{} {
+	value := s.data[len(s.data)-1]
+	s.data[len(s.data)-1] = nil
+	s.data = s.data[:len(s.data)-1]
+	return value
+}
+
+func (s *stack) peek() interface{} {
+	return s.data[len(s.data)-1]
+}
+
+// A fifo is a simple first-in-first-out queue.
+type fifo struct {
+	data       []interface{}
+	head, tail int
+}
+
+func (f *fifo) add(value interface{}) {
+	if f.len()+1 >= len(f.data) {
+		f.grow()
+	}
+	f.data[f.tail] = value
+	if f.tail++; f.tail == len(f.data) {
+		f.tail = 0
+	}
+}
+
+func (f *fifo) remove() interface{} {
+	value := f.data[f.head]
+	f.data[f.head] = nil
+	if f.head++; f.head == len(f.data) {
+		f.head = 0
+	}
+	return value
+}
+
+func (f *fifo) len() int {
+	if f.tail >= f.head {
+		return f.tail - f.head
+	}
+	return len(f.data) - f.head + f.tail
+}
+
+func (f *fifo) grow() {
+	c := len(f.data) * 2
+	if c == 0 {
+		c = 4
+	}
+	buf, count := make([]interface{}, c), f.len()
+	if f.tail >= f.head {
+		copy(buf[0:count], f.data[f.head:f.tail])
+	} else {
+		hindex := len(f.data) - f.head
+		copy(buf[0:hindex], f.data[f.head:])
+		copy(buf[hindex:count], f.data[:f.tail])
+	}
+	f.data, f.head, f.tail = buf, 0, count
+}
+
+// countReader implements a proxy reader that counts the number of
+// bytes read from its encapsulated reader.
+type countReader struct { + r io.Reader + bytes int64 +} + +func newCountReader(r io.Reader) *countReader { + return &countReader{r: r} +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + b, err := cr.r.Read(p) + cr.bytes += int64(b) + return b, err +} + +// countWriter implements a proxy writer that counts the number of +// bytes written by its encapsulated writer. +type countWriter struct { + w io.Writer + bytes int64 +} + +func newCountWriter(w io.Writer) *countWriter { + return &countWriter{w: w} +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + b, err := cw.w.Write(p) + cw.bytes += int64(b) + return b, err +} + +// isWhitespace returns true if the byte slice contains only +// whitespace characters. +func isWhitespace(s string) bool { + for i := 0; i < len(s); i++ { + if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' { + return false + } + } + return true +} + +// spaceMatch returns true if namespace a is the empty string +// or if namespace a equals namespace b. +func spaceMatch(a, b string) bool { + switch { + case a == "": + return true + default: + return a == b + } +} + +// spaceDecompose breaks a namespace:tag identifier at the ':' +// and returns the two parts. +func spaceDecompose(str string) (space, key string) { + colon := strings.IndexByte(str, ':') + if colon == -1 { + return "", str + } + return str[:colon], str[colon+1:] +} + +// Strings used by crIndent +const ( + crsp = "\n " + crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t" +) + +// crIndent returns a carriage return followed by n copies of the +// first non-CR character in the source string. +func crIndent(n int, source string) string { + switch { + case n < 0: + return source[:1] + case n < len(source): + return source[:n+1] + default: + return source + strings.Repeat(source[1:2], n-len(source)+1) + } +} + +// nextIndex returns the index of the next occurrence of sep in s, +// starting from offset. It returns -1 if the sep string is not found. +func nextIndex(s, sep string, offset int) int { + switch i := strings.Index(s[offset:], sep); i { + case -1: + return -1 + default: + return offset + i + } +} + +// isInteger returns true if the string s contains an integer. +func isInteger(s string) bool { + for i := 0; i < len(s); i++ { + if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') { + return false + } + } + return true +} diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go new file mode 100644 index 0000000..a1a59bd --- /dev/null +++ b/vendor/github.com/beevik/etree/path.go @@ -0,0 +1,533 @@ +// Copyright 2015 Brett Vickers. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package etree + +import ( + "strconv" + "strings" +) + +/* +A Path is an object that represents an optimized version of an XPath-like +search string. A path search string is a slash-separated series of "selectors" +allowing traversal through an XML hierarchy. Although etree path strings are +similar to XPath strings, they have a more limited set of selectors and +filtering options. The following selectors and filters are supported by etree +paths: + + . Select the current element. + .. Select the parent of the current element. + * Select all child elements of the current element. + / Select the root element when used at the start of a path. + // Select all descendants of the current element. If used at + the start of a path, select all descendants of the root. 
+ tag Select all child elements with the given tag. + [#] Select the element of the given index (1-based, + negative starts from the end). + [@attrib] Select all elements with the given attribute. + [@attrib='val'] Select all elements with the given attribute set to val. + [tag] Select all elements with a child element named tag. + [tag='val'] Select all elements with a child element named tag + and text matching val. + [text()] Select all elements with non-empty text. + [text()='val'] Select all elements whose text matches val. + +Examples: + +Select the bookstore child element of the root element: + /bookstore + +Beginning a search from the root element, select the title elements of all +descendant book elements having a 'category' attribute of 'WEB': + //book[@category='WEB']/title + +Beginning a search from the current element, select the first descendant book +element with a title child containing the text 'Great Expectations': + .//book[title='Great Expectations'][1] + +Beginning a search from the current element, select all children of book +elements with an attribute 'language' set to 'english': + ./book/*[@language='english'] + +Beginning a search from the current element, select all children of book +elements containing the text 'special': + ./book/*[text()='special'] + +Beginning a search from the current element, select all descendant book +elements whose title element has an attribute 'language' equal to 'french': + .//book/title[@language='french']/.. + +*/ +type Path struct { + segments []segment +} + +// ErrPath is returned by path functions when an invalid etree path is provided. +type ErrPath string + +// Error returns the string describing a path error. +func (err ErrPath) Error() string { + return "etree: " + string(err) +} + +// CompilePath creates an optimized version of an XPath-like string that +// can be used to query elements in an element tree. +func CompilePath(path string) (Path, error) { + var comp compiler + segments := comp.parsePath(path) + if comp.err != ErrPath("") { + return Path{nil}, comp.err + } + return Path{segments}, nil +} + +// MustCompilePath creates an optimized version of an XPath-like string that +// can be used to query elements in an element tree. Panics if an error +// occurs. Use this function to create Paths when you know the path is +// valid (i.e., if it's hard-coded). +func MustCompilePath(path string) Path { + p, err := CompilePath(path) + if err != nil { + panic(err) + } + return p +} + +// A segment is a portion of a path between "/" characters. +// It contains one selector and zero or more [filters]. +type segment struct { + sel selector + filters []filter +} + +func (seg *segment) apply(e *Element, p *pather) { + seg.sel.apply(e, p) + for _, f := range seg.filters { + f.apply(p) + } +} + +// A selector selects XML elements for consideration by the +// path traversal. +type selector interface { + apply(e *Element, p *pather) +} + +// A filter pares down a list of candidate XML elements based +// on a path filter in [brackets]. +type filter interface { + apply(p *pather) +} + +// A pather is helper object that traverses an element tree using +// a Path object. It collects and deduplicates all elements matching +// the path query. +type pather struct { + queue fifo + results []*Element + inResults map[*Element]bool + candidates []*Element + scratch []*Element // used by filters +} + +// A node represents an element and the remaining path segments that +// should be applied against it by the pather. 
+type node struct { + e *Element + segments []segment +} + +func newPather() *pather { + return &pather{ + results: make([]*Element, 0), + inResults: make(map[*Element]bool), + candidates: make([]*Element, 0), + scratch: make([]*Element, 0), + } +} + +// traverse follows the path from the element e, collecting +// and then returning all elements that match the path's selectors +// and filters. +func (p *pather) traverse(e *Element, path Path) []*Element { + for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { + p.eval(p.queue.remove().(node)) + } + return p.results +} + +// eval evalutes the current path node by applying the remaining +// path's selector rules against the node's element. +func (p *pather) eval(n node) { + p.candidates = p.candidates[0:0] + seg, remain := n.segments[0], n.segments[1:] + seg.apply(n.e, p) + + if len(remain) == 0 { + for _, c := range p.candidates { + if in := p.inResults[c]; !in { + p.inResults[c] = true + p.results = append(p.results, c) + } + } + } else { + for _, c := range p.candidates { + p.queue.add(node{c, remain}) + } + } +} + +// A compiler generates a compiled path from a path string. +type compiler struct { + err ErrPath +} + +// parsePath parses an XPath-like string describing a path +// through an element tree and returns a slice of segment +// descriptors. +func (c *compiler) parsePath(path string) []segment { + // If path ends with //, fix it + if strings.HasSuffix(path, "//") { + path = path + "*" + } + + var segments []segment + + // Check for an absolute path + if strings.HasPrefix(path, "/") { + segments = append(segments, segment{new(selectRoot), []filter{}}) + path = path[1:] + } + + // Split path into segments + for _, s := range splitPath(path) { + segments = append(segments, c.parseSegment(s)) + if c.err != ErrPath("") { + break + } + } + return segments +} + +func splitPath(path string) []string { + pieces := make([]string, 0) + start := 0 + inquote := false + for i := 0; i+1 <= len(path); i++ { + if path[i] == '\'' { + inquote = !inquote + } else if path[i] == '/' && !inquote { + pieces = append(pieces, path[start:i]) + start = i + 1 + } + } + return append(pieces, path[start:]) +} + +// parseSegment parses a path segment between / characters. +func (c *compiler) parseSegment(path string) segment { + pieces := strings.Split(path, "[") + seg := segment{ + sel: c.parseSelector(pieces[0]), + filters: []filter{}, + } + for i := 1; i < len(pieces); i++ { + fpath := pieces[i] + if fpath[len(fpath)-1] != ']' { + c.err = ErrPath("path has invalid filter [brackets].") + break + } + seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) + } + return seg +} + +// parseSelector parses a selector at the start of a path segment. +func (c *compiler) parseSelector(path string) selector { + switch path { + case ".": + return new(selectSelf) + case "..": + return new(selectParent) + case "*": + return new(selectChildren) + case "": + return new(selectDescendants) + default: + return newSelectChildrenByTag(path) + } +} + +// parseFilter parses a path filter contained within [brackets]. +func (c *compiler) parseFilter(path string) filter { + if len(path) == 0 { + c.err = ErrPath("path contains an empty filter expression.") + return nil + } + + // Filter contains [@attr='val'], [text()='val'], or [tag='val']? 
+ eqindex := strings.Index(path, "='") + if eqindex >= 0 { + rindex := nextIndex(path, "'", eqindex+2) + if rindex != len(path)-1 { + c.err = ErrPath("path has mismatched filter quotes.") + return nil + } + switch { + case path[0] == '@': + return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex]) + case strings.HasPrefix(path, "text()"): + return newFilterTextVal(path[eqindex+2 : rindex]) + default: + return newFilterChildText(path[:eqindex], path[eqindex+2:rindex]) + } + } + + // Filter contains [@attr], [N], [tag] or [text()] + switch { + case path[0] == '@': + return newFilterAttr(path[1:]) + case path == "text()": + return newFilterText() + case isInteger(path): + pos, _ := strconv.Atoi(path) + switch { + case pos > 0: + return newFilterPos(pos - 1) + default: + return newFilterPos(pos) + } + default: + return newFilterChild(path) + } +} + +// selectSelf selects the current element into the candidate list. +type selectSelf struct{} + +func (s *selectSelf) apply(e *Element, p *pather) { + p.candidates = append(p.candidates, e) +} + +// selectRoot selects the element's root node. +type selectRoot struct{} + +func (s *selectRoot) apply(e *Element, p *pather) { + root := e + for root.parent != nil { + root = root.parent + } + p.candidates = append(p.candidates, root) +} + +// selectParent selects the element's parent into the candidate list. +type selectParent struct{} + +func (s *selectParent) apply(e *Element, p *pather) { + if e.parent != nil { + p.candidates = append(p.candidates, e.parent) + } +} + +// selectChildren selects the element's child elements into the +// candidate list. +type selectChildren struct{} + +func (s *selectChildren) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + p.candidates = append(p.candidates, c) + } + } +} + +// selectDescendants selects all descendant child elements +// of the element into the candidate list. +type selectDescendants struct{} + +func (s *selectDescendants) apply(e *Element, p *pather) { + var queue fifo + for queue.add(e); queue.len() > 0; { + e := queue.remove().(*Element) + p.candidates = append(p.candidates, e) + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + queue.add(c) + } + } + } +} + +// selectChildrenByTag selects into the candidate list all child +// elements of the element having the specified tag. +type selectChildrenByTag struct { + space, tag string +} + +func newSelectChildrenByTag(path string) *selectChildrenByTag { + s, l := spaceDecompose(path) + return &selectChildrenByTag{s, l} +} + +func (s *selectChildrenByTag) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { + p.candidates = append(p.candidates, c) + } + } +} + +// filterPos filters the candidate list, keeping only the +// candidate at the specified index. +type filterPos struct { + index int +} + +func newFilterPos(pos int) *filterPos { + return &filterPos{pos} +} + +func (f *filterPos) apply(p *pather) { + if f.index >= 0 { + if f.index < len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[f.index]) + } + } else { + if -f.index <= len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttr filters the candidate list for elements having +// the specified attribute. 
+type filterAttr struct { + space, key string +} + +func newFilterAttr(str string) *filterAttr { + s, l := spaceDecompose(str) + return &filterAttr{s, l} +} + +func (f *filterAttr) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttrVal filters the candidate list for elements having +// the specified attribute with the specified value. +type filterAttrVal struct { + space, key, val string +} + +func newFilterAttrVal(str, value string) *filterAttrVal { + s, l := spaceDecompose(str) + return &filterAttrVal{s, l, value} +} + +func (f *filterAttrVal) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterText filters the candidate list for elements having text. +type filterText struct{} + +func newFilterText() *filterText { + return &filterText{} +} + +func (f *filterText) apply(p *pather) { + for _, c := range p.candidates { + if c.Text() != "" { + p.scratch = append(p.scratch, c) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterTextVal filters the candidate list for elements having +// text equal to the specified value. +type filterTextVal struct { + val string +} + +func newFilterTextVal(value string) *filterTextVal { + return &filterTextVal{value} +} + +func (f *filterTextVal) apply(p *pather) { + for _, c := range p.candidates { + if c.Text() == f.val { + p.scratch = append(p.scratch, c) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChild filters the candidate list for elements having +// a child element with the specified tag. +type filterChild struct { + space, tag string +} + +func newFilterChild(str string) *filterChild { + s, l := spaceDecompose(str) + return &filterChild{s, l} +} + +func (f *filterChild) apply(p *pather) { + for _, c := range p.candidates { + for _, cc := range c.Child { + if cc, ok := cc.(*Element); ok && + spaceMatch(f.space, cc.Space) && + f.tag == cc.Tag { + p.scratch = append(p.scratch, c) + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChildText filters the candidate list for elements having +// a child element with the specified tag and text. 
+type filterChildText struct { + space, tag, text string +} + +func newFilterChildText(str, text string) *filterChildText { + s, l := spaceDecompose(str) + return &filterChildText{s, l, text} +} + +func (f *filterChildText) apply(p *pather) { + for _, c := range p.candidates { + for _, cc := range c.Child { + if cc, ok := cc.(*Element); ok && + spaceMatch(f.space, cc.Space) && + f.tag == cc.Tag && + f.text == cc.Text() { + p.scratch = append(p.scratch, c) + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} diff --git a/vendor/github.com/crewjam/errset/.travis.yml b/vendor/github.com/crewjam/errset/.travis.yml new file mode 100644 index 0000000..61fea7e --- /dev/null +++ b/vendor/github.com/crewjam/errset/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.5 + - 1.6 diff --git a/vendor/github.com/crewjam/errset/LICENSE b/vendor/github.com/crewjam/errset/LICENSE new file mode 100644 index 0000000..cf1ab25 --- /dev/null +++ b/vendor/github.com/crewjam/errset/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/vendor/github.com/crewjam/errset/README.md b/vendor/github.com/crewjam/errset/README.md new file mode 100644 index 0000000..fba0440 --- /dev/null +++ b/vendor/github.com/crewjam/errset/README.md @@ -0,0 +1,21 @@ +errset is a trivial golang package that implements a slice of errors. + +[![Build Status](https://travis-ci.org/crewjam/errset.svg?branch=master)](https://travis-ci.org/crewjam/errset) + +[![](https://godoc.org/github.com/crewjam/errset?status.png)](http://godoc.org/github.com/crewjam/errset) + +The typical go idiom is to return an `error` or a tuple of (thing, `error`) from functions. This works well if a function performs exactly one task, but +when a function does work which can reasonably partially fail, I found myself writing the same code over and over again. For example: + + // CommitBatch commits as many things as it can. 
+ func CommitBatch(things []Thing) error { + errs := errset.ErrSet{} + for _, thing := range things { + err := Commit(thing) + if err != nil { + errs = append(errs, err) + } + } + return errs.ReturnValue() // nil if there were no errors + } + diff --git a/vendor/github.com/crewjam/errset/errset.go b/vendor/github.com/crewjam/errset/errset.go new file mode 100644 index 0000000..46faafd --- /dev/null +++ b/vendor/github.com/crewjam/errset/errset.go @@ -0,0 +1,48 @@ +package errset + +// ErrSet represents a list of errors. +// +// Example: +// +// errs := ErrSet{} +// for ... { +// if err := DoSomething(i); err != nil { +// errs = append(errs, err) +// } +// } +// return errs.ReturnValue() +// +type ErrSet []error + +// ReturnValue returns the ErrSet object if at least one non-nill error is +// present or nil if there are no errors +func (es ErrSet) ReturnValue() error { + rv := ErrSet{} + for _, err := range es { + if err != nil { + rv = append(rv, err) + } + } + if len(rv) == 0 { + return nil + } + return rv +} + +// Error implements the error interface. It returns each error in the list +// concatenated together with "; ". +func (es ErrSet) Error() string { + rv := "" + errCount := 0 + for _, err := range es { + if err == nil { + continue + } + if errCount != 0 { + rv += "; " + } + rv += err.Error() + errCount++ + } + return rv +} diff --git a/vendor/github.com/kr/pretty/.gitignore b/vendor/github.com/kr/pretty/.gitignore new file mode 100644 index 0000000..1f0a99f --- /dev/null +++ b/vendor/github.com/kr/pretty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License new file mode 100644 index 0000000..05c783c --- /dev/null +++ b/vendor/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme new file mode 100644 index 0000000..c589fc6 --- /dev/null +++ b/vendor/github.com/kr/pretty/Readme @@ -0,0 +1,9 @@ +package pretty + + import "github.com/kr/pretty" + + Package pretty provides pretty-printing for Go values. 
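A minimal, self-contained sketch of the two entry points this package exposes, as documented in the sources that follow: `Formatter` (driving the `%# v` verb) and `Diff`. The `Example` struct, its fields, and the `main` wrapper are illustrative placeholders, not part of the vendored code.

```go
package main

import (
	"fmt"

	"github.com/kr/pretty"
)

// Example is a throwaway struct used only to demonstrate the output.
type Example struct {
	Name  string
	Tags  []string
	Count int
}

func main() {
	a := Example{Name: "a", Tags: []string{"x", "y"}, Count: 1}
	b := Example{Name: "b", Tags: []string{"x"}, Count: 1}

	// The "%# v" verb (note the space flag) asks Formatter for an indented,
	// multi-line rendering instead of fmt's single-line "%#v" form.
	fmt.Printf("%# v\n", pretty.Formatter(a))

	// Diff returns one human-readable description per differing field.
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d)
	}
}
```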
+ +Documentation + + http://godoc.org/github.com/kr/pretty diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go new file mode 100644 index 0000000..6aa7f74 --- /dev/null +++ b/vendor/github.com/kr/pretty/diff.go @@ -0,0 +1,265 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (p *sbuf) Printf(format string, a ...interface{}) { + s := fmt.Sprintf(format, a...) + *p = append(*p, s) +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Pdiff((*sbuf)(&desc), a, b) + return desc +} + +// wprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type wprintfer struct{ w io.Writer } + +func (p *wprintfer) Printf(format string, a ...interface{}) { + fmt.Fprintf(p.w, format+"\n", a...) +} + +// Fdiff writes to w a description of the differences between a and b. +func Fdiff(w io.Writer, a, b interface{}) { + Pdiff(&wprintfer{w}, a, b) +} + +type Printfer interface { + Printf(format string, a ...interface{}) +} + +// Pdiff prints to p a description of the differences between a and b. +// It calls Printf once for each difference, with no trailing newline. +// The standard library log.Logger is a Printfer. +func Pdiff(p Printfer, a, b interface{}) { + diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type Logfer interface { + Logf(format string, a ...interface{}) +} + +// logprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type logprintfer struct{ l Logfer } + +func (p *logprintfer) Printf(format string, a ...interface{}) { + p.l.Logf(format, a...) +} + +// Ldiff prints to l a description of the differences between a and b. +// It calls Logf once for each difference, with no trailing newline. +// The standard library testing.T and testing.B are Logfers. +func Ldiff(l Logfer, a, b interface{}) { + Pdiff(&logprintfer{l}, a, b) +} + +type diffPrinter struct { + w Printfer + l string // label +} + +func (w diffPrinter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + w.w.Printf(l+f, a...) 
+} + +func (w diffPrinter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %# v", formatter{v: bv, quote: true}) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%# v != nil", formatter{v: av, quote: true}) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + switch kind := at.Kind(); kind { + case reflect.Bool: + if a, b := av.Bool(), bv.Bool(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a, b := av.Int(), bv.Int(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if a, b := av.Uint(), bv.Uint(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Float32, reflect.Float64: + if a, b := av.Float(), bv.Float(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Complex64, reflect.Complex128: + if a, b := av.Complex(), bv.Complex(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Array: + n := av.Len() + for i := 0; i < n; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + if a, b := av.Pointer(), bv.Pointer(); a != b { + w.printf("%#x != %#x", a, b) + } + case reflect.Interface: + w.diff(av.Elem(), bv.Elem()) + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %# v", formatter{v: bv, quote: true}) + case !av.IsNil() && bv.IsNil(): + w.printf("%# v != nil", formatter{v: av, quote: true}) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Slice: + lenA := av.Len() + lenB := bv.Len() + if lenA != lenB { + w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) + break + } + for i := 0; i < lenA; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.String: + if a, b := av.String(), bv.String(); a != b { + w.printf("%q != %q", a, b) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + default: + panic("unknown reflect Kind: " + kind.String()) + } +} + +func (d diffPrinter) relabel(name string) (d1 diffPrinter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." + } + d1.l += name + return d1 +} + +// keyEqual compares a and b for equality. +// Both a and b must be valid map keys. 
+func keyEqual(av, bv reflect.Value) bool { + if !av.IsValid() && !bv.IsValid() { + return true + } + if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { + return false + } + switch kind := av.Kind(); kind { + case reflect.Bool: + a, b := av.Bool(), bv.Bool() + return a == b + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := av.Int(), bv.Int() + return a == b + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := av.Uint(), bv.Uint() + return a == b + case reflect.Float32, reflect.Float64: + a, b := av.Float(), bv.Float() + return a == b + case reflect.Complex64, reflect.Complex128: + a, b := av.Complex(), bv.Complex() + return a == b + case reflect.Array: + for i := 0; i < av.Len(); i++ { + if !keyEqual(av.Index(i), bv.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: + a, b := av.Pointer(), bv.Pointer() + return a == b + case reflect.Interface: + return keyEqual(av.Elem(), bv.Elem()) + case reflect.String: + a, b := av.String(), bv.String() + return a == b + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + if !keyEqual(av.Field(i), bv.Field(i)) { + return false + } + } + return true + default: + panic("invalid map key type " + av.Type().String()) + } +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if keyEqual(av, bv) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if keyEqual(av, bv) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go new file mode 100644 index 0000000..a317d7b --- /dev/null +++ b/vendor/github.com/kr/pretty/formatter.go @@ -0,0 +1,328 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +type formatter struct { + v reflect.Value + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
+func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{v: reflect.ValueOf(x), quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.v.Interface()) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(i) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.v.Interface()) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} + p.printValue(fo.v, true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer + visited map[visit]int + depth int +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } +} + +// printValue must keep track of already-printed pointer values to avoid +// infinite recursion. +type visit struct { + v uintptr + typ reflect.Type +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + if p.depth > 10 { + io.WriteString(p, "!%v(DEPTH EXCEEDED)") + return + } + + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + showTypeInStruct := true + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if v.CanAddr() { + addr := v.UnsafeAddr() + vis := visit{addr, t} + if vd, ok := p.visited[vis]; ok && vd < p.depth { + p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) + break // don't print v again + } + p.visited[vis] = p.depth + } + + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') 
+ } + showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + pp := *p + pp.depth++ + pp.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + pp := *p + pp.depth++ + writeByte(pp, '&') + pp.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " {...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/vendor/github.com/kr/pretty/go.mod b/vendor/github.com/kr/pretty/go.mod new file mode 100644 index 0000000..1e29533 --- /dev/null +++ b/vendor/github.com/kr/pretty/go.mod @@ -0,0 +1,3 @@ +module "github.com/kr/pretty" + +require "github.com/kr/text" v0.1.0 diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go new file mode 100644 index 0000000..49423ec --- /dev/null +++ b/vendor/github.com/kr/pretty/pretty.go @@ -0,0 +1,108 
@@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. +// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" + "reflect" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. +// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). +func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprint is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprint(x, y) is equivalent to +// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Sprint(a ...interface{}) string { + return fmt.Sprint(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) 
+} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{v: reflect.ValueOf(x), force: force} + } + return w +} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go new file mode 100644 index 0000000..abb5b6f --- /dev/null +++ b/vendor/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License new file mode 100644 index 0000000..480a328 --- /dev/null +++ b/vendor/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme new file mode 100644 index 0000000..7e6e7c0 --- /dev/null +++ b/vendor/github.com/kr/text/Readme @@ -0,0 +1,3 @@ +This is a Go package for manipulating paragraphs of text. + +See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go new file mode 100644 index 0000000..cf4c198 --- /dev/null +++ b/vendor/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. 
+package text diff --git a/vendor/github.com/kr/text/go.mod b/vendor/github.com/kr/text/go.mod new file mode 100644 index 0000000..fa0528b --- /dev/null +++ b/vendor/github.com/kr/text/go.mod @@ -0,0 +1,3 @@ +module "github.com/kr/text" + +require "github.com/kr/pty" v1.1.1 diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go new file mode 100644 index 0000000..4ebac45 --- /dev/null +++ b/vendor/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. +func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. +func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. +func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go new file mode 100644 index 0000000..b09bb03 --- /dev/null +++ b/vendor/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. 
Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. +func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim || i == n-1 { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/vendor/github.com/ma314smith/signedxml/.gitignore b/vendor/github.com/ma314smith/signedxml/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/ma314smith/signedxml/.travis.yml b/vendor/github.com/ma314smith/signedxml/.travis.yml new file mode 100644 index 0000000..6ece920 --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false + +go: + - 1.x + - 1.6 + - 1.7.x + - master + +before_install: + - go get -v github.com/golang/lint/golint + +script: + - go vet ./... + - golint ./... + - go test -cover -v ./... diff --git a/vendor/github.com/ma314smith/signedxml/LICENSE.md b/vendor/github.com/ma314smith/signedxml/LICENSE.md new file mode 100644 index 0000000..538c383 --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Matt Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
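Taken together, the kr/text helpers above cover the common case of reflowing and prefixing a paragraph: `Wrap` delegates to the `WrapWords` dynamic program with the default penalty, and `Indent` adds a prefix to each non-empty line. A brief sketch (the sample text and prefix are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	msg := "The quick brown fox jumps over the lazy dog and keeps on running across the field."

	// Wrap reflows msg into lines of roughly 25 bytes, minimizing raggedness
	// via WrapWords(words, 1, 25, defaultPenalty).
	wrapped := text.Wrap(msg, 25)

	// Indent prefixes every non-empty line of the wrapped paragraph.
	fmt.Println(text.Indent(wrapped, "  | "))
}
```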
diff --git a/vendor/github.com/ma314smith/signedxml/README.md b/vendor/github.com/ma314smith/signedxml/README.md new file mode 100644 index 0000000..3e063aa --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/README.md @@ -0,0 +1,98 @@ +## signedxml + +[![Build Status](https://travis-ci.org/ma314smith/signedxml.svg?branch=master)](https://travis-ci.org/ma314smith/signedxml) +[![GoDoc](https://godoc.org/github.com/ma314smith/signedxml?status.svg)](https://godoc.org/github.com/ma314smith/signedxml) + +The signedxml package transforms and validates signed xml documents. The main use case is to support Single Sign On protocols like SAML and WS-Federation. + +Other packages that provide similar functionality rely on C libraries, which makes them difficult to run across platforms without significant configuration. `signedxml` is written in pure go, and can be easily used on any platform. + +### Install + +`go get github.com/ma314smith/signedxml` + +### Included Algorithms + +- Hashes + - http://www.w3.org/2001/04/xmldsig-more#md5 + - http://www.w3.org/2000/09/xmldsig#sha1 + - http://www.w3.org/2001/04/xmldsig-more#sha224 + - http://www.w3.org/2001/04/xmlenc#sha256 + - http://www.w3.org/2001/04/xmldsig-more#sha384 + - http://www.w3.org/2001/04/xmlenc#sha512 + - http://www.w3.org/2001/04/xmlenc#ripemd160 + + +- Signatures + - http://www.w3.org/2001/04/xmldsig-more#rsa-md2 + - http://www.w3.org/2001/04/xmldsig-more#rsa-md5 + - http://www.w3.org/2000/09/xmldsig#rsa-sha1 + - http://www.w3.org/2001/04/xmldsig-more#rsa-sha256 + - http://www.w3.org/2001/04/xmldsig-more#rsa-sha384 + - http://www.w3.org/2001/04/xmldsig-more#rsa-sha512 + - http://www.w3.org/2000/09/xmldsig#dsa-sha1 + - http://www.w3.org/2000/09/xmldsig#dsa-sha256 + - http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha1 + - http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256 + - http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha384 + - http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512 + + +- Canonicalization Methods/Transforms + - http://www.w3.org/2000/09/xmldsig#enveloped-signature + - http://www.w3.org/2001/10/xml-exc-c14n# + - http://www.w3.org/2001/10/xml-exc-c14n#WithComments + +### Examples + +#### Validating signed XML +If your signed xml contains the signature and certificate, then you can just pass in the xml and call `Validate()`. +```go +validator, err := signedxml.NewValidator(``) +xml, err = validator.ValidateReferences() +``` +`ValidateReferences()` verifies the DigestValue and SignatureValue in the xml document, and returns the signed payload(s). If the error value is `nil`, then the signed xml is valid. + +The x509.Certificate that was successfully used to validate the xml will be available by calling: +```go +validator.SigningCert() +``` +You can then verify that you trust the certificate. You can optionally supply your trusted certificates ahead of time by assigning them to the `Certificates` property of the `Validator` object, which is an x509.Certificate array. + +#### Using an external Signature +If you need to specify an external Signature, you can use the `SetSignature()` function to assign it: +```go +validator.SetSignature(<`Signature>`) +``` + +#### Generating signed XML +It is expected that your XML contains the Signature element with all the parameters set (except DigestValue and SignatureValue). 
+```go +signer, err := signedxml.NewSigner(` uri lookup + for _, attr := range node.Attr { + if attr.Space == "xmlns" { + e.namespaces[attr.Key] = attr.Value + } + } + + // handle the namespace of the node itself + if node.Space != "" { + if !contains(prefixesInScope, node.Space) { + nsListToRender["xmlns:"+node.Space] = e.namespaces[node.Space] + prefixesInScope = append(prefixesInScope, node.Space) + } + } else if defaultNS != currentNS { + newDefaultNS = currentNS + elementAttributes = append(elementAttributes, + etree.Attr{Key: "xmlns", Value: currentNS}) + } + + for _, attr := range node.Attr { + // include the namespaces if they are in the inclusiveNamespacePrefixList + if attr.Space == "xmlns" { + if !contains(prefixesInScope, attr.Key) && + contains(e.inclusiveNamespacePrefixList, attr.Key) { + + nsListToRender["xmlns:"+attr.Key] = attr.Value + prefixesInScope = append(prefixesInScope, attr.Key) + } + } + + // include namespaces for qualfied attributes + if attr.Space != "" && + attr.Space != "xmlns" && + !contains(prefixesInScope, attr.Space) { + + nsListToRender["xmlns:"+attr.Space] = e.namespaces[attr.Space] + prefixesInScope = append(prefixesInScope, attr.Space) + } + + // inclued all non-namespace attributes + if attr.Space != "xmlns" && attr.Key != "xmlns" { + attrListToRender = append(attrListToRender, + attribute{ + prefix: attr.Space, + uri: e.namespaces[attr.Space], + key: attr.Key, + value: attr.Value, + }) + } + } + + // sort and add the namespace attributes first + sortedNSList := getSortedNamespaces(nsListToRender) + elementAttributes = append(elementAttributes, sortedNSList...) + // then sort and add the non-namespace attributes + sortedAttributes := getSortedAttributes(attrListToRender) + elementAttributes = append(elementAttributes, sortedAttributes...) + // replace the nodes attributes with the sorted copy + node.Attr = elementAttributes + return currentNS, prefixesInScope +} + +func contains(slice []string, value string) bool { + for _, s := range slice { + if s == value { + return true + } + } + return false +} + +// getSortedNamespaces sorts the namespace attributes by their prefix +func getSortedNamespaces(list map[string]string) []etree.Attr { + var keys []string + for k := range list { + keys = append(keys, k) + } + sort.Strings(keys) + + elem := etree.Element{} + for _, k := range keys { + elem.CreateAttr(k, list[k]) + } + + return elem.Attr +} + +// getSortedAttributes sorts attributes by their namespace URIs +func getSortedAttributes(list attributes) []etree.Attr { + sort.Sort(list) + attrs := make([]etree.Attr, len(list)) + for i, a := range list { + attrs[i] = etree.Attr{ + Space: a.prefix, + Key: a.key, + Value: a.value, + } + } + return attrs +} + +func removeTokenFromElement(token etree.Token, e *etree.Element) *etree.Token { + for i, t := range e.Child { + if t == token { + e.Child = append(e.Child[0:i], e.Child[i+1:]...) + return &t + } + } + return nil +} + +func removeTokenFromDocument(token etree.Token, d *etree.Document) *etree.Token { + for i, t := range d.Child { + if t == token { + d.Child = append(d.Child[0:i], d.Child[i+1:]...) + return &t + } + } + return nil +} + +// isWhitespace returns true if the byte slice contains only +// whitespace characters. 
+func isWhitespace(s string) bool { + for i := 0; i < len(s); i++ { + if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' { + return false + } + } + return true +} diff --git a/vendor/github.com/ma314smith/signedxml/signedxml.go b/vendor/github.com/ma314smith/signedxml/signedxml.go new file mode 100644 index 0000000..abe8895 --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/signedxml.go @@ -0,0 +1,319 @@ +// Package signedxml transforms and validates signedxml documents +package signedxml + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "log" + "os" + "strings" + + "github.com/beevik/etree" +) + +var logger = log.New(os.Stdout, "DEBUG-SIGNEDXML: ", log.Ldate|log.Ltime|log.Lshortfile) + +func init() { + hashAlgorithms = map[string]crypto.Hash{ + "http://www.w3.org/2001/04/xmldsig-more#md5": crypto.MD5, + "http://www.w3.org/2000/09/xmldsig#sha1": crypto.SHA1, + "http://www.w3.org/2001/04/xmldsig-more#sha224": crypto.SHA224, + "http://www.w3.org/2001/04/xmlenc#sha256": crypto.SHA256, + "http://www.w3.org/2001/04/xmldsig-more#sha384": crypto.SHA384, + "http://www.w3.org/2001/04/xmlenc#sha512": crypto.SHA512, + "http://www.w3.org/2001/04/xmlenc#ripemd160": crypto.RIPEMD160, + } + + signatureAlgorithms = map[string]x509.SignatureAlgorithm{ + "http://www.w3.org/2001/04/xmldsig-more#rsa-md2": x509.MD2WithRSA, + "http://www.w3.org/2001/04/xmldsig-more#rsa-md5": x509.MD5WithRSA, + "http://www.w3.org/2000/09/xmldsig#rsa-sha1": x509.SHA1WithRSA, + "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256": x509.SHA256WithRSA, + "http://www.w3.org/2001/04/xmldsig-more#rsa-sha384": x509.SHA384WithRSA, + "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512": x509.SHA512WithRSA, + "http://www.w3.org/2000/09/xmldsig#dsa-sha1": x509.DSAWithSHA1, + "http://www.w3.org/2000/09/xmldsig#dsa-sha256": x509.DSAWithSHA256, + "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha1": x509.ECDSAWithSHA1, + "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256": x509.ECDSAWithSHA256, + "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha384": x509.ECDSAWithSHA384, + "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512": x509.ECDSAWithSHA512, + } + + CanonicalizationAlgorithms = map[string]CanonicalizationAlgorithm{ + "http://www.w3.org/2000/09/xmldsig#enveloped-signature": EnvelopedSignature{}, + "http://www.w3.org/2001/10/xml-exc-c14n#": ExclusiveCanonicalization{}, + "http://www.w3.org/2001/10/xml-exc-c14n#WithComments": ExclusiveCanonicalization{WithComments: true}, + } +} + +// CanonicalizationAlgorithm defines an interface for processing an XML +// document into a standard format. +// +// If any child elements are in the Transform node, the entire transform node +// will be passed to the Process method through the transformXML parameter as an +// XML string. This is necessary for transforms that need additional processing +// data, like XPath (http://www.w3.org/TR/xmldsig-core/#sec-XPath). If there are +// no child elements in Transform (or CanonicalizationMethod), then an empty +// string will be passed through. +type CanonicalizationAlgorithm interface { + Process(inputXML string, transformXML string) (outputXML string, err error) +} + +// CanonicalizationAlgorithms maps the CanonicalizationMethod or +// Transform Algorithm URIs to a type that implements the +// CanonicalizationAlgorithm interface. 
+// +// Implementations are provided for the following transforms: +// http://www.w3.org/2001/10/xml-exc-c14n# (ExclusiveCanonicalization) +// http://www.w3.org/2001/10/xml-exc-c14n#WithComments (ExclusiveCanonicalizationWithComments) +// http://www.w3.org/2000/09/xmldsig#enveloped-signature (EnvelopedSignature) +// +// Custom implementations can be added to the map +var CanonicalizationAlgorithms map[string]CanonicalizationAlgorithm +var hashAlgorithms map[string]crypto.Hash +var signatureAlgorithms map[string]x509.SignatureAlgorithm + +// signatureData provides options for verifying a signed XML document +type signatureData struct { + xml *etree.Document + signature *etree.Element + signedInfo *etree.Element + sigValue string + sigAlgorithm x509.SignatureAlgorithm + canonAlgorithm CanonicalizationAlgorithm +} + +// SetSignature can be used to assign an external signature for the XML doc +// that Validator will verify +func (s *signatureData) SetSignature(sig string) error { + doc := etree.NewDocument() + err := doc.ReadFromString(sig) + s.signature = doc.Root() + return err +} + +func (s *signatureData) parseEnvelopedSignature() error { + sig := s.xml.FindElement(".//Signature") + if sig != nil { + s.signature = sig + } else { + return errors.New("signedxml: Unable to find a unique signature element " + + "in the xml document. The signature must either be enveloped in the " + + "xml doc or externally assigned to Validator.SetSignature") + } + return nil +} + +func (s *signatureData) parseSignedInfo() error { + s.signedInfo = nil + s.signedInfo = s.signature.SelectElement("SignedInfo") + if s.signedInfo == nil { + return errors.New("signedxml: unable to find SignedInfo element") + } + + // move the Signature level namespace down to SignedInfo so that the signature + // value will match up + if s.signedInfo.Space != "" { + attr := s.signature.SelectAttr(s.signedInfo.Space) + if attr != nil { + s.signedInfo.Attr = []etree.Attr{*attr} + } + } else { + attr := s.signature.SelectAttr("xmlns") + if attr != nil { + s.signedInfo.Attr = []etree.Attr{*attr} + } + } + + // Copy SignedInfo xmlns: into itself if it does not exist and is defined as a root attribute + root := s.xml.Root() + + if root != nil { + sigNS := root.SelectAttr("xmlns:" + s.signedInfo.Space) + if sigNS != nil { + if s.signedInfo.SelectAttr("xmlns:"+s.signedInfo.Space) == nil { + s.signedInfo.CreateAttr("xmlns:"+s.signedInfo.Space, sigNS.Value) + } + } + } + + return nil +} + +func (s *signatureData) parseSigValue() error { + s.sigValue = "" + sigValueElement := s.signature.SelectElement("SignatureValue") + if sigValueElement != nil { + s.sigValue = sigValueElement.Text() + return nil + } + return errors.New("signedxml: unable to find SignatureValue") +} + +func (s *signatureData) parseSigAlgorithm() error { + s.sigAlgorithm = x509.UnknownSignatureAlgorithm + sigMethod := s.signedInfo.SelectElement("SignatureMethod") + + var sigAlgoURI string + if sigMethod == nil { + return errors.New("signedxml: Unable to find SignatureMethod element") + } + + sigAlgoURI = sigMethod.SelectAttrValue("Algorithm", "") + sigAlgo, ok := signatureAlgorithms[sigAlgoURI] + if ok { + s.sigAlgorithm = sigAlgo + return nil + } + + return errors.New("signedxml: Unable to find Algorithm in SignatureMethod element") +} + +func (s *signatureData) parseCanonAlgorithm() error { + s.canonAlgorithm = nil + canonMethod := s.signedInfo.SelectElement("CanonicalizationMethod") + + var canonAlgoURI string + if canonMethod == nil { + return errors.New("signedxml: 
Unable to find CanonicalizationMethod element") + } + + canonAlgoURI = canonMethod.SelectAttrValue("Algorithm", "") + canonAlgo, ok := CanonicalizationAlgorithms[canonAlgoURI] + if ok { + s.canonAlgorithm = canonAlgo + return nil + } + + return errors.New("signedxml: Unable to find Algorithm in " + + "CanonicalizationMethod element") +} + +func getReferencedXML(reference *etree.Element, inputDoc *etree.Document) (outputDoc *etree.Document, err error) { + uri := reference.SelectAttrValue("URI", "") + uri = strings.Replace(uri, "#", "", 1) + // populate doc with the referenced xml from the Reference URI + if uri == "" { + outputDoc = inputDoc + } else { + path := fmt.Sprintf(".//[@ID='%s']", uri) + e := inputDoc.FindElement(path) + if e != nil { + outputDoc = etree.NewDocument() + outputDoc.SetRoot(e.Copy()) + } else { + // SAML v1.1 Assertions use AssertionID + path := fmt.Sprintf(".//[@AssertionID='%s']", uri) + e := inputDoc.FindElement(path) + if e != nil { + outputDoc = etree.NewDocument() + outputDoc.SetRoot(e.Copy()) + } + } + } + + if outputDoc == nil { + return nil, errors.New("signedxml: unable to find refereced xml") + } + + return outputDoc, nil +} + +func getCertFromPEMString(pemString string) (*x509.Certificate, error) { + pubkey := fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", + pemString) + + pemBlock, _ := pem.Decode([]byte(pubkey)) + if pemBlock == nil { + return &x509.Certificate{}, errors.New("Could not parse Public Key PEM") + } + if pemBlock.Type != "PUBLIC KEY" { + return &x509.Certificate{}, errors.New("Found wrong key type") + } + + cert, err := x509.ParseCertificate(pemBlock.Bytes) + return cert, err +} + +func processTransform(transform *etree.Element, + docIn *etree.Document) (docOut *etree.Document, err error) { + + transformAlgoURI := transform.SelectAttrValue("Algorithm", "") + if transformAlgoURI == "" { + return nil, errors.New("signedxml: unable to find Algorithm in Transform") + } + + transformAlgo, ok := CanonicalizationAlgorithms[transformAlgoURI] + if !ok { + return nil, fmt.Errorf("signedxml: unable to find matching transform"+ + "algorithm for %s in CanonicalizationAlgorithms", transformAlgoURI) + } + + var transformContent string + + if transform.ChildElements() != nil { + tDoc := etree.NewDocument() + tDoc.SetRoot(transform.Copy()) + transformContent, err = tDoc.WriteToString() + if err != nil { + return nil, err + } + } + + docString, err := docIn.WriteToString() + if err != nil { + return nil, err + } + + docString, err = transformAlgo.Process(docString, transformContent) + if err != nil { + return nil, err + } + + docOut = etree.NewDocument() + docOut.ReadFromString(docString) + + return docOut, nil +} + +func calculateHash(reference *etree.Element, doc *etree.Document) (string, error) { + digestMethodElement := reference.SelectElement("DigestMethod") + if digestMethodElement == nil { + return "", errors.New("signedxml: unable to find DigestMethod") + } + + digestMethodURI := digestMethodElement.SelectAttrValue("Algorithm", "") + if digestMethodURI == "" { + return "", errors.New("signedxml: unable to find Algorithm in DigestMethod") + } + + digestAlgo, ok := hashAlgorithms[digestMethodURI] + if !ok { + return "", fmt.Errorf("signedxml: unable to find matching hash"+ + "algorithm for %s in hashAlgorithms", digestMethodURI) + } + + doc.WriteSettings.CanonicalEndTags = true + doc.WriteSettings.CanonicalText = true + doc.WriteSettings.CanonicalAttrVal = true + + h := digestAlgo.New() + docBytes, err := doc.WriteToBytes() + 
if err != nil { + return "", err + } + + //ioutil.WriteFile("C:/Temp/SignedXML/Suspect.xml", docBytes, 0644) + //s, _ := doc.WriteToString() + //logger.Println(s) + + h.Write(docBytes) + d := h.Sum(nil) + calculatedValue := base64.StdEncoding.EncodeToString(d) + + return calculatedValue, nil +} diff --git a/vendor/github.com/ma314smith/signedxml/signer.go b/vendor/github.com/ma314smith/signedxml/signer.go new file mode 100644 index 0000000..088bbd5 --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/signer.go @@ -0,0 +1,171 @@ +package signedxml + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "errors" + + "github.com/beevik/etree" +) + +var signingAlgorithms map[x509.SignatureAlgorithm]cryptoHash + +func init() { + signingAlgorithms = map[x509.SignatureAlgorithm]cryptoHash{ + // MD2 not supported + // x509.MD2WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.MD2}, + x509.MD5WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.MD5}, + x509.SHA1WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA1}, + x509.SHA256WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA256}, + x509.SHA384WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA384}, + x509.SHA512WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA512}, + // DSA not supported + // x509.DSAWithSHA1: cryptoHash{algorithm: "dsa", hash: crypto.SHA1}, + // x509.DSAWithSHA256:cryptoHash{algorithm: "dsa", hash: crypto.SHA256}, + // Golang ECDSA support is lacking, can't seem to load private keys + // x509.ECDSAWithSHA1: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA1}, + // x509.ECDSAWithSHA256: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA256}, + // x509.ECDSAWithSHA384: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA384}, + // x509.ECDSAWithSHA512: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA512}, + } +} + +type cryptoHash struct { + algorithm string + hash crypto.Hash +} + +// Signer provides options for signing an XML document +type Signer struct { + signatureData + privateKey interface{} +} + +// NewSigner returns a *Signer for the XML provided +func NewSigner(xml string) (*Signer, error) { + doc := etree.NewDocument() + err := doc.ReadFromString(xml) + if err != nil { + return nil, err + } + s := &Signer{signatureData: signatureData{xml: doc}} + return s, nil +} + +// Sign populates the XML digest and signature based on the parameters present and privateKey given +func (s *Signer) Sign(privateKey interface{}) (string, error) { + s.privateKey = privateKey + + if s.signature == nil { + if err := s.parseEnvelopedSignature(); err != nil { + return "", err + } + } + if err := s.parseSignedInfo(); err != nil { + return "", err + } + if err := s.parseSigAlgorithm(); err != nil { + return "", err + } + if err := s.parseCanonAlgorithm(); err != nil { + return "", err + } + if err := s.setDigest(); err != nil { + return "", err + } + if err := s.setSignature(); err != nil { + return "", err + } + + xml, err := s.xml.WriteToString() + if err != nil { + return "", err + } + return xml, nil +} + +func (s *Signer) setDigest() (err error) { + references := s.signedInfo.FindElements("./Reference") + for _, ref := range references { + doc := s.xml.Copy() + transforms := ref.SelectElement("Transforms") + for _, transform := range transforms.SelectElements("Transform") { + doc, err = processTransform(transform, doc) + if err != nil { + return err + } + } + + doc, err := getReferencedXML(ref, doc) + if err != nil { + return err + } + + calculatedValue, err := calculateHash(ref, doc) + if err != 
nil { + return err + } + + digestValueElement := ref.SelectElement("DigestValue") + if digestValueElement == nil { + return errors.New("signedxml: unable to find DigestValue") + } + digestValueElement.SetText(calculatedValue) + } + return nil +} + +func (s *Signer) setSignature() error { + doc := etree.NewDocument() + doc.SetRoot(s.signedInfo.Copy()) + signedInfo, err := doc.WriteToString() + if err != nil { + return err + } + + canonSignedInfo, err := s.canonAlgorithm.Process(signedInfo, "") + if err != nil { + return err + } + + var hashed, signature []byte + //var h1, h2 *big.Int + signingAlgorithm, ok := signingAlgorithms[s.sigAlgorithm] + if !ok { + return errors.New("signedxml: unsupported algorithm") + } + + hasher := signingAlgorithm.hash.New() + hasher.Write([]byte(canonSignedInfo)) + hashed = hasher.Sum(nil) + + switch signingAlgorithm.algorithm { + case "rsa": + signature, err = rsa.SignPKCS1v15(rand.Reader, s.privateKey.(*rsa.PrivateKey), signingAlgorithm.hash, hashed) + /* + case "dsa": + h1, h2, err = dsa.Sign(rand.Reader, s.privateKey.(*dsa.PrivateKey), hashed) + case "ecdsa": + h1, h2, err = ecdsa.Sign(rand.Reader, s.privateKey.(*ecdsa.PrivateKey), hashed) + */ + } + if err != nil { + return err + } + + // DSA and ECDSA has not been validated + /* + if signature == nil && h1 != nil && h2 != nil { + signature = append(h1.Bytes(), h2.Bytes()...) + } + */ + + b64 := base64.StdEncoding.EncodeToString(signature) + sigValueElement := s.signature.SelectElement("SignatureValue") + sigValueElement.SetText(b64) + + return nil +} diff --git a/vendor/github.com/ma314smith/signedxml/validator.go b/vendor/github.com/ma314smith/signedxml/validator.go new file mode 100644 index 0000000..f3e68f6 --- /dev/null +++ b/vendor/github.com/ma314smith/signedxml/validator.go @@ -0,0 +1,200 @@ +package signedxml + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "log" + + "github.com/beevik/etree" +) + +// Validator provides options for verifying a signed XML document +type Validator struct { + Certificates []x509.Certificate + signingCert x509.Certificate + signatureData +} + +// NewValidator returns a *Validator for the XML provided +func NewValidator(xml string) (*Validator, error) { + doc := etree.NewDocument() + err := doc.ReadFromString(xml) + if err != nil { + return nil, err + } + v := &Validator{signatureData: signatureData{xml: doc}} + return v, nil +} + +// SetXML is used to assign the XML document that the Validator will verify +func (v *Validator) SetXML(xml string) error { + doc := etree.NewDocument() + err := doc.ReadFromString(xml) + v.xml = doc + return err +} + +// SigningCert returns the certificate, if any, that was used to successfully +// validate the signature of the XML document. This will be a zero value +// x509.Certificate before Validator.Validate is successfully called. +func (v *Validator) SigningCert() x509.Certificate { + return v.signingCert +} + +// Validate validates the Reference digest values, and the signature value +// over the SignedInfo. +// +// Deprecated: Use ValidateReferences instead +func (v *Validator) Validate() error { + _, err := v.ValidateReferences() + return err +} + +// ValidateReferences validates the Reference digest values, and the signature value +// over the SignedInfo. +// +// If the signature is enveloped in the XML, then it will be used. +// Otherwise, an external signature should be assigned using +// Validator.SetSignature. +// +// The references returned by this method can be used to verify what was signed. 
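Taken together, the Signer above and the Validator below are the two halves of the package's workflow: Sign fills in the DigestValue and SignatureValue elements of an enveloped-signature template, and ValidateReferences checks them again. A minimal usage sketch, assuming the caller already has a template document containing a Signature block and an RSA private key (the signAndVerify helper and its inputs are illustrative, not part of the vendored code):

```go
package example

import (
	"crypto/rsa"

	"github.com/ma314smith/signedxml"
)

// signAndVerify signs templateXML (which must already contain the Signature
// template with SignedInfo and Reference elements) and then validates the
// result, returning the referenced XML fragments that were covered.
func signAndVerify(templateXML string, key *rsa.PrivateKey) ([]string, error) {
	signer, err := signedxml.NewSigner(templateXML)
	if err != nil {
		return nil, err
	}
	signedDoc, err := signer.Sign(key) // populates DigestValue and SignatureValue
	if err != nil {
		return nil, err
	}

	validator, err := signedxml.NewValidator(signedDoc)
	if err != nil {
		return nil, err
	}
	// Certificates are read from X509Certificate elements in the document;
	// they can also be provided explicitly via validator.Certificates.
	return validator.ValidateReferences()
}
```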
+func (v *Validator) ValidateReferences() ([]string, error) { + if err := v.loadValuesFromXML(); err != nil { + return nil, err + } + + referenced, err := v.validateReferences() + if err != nil { + return nil, err + } + + var ref []string + for _, doc := range referenced { + docStr, err := doc.WriteToString() + if err != nil { + return nil, err + } + ref = append(ref, docStr) + } + + err = v.validateSignature() + return ref, err +} + +func (v *Validator) loadValuesFromXML() error { + if v.signature == nil { + if err := v.parseEnvelopedSignature(); err != nil { + return err + } + } + if err := v.parseSignedInfo(); err != nil { + return err + } + if err := v.parseSigValue(); err != nil { + return err + } + if err := v.parseSigAlgorithm(); err != nil { + return err + } + if err := v.parseCanonAlgorithm(); err != nil { + return err + } + if err := v.loadCertificates(); err != nil { + return err + } + return nil +} + +func (v *Validator) validateReferences() (referenced []*etree.Document, err error) { + references := v.signedInfo.FindElements("./Reference") + for _, ref := range references { + doc := v.xml.Copy() + transforms := ref.SelectElement("Transforms") + for _, transform := range transforms.SelectElements("Transform") { + doc, err = processTransform(transform, doc) + if err != nil { + return nil, err + } + } + + doc, err = getReferencedXML(ref, doc) + if err != nil { + return nil, err + } + + referenced = append(referenced, doc) + + digestValueElement := ref.SelectElement("DigestValue") + if digestValueElement == nil { + return nil, errors.New("signedxml: unable to find DigestValue") + } + digestValue := digestValueElement.Text() + + calculatedValue, err := calculateHash(ref, doc) + if err != nil { + return nil, err + } + + if calculatedValue != digestValue { + return nil, fmt.Errorf("signedxml: Calculated digest does not match the"+ + " expected digestvalue of %s", digestValue) + } + } + return referenced, nil +} + +func (v *Validator) validateSignature() error { + doc := etree.NewDocument() + doc.SetRoot(v.signedInfo.Copy()) + signedInfo, err := doc.WriteToString() + if err != nil { + return err + } + + canonSignedInfo, err := v.canonAlgorithm.Process(signedInfo, "") + if err != nil { + return err + } + + b64, err := base64.StdEncoding.DecodeString(v.sigValue) + if err != nil { + return err + } + sig := []byte(b64) + + v.signingCert = x509.Certificate{} + for _, cert := range v.Certificates { + err := cert.CheckSignature(v.sigAlgorithm, []byte(canonSignedInfo), sig) + if err == nil { + v.signingCert = cert + return nil + } + } + + return errors.New("signedxml: Calculated signature does not match the " + + "SignatureValue provided") +} + +func (v *Validator) loadCertificates() error { + // If v.Certificates is already populated, then the client has already set it + // to the desired cert. Otherwise, let's pull the public keys from the XML + if len(v.Certificates) < 1 { + keydata := v.xml.FindElements(".//X509Certificate") + for _, key := range keydata { + cert, err := getCertFromPEMString(key.Text()) + if err != nil { + log.Printf("signedxml: Unable to load certificate: (%s). 
"+ + "Looking for another cert.", err) + } else { + v.Certificates = append(v.Certificates, *cert) + } + } + } + + if len(v.Certificates) < 1 { + return errors.New("signedxml: a certificate is required, but was not found") + } + return nil +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000..588ceca --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000..835ba3e --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000..273db3c --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. 
+ +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000..a932ead --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000..842ee80 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. 
For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. 
+type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000..6b1f289 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. 
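With New, Wrap, WithMessage and Cause now defined, a short end-to-end sketch shows how they compose in practice (openConfig and loadApp are illustrative helpers, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func openConfig(path string) error {
	return errors.New("no such file") // records a stack trace here
}

func loadApp(path string) error {
	if err := openConfig(path); err != nil {
		// Wrap annotates the error with a message and a second stack trace.
		return errors.Wrap(err, "loading application config")
	}
	return nil
}

func main() {
	err := loadApp("/etc/app.conf")
	fmt.Println(err)               // loading application config: no such file
	fmt.Println(errors.Cause(err)) // no such file
	fmt.Printf("%+v\n", err)       // messages plus the recorded stack traces
}
```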
+func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. 
For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
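The separator-counting trick described in trimGOPATH (stack.go, above) can be illustrated in isolation. This is a standalone sketch re-implementing the idea, not the vendored function itself:

```go
package example

import "strings"

// trimToPkgPath keeps trimming leading segments from file until it contains
// exactly one more "/" than the package-qualified function name, mirroring
// the reasoning in the trimGOPATH comment.
func trimToPkgPath(funcName, file string) string {
	const sep = "/"
	goal := strings.Count(funcName, sep) + 2
	i := len(file)
	for n := 0; n < goal; n++ {
		i = strings.LastIndex(file[:i], sep)
		if i == -1 {
			return file // not enough separators; leave the path unchanged
		}
	}
	return file[i+len(sep):]
}

// trimToPkgPath("pkg/sub.Type.Method", "/home/user/src/pkg/sub/file.go")
// yields "pkg/sub/file.go".
```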
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go new file mode 100644 index 0000000..fd97ba1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go @@ -0,0 +1,120 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ripemd160 implements the RIPEMD-160 hash algorithm. +package ripemd160 // import "golang.org/x/crypto/ripemd160" + +// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart +// Preneel with specifications available at: +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.RIPEMD160, New) +} + +// The size of the checksum in bytes. +const Size = 20 + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. 
+func New() hash.Hash { + result := new(digest) + result.Reset() + return result +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. + tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.s { + digest[i*4] = byte(s) + digest[i*4+1] = byte(s >> 8) + digest[i*4+2] = byte(s >> 16) + digest[i*4+3] = byte(s >> 24) + } + + return append(in, digest[:]...) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go new file mode 100644 index 0000000..e0edc02 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. 
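As a quick orientation for the two vendored ripemd160 files, here is a minimal, illustrative way to compute a digest with this package (the input string is arbitrary):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	h := ripemd160.New()           // hash.Hash seeded with the RIPEMD-160 initial state
	h.Write([]byte("message"))     // may be called repeatedly; data is buffered per block
	fmt.Printf("%x\n", h.Sum(nil)) // 20-byte (Size) digest, hex-encoded
}
```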
+ +package ripemd160 + +import ( + "math/bits" +) + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md *digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i < 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = 
a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/gopkg.in/check.v1/.gitignore b/vendor/gopkg.in/check.v1/.gitignore new file mode 100644 index 0000000..191a536 --- /dev/null +++ b/vendor/gopkg.in/check.v1/.gitignore @@ -0,0 +1,4 @@ +_* +*.swp +*.[568] +[568].out diff --git a/vendor/gopkg.in/check.v1/.travis.yml b/vendor/gopkg.in/check.v1/.travis.yml new file mode 100644 index 0000000..ead6735 --- /dev/null +++ b/vendor/gopkg.in/check.v1/.travis.yml @@ -0,0 +1,3 @@ +language: go + +go_import_path: gopkg.in/check.v1 diff --git a/vendor/gopkg.in/check.v1/LICENSE b/vendor/gopkg.in/check.v1/LICENSE new file mode 100644 index 0000000..545cf2d --- /dev/null +++ b/vendor/gopkg.in/check.v1/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/check.v1/README.md b/vendor/gopkg.in/check.v1/README.md new file mode 100644 index 0000000..0ca9e57 --- /dev/null +++ b/vendor/gopkg.in/check.v1/README.md @@ -0,0 +1,20 @@ +Instructions +============ + +Install the package with: + + go get gopkg.in/check.v1 + +Import it with: + + import "gopkg.in/check.v1" + +and use _check_ as the package name inside the code. 
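A minimal test file wiring check into `go test` looks roughly like this (the suite name and assertions are illustrative):

    package mypkg_test

    import (
        "testing"

        "gopkg.in/check.v1"
    )

    // Hook gocheck into the standard "go test" runner.
    func Test(t *testing.T) { check.TestingT(t) }

    type MySuite struct{}

    var _ = check.Suite(&MySuite{})

    func (s *MySuite) TestHelloWorld(c *check.C) {
        c.Assert(42, check.Equals, 42)
        c.Check("hello", check.Not(check.Equals), "world")
    }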
+ +For more details, visit the project page: + +* http://labix.org/gocheck + +and the API documentation: + +* https://gopkg.in/check.v1 diff --git a/vendor/gopkg.in/check.v1/TODO b/vendor/gopkg.in/check.v1/TODO new file mode 100644 index 0000000..3349827 --- /dev/null +++ b/vendor/gopkg.in/check.v1/TODO @@ -0,0 +1,2 @@ +- Assert(slice, Contains, item) +- Parallel test support diff --git a/vendor/gopkg.in/check.v1/benchmark.go b/vendor/gopkg.in/check.v1/benchmark.go new file mode 100644 index 0000000..46ea9dc --- /dev/null +++ b/vendor/gopkg.in/check.v1/benchmark.go @@ -0,0 +1,187 @@ +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package check + +import ( + "fmt" + "runtime" + "time" +) + +var memStats runtime.MemStats + +// testingB is a type passed to Benchmark functions to manage benchmark +// timing and to specify the number of iterations to run. +type timer struct { + start time.Time // Time test or benchmark started + duration time.Duration + N int + bytes int64 + timerOn bool + benchTime time.Duration + // The initial states of memStats.Mallocs and memStats.TotalAlloc. + startAllocs uint64 + startBytes uint64 + // The net total of this test after being run. + netAllocs uint64 + netBytes uint64 +} + +// StartTimer starts timing a test. This function is called automatically +// before a benchmark starts, but it can also used to resume timing after +// a call to StopTimer. +func (c *C) StartTimer() { + if !c.timerOn { + c.start = time.Now() + c.timerOn = true + + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } +} + +// StopTimer stops timing a test. This can be used to pause the timer +// while performing complex initialization that you don't +// want to measure. 
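For context, this is how the timer controls are typically used from a benchmark method. A sketch only, with the `go test` hookup as in the README example earlier (BenchSuite and the work inside the loop are made up):

```go
package mypkg_test

import (
	"strings"

	"gopkg.in/check.v1"
)

type BenchSuite struct{}

var _ = check.Suite(&BenchSuite{})

// The runner picks c.N; StopTimer/StartTimer keep per-iteration setup out of
// the measured time, as the comments above describe.
func (s *BenchSuite) BenchmarkCount(c *check.C) {
	for i := 0; i < c.N; i++ {
		c.StopTimer()
		input := strings.Repeat("x y ", 1<<10) // setup, not timed
		c.StartTimer()
		_ = strings.Count(input, "y")
	}
}
```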
+func (c *C) StopTimer() { + if c.timerOn { + c.duration += time.Now().Sub(c.start) + c.timerOn = false + runtime.ReadMemStats(&memStats) + c.netAllocs += memStats.Mallocs - c.startAllocs + c.netBytes += memStats.TotalAlloc - c.startBytes + } +} + +// ResetTimer sets the elapsed benchmark time to zero. +// It does not affect whether the timer is running. +func (c *C) ResetTimer() { + if c.timerOn { + c.start = time.Now() + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } + c.duration = 0 + c.netAllocs = 0 + c.netBytes = 0 +} + +// SetBytes informs the number of bytes that the benchmark processes +// on each iteration. If this is called in a benchmark it will also +// report MB/s. +func (c *C) SetBytes(n int64) { + c.bytes = n +} + +func (c *C) nsPerOp() int64 { + if c.N <= 0 { + return 0 + } + return c.duration.Nanoseconds() / int64(c.N) +} + +func (c *C) mbPerSec() float64 { + if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { + return 0 + } + return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() +} + +func (c *C) timerString() string { + if c.N <= 0 { + return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) + } + mbs := c.mbPerSec() + mb := "" + if mbs != 0 { + mb = fmt.Sprintf("\t%7.2f MB/s", mbs) + } + nsop := c.nsPerOp() + ns := fmt.Sprintf("%10d ns/op", nsop) + if c.N > 0 && nsop < 100 { + // The format specifiers here make sure that + // the ones digits line up for all three possible formats. + if nsop < 10 { + ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } else { + ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } + } + memStats := "" + if c.benchMem { + allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) + allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) + memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) + } + return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) +} + +func min(x, y int) int { + if x > y { + return y + } + return x +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +// roundDown10 rounds a number down to the nearest power of 10. +func roundDown10(n int) int { + var tens = 0 + // tens = floor(log_10(n)) + for n > 10 { + n = n / 10 + tens++ + } + // result = 10^tens + result := 1 + for i := 0; i < tens; i++ { + result *= 10 + } + return result +} + +// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. +func roundUp(n int) int { + base := roundDown10(n) + if n < (2 * base) { + return 2 * base + } + if n < (5 * base) { + return 5 * base + } + return 10 * base +} diff --git a/vendor/gopkg.in/check.v1/check.go b/vendor/gopkg.in/check.v1/check.go new file mode 100644 index 0000000..de714c2 --- /dev/null +++ b/vendor/gopkg.in/check.v1/check.go @@ -0,0 +1,882 @@ +// Package check is a rich testing extension for Go's testing package. +// +// For details about the project, see: +// +// http://labix.org/gocheck +// +package check + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// ----------------------------------------------------------------------- +// Internal type which deals with suite method calling. 
+ +const ( + fixtureKd = iota + testKd +) + +type funcKind int + +const ( + succeededSt = iota + failedSt + skippedSt + panickedSt + fixturePanickedSt + missedSt +) + +type funcStatus uint32 + +// A method value can't reach its own Method structure. +type methodType struct { + reflect.Value + Info reflect.Method +} + +func newMethod(receiver reflect.Value, i int) *methodType { + return &methodType{receiver.Method(i), receiver.Type().Method(i)} +} + +func (method *methodType) PC() uintptr { + return method.Info.Func.Pointer() +} + +func (method *methodType) suiteName() string { + t := method.Info.Type.In(0) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Name() +} + +func (method *methodType) String() string { + return method.suiteName() + "." + method.Info.Name +} + +func (method *methodType) matches(re *regexp.Regexp) bool { + return (re.MatchString(method.Info.Name) || + re.MatchString(method.suiteName()) || + re.MatchString(method.String())) +} + +type C struct { + method *methodType + kind funcKind + testName string + _status funcStatus + logb *logger + logw io.Writer + done chan *C + reason string + mustFail bool + tempDir *tempDir + benchMem bool + startTime time.Time + timer +} + +func (c *C) status() funcStatus { + return funcStatus(atomic.LoadUint32((*uint32)(&c._status))) +} + +func (c *C) setStatus(s funcStatus) { + atomic.StoreUint32((*uint32)(&c._status), uint32(s)) +} + +func (c *C) stopNow() { + runtime.Goexit() +} + +// logger is a concurrency safe byte.Buffer +type logger struct { + sync.Mutex + writer bytes.Buffer +} + +func (l *logger) Write(buf []byte) (int, error) { + l.Lock() + defer l.Unlock() + return l.writer.Write(buf) +} + +func (l *logger) WriteTo(w io.Writer) (int64, error) { + l.Lock() + defer l.Unlock() + return l.writer.WriteTo(w) +} + +func (l *logger) String() string { + l.Lock() + defer l.Unlock() + return l.writer.String() +} + +// ----------------------------------------------------------------------- +// Handling of temporary files and directories. + +type tempDir struct { + sync.Mutex + path string + counter int +} + +func (td *tempDir) newPath() string { + td.Lock() + defer td.Unlock() + if td.path == "" { + var err error + for i := 0; i != 100; i++ { + path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) + if err = os.Mkdir(path, 0700); err == nil { + td.path = path + break + } + } + if td.path == "" { + panic("Couldn't create temporary directory: " + err.Error()) + } + } + result := filepath.Join(td.path, strconv.Itoa(td.counter)) + td.counter++ + return result +} + +func (td *tempDir) removeAll() { + td.Lock() + defer td.Unlock() + if td.path != "" { + err := os.RemoveAll(td.path) + if err != nil { + fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) + } + } +} + +// Create a new temporary directory which is automatically removed after +// the suite finishes running. +func (c *C) MkDir() string { + path := c.tempDir.newPath() + if err := os.Mkdir(path, 0700); err != nil { + panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) + } + return path +} + +// ----------------------------------------------------------------------- +// Low-level logging functions. + +func (c *C) log(args ...interface{}) { + c.writeLog([]byte(fmt.Sprint(args...) 
+ "\n")) +} + +func (c *C) logf(format string, args ...interface{}) { + c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) +} + +func (c *C) logNewLine() { + c.writeLog([]byte{'\n'}) +} + +func (c *C) writeLog(buf []byte) { + c.logb.Write(buf) + if c.logw != nil { + c.logw.Write(buf) + } +} + +func hasStringOrError(x interface{}) (ok bool) { + _, ok = x.(fmt.Stringer) + if ok { + return + } + _, ok = x.(error) + return +} + +func (c *C) logValue(label string, value interface{}) { + if label == "" { + if hasStringOrError(value) { + c.logf("... %#v (%q)", value, value) + } else { + c.logf("... %#v", value) + } + } else if value == nil { + c.logf("... %s = nil", label) + } else { + if hasStringOrError(value) { + fv := fmt.Sprintf("%#v", value) + qv := fmt.Sprintf("%q", value) + if fv != qv { + c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) + return + } + } + if s, ok := value.(string); ok && isMultiLine(s) { + c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) + c.logMultiLine(s) + } else { + c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) + } + } +} + +func formatMultiLine(s string, quote bool) []byte { + b := make([]byte, 0, len(s)*2) + i := 0 + n := len(s) + for i < n { + j := i + 1 + for j < n && s[j-1] != '\n' { + j++ + } + b = append(b, "... "...) + if quote { + b = strconv.AppendQuote(b, s[i:j]) + } else { + b = append(b, s[i:j]...) + b = bytes.TrimSpace(b) + } + if quote && j < n { + b = append(b, " +"...) + } + b = append(b, '\n') + i = j + } + return b +} + +func (c *C) logMultiLine(s string) { + c.writeLog(formatMultiLine(s, true)) +} + +func isMultiLine(s string) bool { + for i := 0; i+1 < len(s); i++ { + if s[i] == '\n' { + return true + } + } + return false +} + +func (c *C) logString(issue string) { + c.log("... ", issue) +} + +func (c *C) logCaller(skip int) { + // This is a bit heavier than it ought to be. + skip++ // Our own frame. + pc, callerFile, callerLine, ok := runtime.Caller(skip) + if !ok { + return + } + var testFile string + var testLine int + testFunc := runtime.FuncForPC(c.method.PC()) + if runtime.FuncForPC(pc) != testFunc { + for { + skip++ + if pc, file, line, ok := runtime.Caller(skip); ok { + // Note that the test line may be different on + // distinct calls for the same test. Showing + // the "internal" line is helpful when debugging. + if runtime.FuncForPC(pc) == testFunc { + testFile, testLine = file, line + break + } + } else { + break + } + } + } + if testFile != "" && (testFile != callerFile || testLine != callerLine) { + c.logCode(testFile, testLine) + } + c.logCode(callerFile, callerLine) +} + +func (c *C) logCode(path string, line int) { + c.logf("%s:%d:", nicePath(path), line) + code, err := printLine(path, line) + if code == "" { + code = "..." // XXX Open the file and take the raw line. + if err != nil { + code += err.Error() + } + } + c.log(indent(code, " ")) +} + +var valueGo = filepath.Join("reflect", "value.go") +var asmGo = filepath.Join("runtime", "asm_") + +func (c *C) logPanic(skip int, value interface{}) { + skip++ // Our own frame. + initialSkip := skip + for ; ; skip++ { + if pc, file, line, ok := runtime.Caller(skip); ok { + if skip == initialSkip { + c.logf("... 
Panic: %s (PC=0x%X)\n", value, pc) + } + name := niceFuncName(pc) + path := nicePath(file) + if strings.Contains(path, "/gopkg.in/check.v") { + continue + } + if name == "Value.call" && strings.HasSuffix(path, valueGo) { + continue + } + if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) { + continue + } + c.logf("%s:%d\n in %s", nicePath(file), line, name) + } else { + break + } + } +} + +func (c *C) logSoftPanic(issue string) { + c.log("... Panic: ", issue) +} + +func (c *C) logArgPanic(method *methodType, expectedType string) { + c.logf("... Panic: %s argument should be %s", + niceFuncName(method.PC()), expectedType) +} + +// ----------------------------------------------------------------------- +// Some simple formatting helpers. + +var initWD, initWDErr = os.Getwd() + +func init() { + if initWDErr == nil { + initWD = strings.Replace(initWD, "\\", "/", -1) + "/" + } +} + +func nicePath(path string) string { + if initWDErr == nil { + if strings.HasPrefix(path, initWD) { + return path[len(initWD):] + } + } + return path +} + +func niceFuncPath(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + filename, line := function.FileLine(pc) + return fmt.Sprintf("%s:%d", nicePath(filename), line) + } + return "" +} + +func niceFuncName(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + name := path.Base(function.Name()) + if i := strings.Index(name, "."); i > 0 { + name = name[i+1:] + } + if strings.HasPrefix(name, "(*") { + if i := strings.Index(name, ")"); i > 0 { + name = name[2:i] + name[i+1:] + } + } + if i := strings.LastIndex(name, ".*"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + if i := strings.LastIndex(name, "·"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + return name + } + return "" +} + +// ----------------------------------------------------------------------- +// Result tracker to aggregate call results. + +type Result struct { + Succeeded int + Failed int + Skipped int + Panicked int + FixturePanicked int + ExpectedFailures int + Missed int // Not even tried to run, related to a panic in the fixture. + RunError error // Houston, we've got a problem. + WorkDir string // If KeepWorkDir is true +} + +type resultTracker struct { + result Result + _lastWasProblem bool + _waiting int + _missed int + _expectChan chan *C + _doneChan chan *C + _stopChan chan bool +} + +func newResultTracker() *resultTracker { + return &resultTracker{_expectChan: make(chan *C), // Synchronous + _doneChan: make(chan *C, 32), // Asynchronous + _stopChan: make(chan bool)} // Synchronous +} + +func (tracker *resultTracker) start() { + go tracker._loopRoutine() +} + +func (tracker *resultTracker) waitAndStop() { + <-tracker._stopChan +} + +func (tracker *resultTracker) expectCall(c *C) { + tracker._expectChan <- c +} + +func (tracker *resultTracker) callDone(c *C) { + tracker._doneChan <- c +} + +func (tracker *resultTracker) _loopRoutine() { + for { + var c *C + if tracker._waiting > 0 { + // Calls still running. Can't stop. 
+ select { + // XXX Reindent this (not now to make diff clear) + case <-tracker._expectChan: + tracker._waiting++ + case c = <-tracker._doneChan: + tracker._waiting-- + switch c.status() { + case succeededSt: + if c.kind == testKd { + if c.mustFail { + tracker.result.ExpectedFailures++ + } else { + tracker.result.Succeeded++ + } + } + case failedSt: + tracker.result.Failed++ + case panickedSt: + if c.kind == fixtureKd { + tracker.result.FixturePanicked++ + } else { + tracker.result.Panicked++ + } + case fixturePanickedSt: + // Track it as missed, since the panic + // was on the fixture, not on the test. + tracker.result.Missed++ + case missedSt: + tracker.result.Missed++ + case skippedSt: + if c.kind == testKd { + tracker.result.Skipped++ + } + } + } + } else { + // No calls. Can stop, but no done calls here. + select { + case tracker._stopChan <- true: + return + case <-tracker._expectChan: + tracker._waiting++ + case <-tracker._doneChan: + panic("Tracker got an unexpected done call.") + } + } + } +} + +// ----------------------------------------------------------------------- +// The underlying suite runner. + +type suiteRunner struct { + suite interface{} + setUpSuite, tearDownSuite *methodType + setUpTest, tearDownTest *methodType + tests []*methodType + tracker *resultTracker + tempDir *tempDir + keepDir bool + output *outputWriter + reportedProblemLast bool + benchTime time.Duration + benchMem bool +} + +type RunConf struct { + Output io.Writer + Stream bool + Verbose bool + Filter string + Benchmark bool + BenchmarkTime time.Duration // Defaults to 1 second + BenchmarkMem bool + KeepWorkDir bool +} + +// Create a new suiteRunner able to run all methods in the given suite. +func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { + var conf RunConf + if runConf != nil { + conf = *runConf + } + if conf.Output == nil { + conf.Output = os.Stdout + } + if conf.Benchmark { + conf.Verbose = true + } + + suiteType := reflect.TypeOf(suite) + suiteNumMethods := suiteType.NumMethod() + suiteValue := reflect.ValueOf(suite) + + runner := &suiteRunner{ + suite: suite, + output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), + tracker: newResultTracker(), + benchTime: conf.BenchmarkTime, + benchMem: conf.BenchmarkMem, + tempDir: &tempDir{}, + keepDir: conf.KeepWorkDir, + tests: make([]*methodType, 0, suiteNumMethods), + } + if runner.benchTime == 0 { + runner.benchTime = 1 * time.Second + } + + var filterRegexp *regexp.Regexp + if conf.Filter != "" { + regexp, err := regexp.Compile(conf.Filter) + if err != nil { + msg := "Bad filter expression: " + err.Error() + runner.tracker.result.RunError = errors.New(msg) + return runner + } + filterRegexp = regexp + } + + for i := 0; i != suiteNumMethods; i++ { + method := newMethod(suiteValue, i) + switch method.Info.Name { + case "SetUpSuite": + runner.setUpSuite = method + case "TearDownSuite": + runner.tearDownSuite = method + case "SetUpTest": + runner.setUpTest = method + case "TearDownTest": + runner.tearDownTest = method + default: + prefix := "Test" + if conf.Benchmark { + prefix = "Benchmark" + } + if !strings.HasPrefix(method.Info.Name, prefix) { + continue + } + if filterRegexp == nil || method.matches(filterRegexp) { + runner.tests = append(runner.tests, method) + } + } + } + return runner +} + +// Run all methods in the given suite. 
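The name-based lookup in newSuiteRunner above maps directly onto how a suite is written; a sketch of a suite that uses the fixture hooks (the FixtureSuite type and its map are illustrative):

```go
package mypkg_test

import (
	"testing"

	"gopkg.in/check.v1"
)

func Test(t *testing.T) { check.TestingT(t) }

type FixtureSuite struct {
	db map[string]string
}

var _ = check.Suite(&FixtureSuite{})

// These methods are discovered by name, exactly as newSuiteRunner looks them up.
func (s *FixtureSuite) SetUpSuite(c *check.C)    { s.db = make(map[string]string) }
func (s *FixtureSuite) SetUpTest(c *check.C)     { s.db["k"] = "v" }
func (s *FixtureSuite) TearDownTest(c *check.C)  { delete(s.db, "k") }
func (s *FixtureSuite) TearDownSuite(c *check.C) { s.db = nil }

// Only Test*-prefixed methods run by default; Benchmark* methods run when
// RunConf.Benchmark is set.
func (s *FixtureSuite) TestLookup(c *check.C) {
	c.Assert(s.db["k"], check.Equals, "v")
}
```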
+func (runner *suiteRunner) run() *Result { + if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { + runner.tracker.start() + if runner.checkFixtureArgs() { + c := runner.runFixture(runner.setUpSuite, "", nil) + if c == nil || c.status() == succeededSt { + for i := 0; i != len(runner.tests); i++ { + c := runner.runTest(runner.tests[i]) + if c.status() == fixturePanickedSt { + runner.skipTests(missedSt, runner.tests[i+1:]) + break + } + } + } else if c != nil && c.status() == skippedSt { + runner.skipTests(skippedSt, runner.tests) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.runFixture(runner.tearDownSuite, "", nil) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.tracker.waitAndStop() + if runner.keepDir { + runner.tracker.result.WorkDir = runner.tempDir.path + } else { + runner.tempDir.removeAll() + } + } + return &runner.tracker.result +} + +// Create a call object with the given suite method, and fork a +// goroutine with the provided dispatcher for running it. +func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + var logw io.Writer + if runner.output.Stream { + logw = runner.output + } + if logb == nil { + logb = new(logger) + } + c := &C{ + method: method, + kind: kind, + testName: testName, + logb: logb, + logw: logw, + tempDir: runner.tempDir, + done: make(chan *C, 1), + timer: timer{benchTime: runner.benchTime}, + startTime: time.Now(), + benchMem: runner.benchMem, + } + runner.tracker.expectCall(c) + go (func() { + runner.reportCallStarted(c) + defer runner.callDone(c) + dispatcher(c) + })() + return c +} + +// Same as forkCall(), but wait for call to finish before returning. +func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + c := runner.forkCall(method, kind, testName, logb, dispatcher) + <-c.done + return c +} + +// Handle a finished call. If there were any panics, update the call status +// accordingly. Then, mark the call as done and report to the tracker. +func (runner *suiteRunner) callDone(c *C) { + value := recover() + if value != nil { + switch v := value.(type) { + case *fixturePanic: + if v.status == skippedSt { + c.setStatus(skippedSt) + } else { + c.logSoftPanic("Fixture has panicked (see related PANIC)") + c.setStatus(fixturePanickedSt) + } + default: + c.logPanic(1, value) + c.setStatus(panickedSt) + } + } + if c.mustFail { + switch c.status() { + case failedSt: + c.setStatus(succeededSt) + case succeededSt: + c.setStatus(failedSt) + c.logString("Error: Test succeeded, but was expected to fail") + c.logString("Reason: " + c.reason) + } + } + + runner.reportCallDone(c) + c.done <- c +} + +// Runs a fixture call synchronously. The fixture will still be run in a +// goroutine like all suite methods, but this method will not return +// while the fixture goroutine is not done, because the fixture must be +// run in a desired order. +func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { + if method != nil { + c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { + c.ResetTimer() + c.StartTimer() + defer c.StopTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + }) + return c + } + return nil +} + +// Run the fixture method with runFixture(), but panic with a fixturePanic{} +// in case the fixture method panics. 
This makes it easier to track the +// fixture panic together with other call panics within forkTest(). +func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { + if skipped != nil && *skipped { + return nil + } + c := runner.runFixture(method, testName, logb) + if c != nil && c.status() != succeededSt { + if skipped != nil { + *skipped = c.status() == skippedSt + } + panic(&fixturePanic{c.status(), method}) + } + return c +} + +type fixturePanic struct { + status funcStatus + method *methodType +} + +// Run the suite test method, together with the test-specific fixture, +// asynchronously. +func (runner *suiteRunner) forkTest(method *methodType) *C { + testName := method.String() + return runner.forkCall(method, testKd, testName, nil, func(c *C) { + var skipped bool + defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) + defer c.StopTimer() + benchN := 1 + for { + runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) + mt := c.method.Type() + if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { + // Rather than a plain panic, provide a more helpful message when + // the argument type is incorrect. + c.setStatus(panickedSt) + c.logArgPanic(c.method, "*check.C") + return + } + if strings.HasPrefix(c.method.Info.Name, "Test") { + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + return + } + if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { + panic("unexpected method prefix: " + c.method.Info.Name) + } + + runtime.GC() + c.N = benchN + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + c.StopTimer() + if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { + return + } + perOpN := int(1e9) + if c.nsPerOp() != 0 { + perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) + } + + // Logic taken from the stock testing package: + // - Run more iterations than we think we'll need for a second (1.5x). + // - Don't grow too fast in case we had timing errors previously. + // - Be sure to run at least one more than last time. + benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) + benchN = roundUp(benchN) + + skipped = true // Don't run the deferred one if this panics. + runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) + skipped = false + } + }) +} + +// Same as forkTest(), but wait for the test to finish before returning. +func (runner *suiteRunner) runTest(method *methodType) *C { + c := runner.forkTest(method) + <-c.done + return c +} + +// Helper to mark tests as skipped or missed. A bit heavy for what +// it does, but it enables homogeneous handling of tracking, including +// nice verbose output. +func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { + for _, method := range methods { + runner.runFunc(method, testKd, "", nil, func(c *C) { + c.setStatus(status) + }) + } +} + +// Verify if the fixture arguments are *check.C. In case of errors, +// log the error as a panic in the fixture method call, and return false. 
+func (runner *suiteRunner) checkFixtureArgs() bool { + succeeded := true + argType := reflect.TypeOf(&C{}) + for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { + if method != nil { + mt := method.Type() + if mt.NumIn() != 1 || mt.In(0) != argType { + succeeded = false + runner.runFunc(method, fixtureKd, "", nil, func(c *C) { + c.logArgPanic(method, "*check.C") + c.setStatus(panickedSt) + }) + } + } + } + return succeeded +} + +func (runner *suiteRunner) reportCallStarted(c *C) { + runner.output.WriteCallStarted("START", c) +} + +func (runner *suiteRunner) reportCallDone(c *C) { + runner.tracker.callDone(c) + switch c.status() { + case succeededSt: + if c.mustFail { + runner.output.WriteCallSuccess("FAIL EXPECTED", c) + } else { + runner.output.WriteCallSuccess("PASS", c) + } + case skippedSt: + runner.output.WriteCallSuccess("SKIP", c) + case failedSt: + runner.output.WriteCallProblem("FAIL", c) + case panickedSt: + runner.output.WriteCallProblem("PANIC", c) + case fixturePanickedSt: + // That's a testKd call reporting that its fixture + // has panicked. The fixture call which caused the + // panic itself was tracked above. We'll report to + // aid debugging. + runner.output.WriteCallProblem("PANIC", c) + case missedSt: + runner.output.WriteCallSuccess("MISS", c) + } +} diff --git a/vendor/gopkg.in/check.v1/checkers.go b/vendor/gopkg.in/check.v1/checkers.go new file mode 100644 index 0000000..cdd7f9a --- /dev/null +++ b/vendor/gopkg.in/check.v1/checkers.go @@ -0,0 +1,524 @@ +package check + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/kr/pretty" +) + +// ----------------------------------------------------------------------- +// CommentInterface and Commentf helper, to attach extra information to checks. + +type comment struct { + format string + args []interface{} +} + +// Commentf returns an infomational value to use with Assert or Check calls. +// If the checker test fails, the provided arguments will be passed to +// fmt.Sprintf, and will be presented next to the logged failure. +// +// For example: +// +// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) +// +// Note that if the comment is constant, a better option is to +// simply use a normal comment right above or next to the line, as +// it will also get printed with any errors: +// +// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) +// +func Commentf(format string, args ...interface{}) CommentInterface { + return &comment{format, args} +} + +// CommentInterface must be implemented by types that attach extra +// information to failed checks. See the Commentf function for details. +type CommentInterface interface { + CheckCommentString() string +} + +func (c *comment) CheckCommentString() string { + return fmt.Sprintf(c.format, c.args...) +} + +// ----------------------------------------------------------------------- +// The Checker interface. + +// The Checker interface must be provided by checkers used with +// the Assert and Check verification methods. +type Checker interface { + Info() *CheckerInfo + Check(params []interface{}, names []string) (result bool, error string) +} + +// See the Checker interface. +type CheckerInfo struct { + Name string + Params []string +} + +func (info *CheckerInfo) Info() *CheckerInfo { + return info +} + +// ----------------------------------------------------------------------- +// Not checker logic inverter. 
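For illustration, a minimal sketch of a user-defined checker built against the Checker interface above; the mychecks package name, the isPositiveChecker type, and the IsPositive variable are hypothetical examples rather than identifiers shipped with check.v1:

package mychecks

import check "gopkg.in/check.v1"

// isPositiveChecker reports whether the obtained int is greater than zero.
// Embedding *check.CheckerInfo supplies the Info() method required by the
// Checker interface, so only Check() needs to be written by hand.
type isPositiveChecker struct {
	*check.CheckerInfo
}

// IsPositive can be passed to c.Assert and c.Check like the built-in checkers.
var IsPositive check.Checker = &isPositiveChecker{
	&check.CheckerInfo{Name: "IsPositive", Params: []string{"obtained"}},
}

func (checker *isPositiveChecker) Check(params []interface{}, names []string) (bool, string) {
	n, ok := params[0].(int)
	if !ok {
		return false, "obtained value must be an int"
	}
	return n > 0, ""
}

A test would then call c.Assert(n, IsPositive); on failure the obtained value is logged under the "obtained" parameter name declared in CheckerInfo.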
+ +// The Not checker inverts the logic of the provided checker. The +// resulting checker will succeed where the original one failed, and +// vice-versa. +// +// For example: +// +// c.Assert(a, Not(Equals), b) +// +func Not(checker Checker) Checker { + return ¬Checker{checker} +} + +type notChecker struct { + sub Checker +} + +func (checker *notChecker) Info() *CheckerInfo { + info := *checker.sub.Info() + info.Name = "Not(" + info.Name + ")" + return &info +} + +func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { + result, error = checker.sub.Check(params, names) + result = !result + if result { + // clear error message if the new result is true + error = "" + } + return +} + +// ----------------------------------------------------------------------- +// IsNil checker. + +type isNilChecker struct { + *CheckerInfo +} + +// The IsNil checker tests whether the obtained value is nil. +// +// For example: +// +// c.Assert(err, IsNil) +// +var IsNil Checker = &isNilChecker{ + &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, +} + +func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return isNil(params[0]), "" +} + +func isNil(obtained interface{}) (result bool) { + if obtained == nil { + result = true + } else { + switch v := reflect.ValueOf(obtained); v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + } + return +} + +// ----------------------------------------------------------------------- +// NotNil checker. Alias for Not(IsNil), since it's so common. + +type notNilChecker struct { + *CheckerInfo +} + +// The NotNil checker verifies that the obtained value is not nil. +// +// For example: +// +// c.Assert(iface, NotNil) +// +// This is an alias for Not(IsNil), made available since it's a +// fairly common check. +// +var NotNil Checker = ¬NilChecker{ + &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, +} + +func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return !isNil(params[0]), "" +} + +// ----------------------------------------------------------------------- +// Equals checker. + +func diffworthy(a interface{}) bool { + t := reflect.TypeOf(a) + switch t.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct, reflect.String, reflect.Ptr: + return true + } + return false +} + +// formatUnequal will dump the actual and expected values into a textual +// representation and return an error message containing a diff. +func formatUnequal(obtained interface{}, expected interface{}) string { + // We do not do diffs for basic types because go-check already + // shows them very cleanly. + if !diffworthy(obtained) || !diffworthy(expected) { + return "" + } + + // Handle strings, short strings are ignored (go-check formats + // them very nicely already). We do multi-line strings by + // generating two string slices and using kr.Diff to compare + // those (kr.Diff does not do string diffs by itself). 
+ aStr, aOK := obtained.(string) + bStr, bOK := expected.(string) + if aOK && bOK { + l1 := strings.Split(aStr, "\n") + l2 := strings.Split(bStr, "\n") + // the "2" here is a bit arbitrary + if len(l1) > 2 && len(l2) > 2 { + diff := pretty.Diff(l1, l2) + return fmt.Sprintf(`String difference: +%s`, formatMultiLine(strings.Join(diff, "\n"), false)) + } + // string too short + return "" + } + + // generic diff + diff := pretty.Diff(obtained, expected) + if len(diff) == 0 { + // No diff, this happens when e.g. just struct + // pointers are different but the structs have + // identical values. + return "" + } + + return fmt.Sprintf(`Difference: +%s`, formatMultiLine(strings.Join(diff, "\n"), false)) +} + +type equalsChecker struct { + *CheckerInfo +} + +// The Equals checker verifies that the obtained value is equal to +// the expected value, according to usual Go semantics for ==. +// +// For example: +// +// c.Assert(value, Equals, 42) +// +var Equals Checker = &equalsChecker{ + &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, +} + +func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { + defer func() { + if v := recover(); v != nil { + result = false + error = fmt.Sprint(v) + } + }() + + result = params[0] == params[1] + if !result { + error = formatUnequal(params[0], params[1]) + } + return +} + +// ----------------------------------------------------------------------- +// DeepEquals checker. + +type deepEqualsChecker struct { + *CheckerInfo +} + +// The DeepEquals checker verifies that the obtained value is deep-equal to +// the expected value. The check will work correctly even when facing +// slices, interfaces, and values of different types (which always fail +// the test). +// +// For example: +// +// c.Assert(value, DeepEquals, 42) +// c.Assert(array, DeepEquals, []string{"hi", "there"}) +// +var DeepEquals Checker = &deepEqualsChecker{ + &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { + result = reflect.DeepEqual(params[0], params[1]) + if !result { + error = formatUnequal(params[0], params[1]) + } + return +} + +// ----------------------------------------------------------------------- +// HasLen checker. + +type hasLenChecker struct { + *CheckerInfo +} + +// The HasLen checker verifies that the obtained value has the +// provided length. In many cases this is superior to using Equals +// in conjunction with the len function because in case the check +// fails the value itself will be printed, instead of its length, +// providing more details for figuring the problem. +// +// For example: +// +// c.Assert(list, HasLen, 5) +// +var HasLen Checker = &hasLenChecker{ + &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, +} + +func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { + n, ok := params[1].(int) + if !ok { + return false, "n must be an int" + } + value := reflect.ValueOf(params[0]) + switch value.Kind() { + case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: + default: + return false, "obtained value type has no length" + } + return value.Len() == n, "" +} + +// ----------------------------------------------------------------------- +// ErrorMatches checker. 
+ +type errorMatchesChecker struct { + *CheckerInfo +} + +// The ErrorMatches checker verifies that the error value +// is non nil and matches the regular expression provided. +// +// For example: +// +// c.Assert(err, ErrorMatches, "perm.*denied") +// +var ErrorMatches Checker = errorMatchesChecker{ + &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, +} + +func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { + if params[0] == nil { + return false, "Error value is nil" + } + err, ok := params[0].(error) + if !ok { + return false, "Value is not an error" + } + params[0] = err.Error() + names[0] = "error" + return matches(params[0], params[1]) +} + +// ----------------------------------------------------------------------- +// Matches checker. + +type matchesChecker struct { + *CheckerInfo +} + +// The Matches checker verifies that the string provided as the obtained +// value (or the string resulting from obtained.String()) matches the +// regular expression provided. +// +// For example: +// +// c.Assert(err, Matches, "perm.*denied") +// +var Matches Checker = &matchesChecker{ + &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, +} + +func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { + return matches(params[0], params[1]) +} + +func matches(value, regex interface{}) (result bool, error string) { + reStr, ok := regex.(string) + if !ok { + return false, "Regex must be a string" + } + valueStr, valueIsStr := value.(string) + if !valueIsStr { + if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { + valueStr, valueIsStr = valueWithStr.String(), true + } + } + if valueIsStr { + matches, err := regexp.MatchString("^"+reStr+"$", valueStr) + if err != nil { + return false, "Can't compile regex: " + err.Error() + } + return matches, "" + } + return false, "Obtained value is not a string and has no .String()" +} + +// ----------------------------------------------------------------------- +// Panics checker. + +type panicsChecker struct { + *CheckerInfo +} + +// The Panics checker verifies that calling the provided zero-argument +// function will cause a panic which is deep-equal to the provided value. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). +// +// +var Panics Checker = &panicsChecker{ + &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, +} + +func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if error != "" { + return + } + params[0] = recover() + names[0] = "panic" + result = reflect.DeepEqual(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +type panicMatchesChecker struct { + *CheckerInfo +} + +// The PanicMatches checker verifies that calling the provided zero-argument +// function will cause a panic with an error value matching +// the regular expression provided. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). 
+// +// +var PanicMatches Checker = &panicMatchesChecker{ + &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, +} + +func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if errmsg != "" { + return + } + obtained := recover() + names[0] = "panic" + if e, ok := obtained.(error); ok { + params[0] = e.Error() + } else if _, ok := obtained.(string); ok { + params[0] = obtained + } else { + errmsg = "Panic value is not a string or an error" + return + } + result, errmsg = matches(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +// ----------------------------------------------------------------------- +// FitsTypeOf checker. + +type fitsTypeChecker struct { + *CheckerInfo +} + +// The FitsTypeOf checker verifies that the obtained value is +// assignable to a variable with the same type as the provided +// sample value. +// +// For example: +// +// c.Assert(value, FitsTypeOf, int64(0)) +// c.Assert(value, FitsTypeOf, os.Error(nil)) +// +var FitsTypeOf Checker = &fitsTypeChecker{ + &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, +} + +func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + sample := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !sample.IsValid() { + return false, "Invalid sample value" + } + return obtained.Type().AssignableTo(sample.Type()), "" +} + +// ----------------------------------------------------------------------- +// Implements checker. + +type implementsChecker struct { + *CheckerInfo +} + +// The Implements checker verifies that the obtained value +// implements the interface specified via a pointer to an interface +// variable. +// +// For example: +// +// var e os.Error +// c.Assert(err, Implements, &e) +// +var Implements Checker = &implementsChecker{ + &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, +} + +func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + ifaceptr := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { + return false, "ifaceptr should be a pointer to an interface variable" + } + return obtained.Type().Implements(ifaceptr.Elem().Type()), "" +} diff --git a/vendor/gopkg.in/check.v1/helpers.go b/vendor/gopkg.in/check.v1/helpers.go new file mode 100644 index 0000000..58a733b --- /dev/null +++ b/vendor/gopkg.in/check.v1/helpers.go @@ -0,0 +1,231 @@ +package check + +import ( + "fmt" + "strings" + "time" +) + +// TestName returns the current test name in the form "SuiteName.TestName" +func (c *C) TestName() string { + return c.testName +} + +// ----------------------------------------------------------------------- +// Basic succeeding/failing logic. + +// Failed returns whether the currently running test has already failed. +func (c *C) Failed() bool { + return c.status() == failedSt +} + +// Fail marks the currently running test as failed. 
+// +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) Fail() { + c.setStatus(failedSt) +} + +// FailNow marks the currently running test as failed and stops running it. +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) FailNow() { + c.Fail() + c.stopNow() +} + +// Succeed marks the currently running test as succeeded, undoing any +// previous failures. +func (c *C) Succeed() { + c.setStatus(succeededSt) +} + +// SucceedNow marks the currently running test as succeeded, undoing any +// previous failures, and stops running the test. +func (c *C) SucceedNow() { + c.Succeed() + c.stopNow() +} + +// ExpectFailure informs that the running test is knowingly broken for +// the provided reason. If the test does not fail, an error will be reported +// to raise attention to this fact. This method is useful to temporarily +// disable tests which cover well known problems until a better time to +// fix the problem is found, without forgetting about the fact that a +// failure still exists. +func (c *C) ExpectFailure(reason string) { + if reason == "" { + panic("Missing reason why the test is expected to fail") + } + c.mustFail = true + c.reason = reason +} + +// Skip skips the running test for the provided reason. If run from within +// SetUpTest, the individual test being set up will be skipped, and if run +// from within SetUpSuite, the whole suite is skipped. +func (c *C) Skip(reason string) { + if reason == "" { + panic("Missing reason why the test is being skipped") + } + c.reason = reason + c.setStatus(skippedSt) + c.stopNow() +} + +// ----------------------------------------------------------------------- +// Basic logging. + +// GetTestLog returns the current test error output. +func (c *C) GetTestLog() string { + return c.logb.String() +} + +// Log logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Log(args ...interface{}) { + c.log(args...) +} + +// Log logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprintf. +func (c *C) Logf(format string, args ...interface{}) { + c.logf(format, args...) +} + +// Output enables *C to be used as a logger in functions that require only +// the minimum interface of *log.Logger. +func (c *C) Output(calldepth int, s string) error { + d := time.Now().Sub(c.startTime) + msec := d / time.Millisecond + sec := d / time.Second + min := d / time.Minute + + c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) + return nil +} + +// Error logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Error(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.Fail() +} + +// Errorf logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprintf. 
+func (c *C) Errorf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprintf("Error: "+format, args...)) + c.logNewLine() + c.Fail() +} + +// Fatal logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprint. +func (c *C) Fatal(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.FailNow() +} + +// Fatlaf logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprintf. +func (c *C) Fatalf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) + c.logNewLine() + c.FailNow() +} + +// ----------------------------------------------------------------------- +// Generic checks and assertions based on checkers. + +// Check verifies if the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution continues. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { + return c.internalCheck("Check", obtained, checker, args...) +} + +// Assert ensures that the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution stops. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { + if !c.internalCheck("Assert", obtained, checker, args...) { + c.stopNow() + } +} + +func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { + if checker == nil { + c.logCaller(2) + c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) + c.logString("Oops.. you've provided a nil checker!") + c.logNewLine() + c.Fail() + return false + } + + // If the last argument is a bug info, extract it out. + var comment CommentInterface + if len(args) > 0 { + if c, ok := args[len(args)-1].(CommentInterface); ok { + comment = c + args = args[:len(args)-1] + } + } + + params := append([]interface{}{obtained}, args...) + info := checker.Info() + + if len(params) != len(info.Params) { + names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) + c.logCaller(2) + c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) + c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) + c.logNewLine() + c.Fail() + return false + } + + // Copy since it may be mutated by Check. + names := append([]string{}, info.Params...) + + // Do the actual check. 
+ result, error := checker.Check(params, names) + if !result || error != "" { + c.logCaller(2) + for i := 0; i != len(params); i++ { + c.logValue(names[i], params[i]) + } + if comment != nil { + c.logString(comment.CheckCommentString()) + } + if error != "" { + c.logString(error) + } + c.logNewLine() + c.Fail() + return false + } + return true +} diff --git a/vendor/gopkg.in/check.v1/printer.go b/vendor/gopkg.in/check.v1/printer.go new file mode 100644 index 0000000..e0f7557 --- /dev/null +++ b/vendor/gopkg.in/check.v1/printer.go @@ -0,0 +1,168 @@ +package check + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "os" +) + +func indent(s, with string) (r string) { + eol := true + for i := 0; i != len(s); i++ { + c := s[i] + switch { + case eol && c == '\n' || c == '\r': + case c == '\n' || c == '\r': + eol = true + case eol: + eol = false + s = s[:i] + with + s[i:] + i += len(with) + } + } + return s +} + +func printLine(filename string, line int) (string, error) { + fset := token.NewFileSet() + file, err := os.Open(filename) + if err != nil { + return "", err + } + fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) + if err != nil { + return "", err + } + config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} + lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} + ast.Walk(lp, fnode) + result := lp.output.Bytes() + // Comments leave \n at the end. + n := len(result) + for n > 0 && result[n-1] == '\n' { + n-- + } + return string(result[:n]), nil +} + +type linePrinter struct { + config *printer.Config + fset *token.FileSet + fnode *ast.File + line int + output bytes.Buffer + stmt ast.Stmt +} + +func (lp *linePrinter) emit() bool { + if lp.stmt != nil { + lp.trim(lp.stmt) + lp.printWithComments(lp.stmt) + lp.stmt = nil + return true + } + return false +} + +func (lp *linePrinter) printWithComments(n ast.Node) { + nfirst := lp.fset.Position(n.Pos()).Line + nlast := lp.fset.Position(n.End()).Line + for _, g := range lp.fnode.Comments { + cfirst := lp.fset.Position(g.Pos()).Line + clast := lp.fset.Position(g.End()).Line + if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { + for _, c := range g.List { + lp.output.WriteString(c.Text) + lp.output.WriteByte('\n') + } + } + if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { + // The printer will not include the comment if it starts past + // the node itself. Trick it into printing by overlapping the + // slash with the end of the statement. + g.List[0].Slash = n.End() - 1 + } + } + node := &printer.CommentedNode{n, lp.fnode.Comments} + lp.config.Fprint(&lp.output, lp.fset, node) +} + +func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { + if n == nil { + if lp.output.Len() == 0 { + lp.emit() + } + return nil + } + first := lp.fset.Position(n.Pos()).Line + last := lp.fset.Position(n.End()).Line + if first <= lp.line && last >= lp.line { + // Print the innermost statement containing the line. 
+ if stmt, ok := n.(ast.Stmt); ok { + if _, ok := n.(*ast.BlockStmt); !ok { + lp.stmt = stmt + } + } + if first == lp.line && lp.emit() { + return nil + } + return lp + } + return nil +} + +func (lp *linePrinter) trim(n ast.Node) bool { + stmt, ok := n.(ast.Stmt) + if !ok { + return true + } + line := lp.fset.Position(n.Pos()).Line + if line != lp.line { + return false + } + switch stmt := stmt.(type) { + case *ast.IfStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.SwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.TypeSwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.CaseClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.CommClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.BlockStmt: + stmt.List = lp.trimList(stmt.List) + } + return true +} + +func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { + if !lp.trim(stmt) { + return lp.emptyBlock(stmt) + } + stmt.Rbrace = stmt.Lbrace + return stmt +} + +func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { + for i := 0; i != len(stmts); i++ { + if !lp.trim(stmts[i]) { + stmts[i] = lp.emptyStmt(stmts[i]) + break + } + } + return stmts +} + +func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { + return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} +} + +func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { + p := n.Pos() + return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} +} diff --git a/vendor/gopkg.in/check.v1/reporter.go b/vendor/gopkg.in/check.v1/reporter.go new file mode 100644 index 0000000..fb04f76 --- /dev/null +++ b/vendor/gopkg.in/check.v1/reporter.go @@ -0,0 +1,88 @@ +package check + +import ( + "fmt" + "io" + "sync" +) + +// ----------------------------------------------------------------------- +// Output writer manages atomic output writing according to settings. + +type outputWriter struct { + m sync.Mutex + writer io.Writer + wroteCallProblemLast bool + Stream bool + Verbose bool +} + +func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { + return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} +} + +func (ow *outputWriter) Write(content []byte) (n int, err error) { + ow.m.Lock() + n, err = ow.writer.Write(content) + ow.m.Unlock() + return +} + +func (ow *outputWriter) WriteCallStarted(label string, c *C) { + if ow.Stream { + header := renderCallHeader(label, c, "", "\n") + ow.m.Lock() + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func (ow *outputWriter) WriteCallProblem(label string, c *C) { + var prefix string + if !ow.Stream { + prefix = "\n-----------------------------------" + + "-----------------------------------\n" + } + header := renderCallHeader(label, c, prefix, "\n\n") + ow.m.Lock() + ow.wroteCallProblemLast = true + ow.writer.Write([]byte(header)) + if !ow.Stream { + c.logb.WriteTo(ow.writer) + } + ow.m.Unlock() +} + +func (ow *outputWriter) WriteCallSuccess(label string, c *C) { + if ow.Stream || (ow.Verbose && c.kind == testKd) { + // TODO Use a buffer here. + var suffix string + if c.reason != "" { + suffix = " (" + c.reason + ")" + } + if c.status() == succeededSt { + suffix += "\t" + c.timerString() + } + suffix += "\n" + if ow.Stream { + suffix += "\n" + } + header := renderCallHeader(label, c, "", suffix) + ow.m.Lock() + // Resist temptation of using line as prefix above due to race. 
+ if !ow.Stream && ow.wroteCallProblemLast { + header = "\n-----------------------------------" + + "-----------------------------------\n" + + header + } + ow.wroteCallProblemLast = false + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func renderCallHeader(label string, c *C, prefix, suffix string) string { + pc := c.method.PC() + return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), + niceFuncName(pc), suffix) +} diff --git a/vendor/gopkg.in/check.v1/run.go b/vendor/gopkg.in/check.v1/run.go new file mode 100644 index 0000000..da8fd79 --- /dev/null +++ b/vendor/gopkg.in/check.v1/run.go @@ -0,0 +1,175 @@ +package check + +import ( + "bufio" + "flag" + "fmt" + "os" + "testing" + "time" +) + +// ----------------------------------------------------------------------- +// Test suite registry. + +var allSuites []interface{} + +// Suite registers the given value as a test suite to be run. Any methods +// starting with the Test prefix in the given value will be considered as +// a test method. +func Suite(suite interface{}) interface{} { + allSuites = append(allSuites, suite) + return suite +} + +// ----------------------------------------------------------------------- +// Public running interface. + +var ( + oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") + oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") + oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") + oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") + oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") + oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") + oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") + + newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") + newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") + newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") + newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") + newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") + newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") + newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") + newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") +) + +// TestingT runs all test suites registered with the Suite function, +// printing results to stdout, and reporting any failures back to +// the "testing" package. 
+func TestingT(testingT *testing.T) { + benchTime := *newBenchTime + if benchTime == 1*time.Second { + benchTime = *oldBenchTime + } + conf := &RunConf{ + Filter: *oldFilterFlag + *newFilterFlag, + Verbose: *oldVerboseFlag || *newVerboseFlag, + Stream: *oldStreamFlag || *newStreamFlag, + Benchmark: *oldBenchFlag || *newBenchFlag, + BenchmarkTime: benchTime, + BenchmarkMem: *newBenchMem, + KeepWorkDir: *oldWorkFlag || *newWorkFlag, + } + if *oldListFlag || *newListFlag { + w := bufio.NewWriter(os.Stdout) + for _, name := range ListAll(conf) { + fmt.Fprintln(w, name) + } + w.Flush() + return + } + result := RunAll(conf) + println(result.String()) + if !result.Passed() { + testingT.Fail() + } +} + +// RunAll runs all test suites registered with the Suite function, using the +// provided run configuration. +func RunAll(runConf *RunConf) *Result { + result := Result{} + for _, suite := range allSuites { + result.Add(Run(suite, runConf)) + } + return &result +} + +// Run runs the provided test suite using the provided run configuration. +func Run(suite interface{}, runConf *RunConf) *Result { + runner := newSuiteRunner(suite, runConf) + return runner.run() +} + +// ListAll returns the names of all the test functions registered with the +// Suite function that will be run with the provided run configuration. +func ListAll(runConf *RunConf) []string { + var names []string + for _, suite := range allSuites { + names = append(names, List(suite, runConf)...) + } + return names +} + +// List returns the names of the test functions in the given +// suite that will be run with the provided run configuration. +func List(suite interface{}, runConf *RunConf) []string { + var names []string + runner := newSuiteRunner(suite, runConf) + for _, t := range runner.tests { + names = append(names, t.String()) + } + return names +} + +// ----------------------------------------------------------------------- +// Result methods. 
+ +func (r *Result) Add(other *Result) { + r.Succeeded += other.Succeeded + r.Skipped += other.Skipped + r.Failed += other.Failed + r.Panicked += other.Panicked + r.FixturePanicked += other.FixturePanicked + r.ExpectedFailures += other.ExpectedFailures + r.Missed += other.Missed + if r.WorkDir != "" && other.WorkDir != "" { + r.WorkDir += ":" + other.WorkDir + } else if other.WorkDir != "" { + r.WorkDir = other.WorkDir + } +} + +func (r *Result) Passed() bool { + return (r.Failed == 0 && r.Panicked == 0 && + r.FixturePanicked == 0 && r.Missed == 0 && + r.RunError == nil) +} + +func (r *Result) String() string { + if r.RunError != nil { + return "ERROR: " + r.RunError.Error() + } + + var value string + if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && + r.Missed == 0 { + value = "OK: " + } else { + value = "OOPS: " + } + value += fmt.Sprintf("%d passed", r.Succeeded) + if r.Skipped != 0 { + value += fmt.Sprintf(", %d skipped", r.Skipped) + } + if r.ExpectedFailures != 0 { + value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) + } + if r.Failed != 0 { + value += fmt.Sprintf(", %d FAILED", r.Failed) + } + if r.Panicked != 0 { + value += fmt.Sprintf(", %d PANICKED", r.Panicked) + } + if r.FixturePanicked != 0 { + value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) + } + if r.Missed != 0 { + value += fmt.Sprintf(", %d MISSED", r.Missed) + } + if r.WorkDir != "" { + value += "\nWORK=" + r.WorkDir + } + return value +} diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..0e24fcc --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,16 @@ +# github.com/beevik/etree v1.0.1 +github.com/beevik/etree +# github.com/crewjam/errset v0.0.0-20160219153700-f78d65de925c +github.com/crewjam/errset +# github.com/kr/pretty v0.1.0 +github.com/kr/pretty +# github.com/kr/text v0.1.0 +github.com/kr/text +# github.com/ma314smith/signedxml v0.0.0-20161222022618-9781f7e0c2f1 +github.com/ma314smith/signedxml +# github.com/pkg/errors v0.8.0 +github.com/pkg/errors +# golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 +golang.org/x/crypto/ripemd160 +# gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +gopkg.in/check.v1
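
For reference, a minimal sketch of how the vendored check.v1 package is typically wired into go test; the example_test package, the MySuite type, and its methods are hypothetical names used only for illustration:

package example_test

import (
	"errors"
	"testing"

	check "gopkg.in/check.v1"
)

// Test hooks the registered check.v1 suites into the standard "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }

// MySuite is registered via Suite(); TestingT will discover and run it.
type MySuite struct{}

var _ = check.Suite(&MySuite{})

// Methods prefixed with "Test" receive a *check.C and use the checkers
// through Assert (stops the test on failure) and Check (continues).
func (s *MySuite) TestCheckers(c *check.C) {
	c.Assert(21*2, check.Equals, 42, check.Commentf("unexpected product"))
	c.Check([]string{"a", "b"}, check.DeepEquals, []string{"a", "b"})
	err := errors.New("permission denied")
	c.Assert(err, check.ErrorMatches, "perm.*denied")
	c.Assert(nil, check.IsNil)
}

// Methods prefixed with "Benchmark" run only in benchmark mode and must loop
// c.N times, mirroring the stock testing package's benchmark contract.
func (s *MySuite) BenchmarkSquares(c *check.C) {
	for i := 0; i < c.N; i++ {
		_ = i * i
	}
}

With this wiring, go test -check.f TestCheckers runs only the matching test, while go test -check.b switches the runner to the Benchmark* methods instead of the tests.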