Migrate to Go modules - remove vendor folder (#2504)

* Migrate to gomods

* Fix conflict

* gomod: Switch xenolf/lego to go-acme/lego
Authored by Michael Grosser on 2019-03-30 02:30:48 +00:00, committed by Matt Holt
parent bea48b80ce
commit 3841517ce1
1715 changed files with 117 additions and 795881 deletions


@@ -22,12 +22,12 @@ import (
"os"
"sync/atomic"
- "github.com/xenolf/lego/challenge/tlsalpn01"
+ "github.com/go-acme/lego/challenge/tlsalpn01"
+ "github.com/go-acme/lego/certcrypto"
"github.com/klauspost/cpuid"
"github.com/mholt/caddy"
"github.com/mholt/certmagic"
- "github.com/xenolf/lego/certcrypto"
)
// Config describes how TLS should be configured and used.


@@ -14,7 +14,7 @@ import (
"strings"
"time"
- "github.com/xenolf/lego/certcrypto"
+ "github.com/go-acme/lego/certcrypto"
)
// newSelfSignedCertificate returns a new self-signed certificate.
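For reference, a minimal sketch (not part of this diff) of how the renamed certcrypto package is typically used to produce a key for a helper like newSelfSignedCertificate above; GeneratePrivateKey, PEMEncode, and the EC256 constant are assumed from the lego API of this era.

package main

import (
	"fmt"
	"log"

	"github.com/go-acme/lego/certcrypto"
)

func main() {
	// Generate an ECDSA P-256 key with the renamed package.
	key, err := certcrypto.GeneratePrivateKey(certcrypto.EC256)
	if err != nil {
		log.Fatal(err)
	}
	// PEM-encode the key, e.g. for feeding into a self-signed certificate helper.
	fmt.Printf("%s", certcrypto.PEMEncode(key))
}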


@@ -23,7 +23,7 @@ import (
"github.com/mholt/caddy"
"github.com/mholt/certmagic"
- "github.com/xenolf/lego/certcrypto"
+ "github.com/go-acme/lego/certcrypto"
)
func TestMain(m *testing.M) {


@@ -29,9 +29,9 @@
package caddytls
import (
+ "github.com/go-acme/lego/challenge"
"github.com/mholt/caddy"
"github.com/mholt/certmagic"
- "github.com/xenolf/lego/challenge"
)
// ConfigHolder is any type that has a Config; it presumably is
@@ -93,7 +93,7 @@ var KnownACMECAs = []string{
//
// challenge.Provider is an interface that allows the implementation of custom
// challenge providers. For more details, see:
-// https://godoc.org/github.com/xenolf/lego/acme#ChallengeProvider
+// https://godoc.org/github.com/go-acme/lego/acme#ChallengeProvider
type ChallengeProvider challenge.Provider
// DNSProviderConstructor is a function that takes credentials and
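To illustrate the interface aliased above, a minimal sketch of a custom solver satisfying go-acme/lego's challenge.Provider; the exampleSolver type and its behavior are hypothetical, only the two-method shape comes from lego.

package mydns

import (
	"fmt"

	"github.com/go-acme/lego/challenge"
)

// exampleSolver is a hypothetical provider; a real one would create and
// remove whatever record or resource the ACME challenge requires.
type exampleSolver struct{}

// Present puts the challenge response in place before validation.
func (exampleSolver) Present(domain, token, keyAuth string) error {
	fmt.Printf("presenting challenge for %s\n", domain)
	return nil
}

// CleanUp removes the challenge response after validation.
func (exampleSolver) CleanUp(domain, token, keyAuth string) error {
	fmt.Printf("cleaning up challenge for %s\n", domain)
	return nil
}

// Compile-time check that the sketch satisfies the interface aliased above.
var _ challenge.Provider = exampleSolver{}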

go.mod (new file, 27 lines)

@@ -0,0 +1,27 @@
module github.com/mholt/caddy
go 1.12
require (
github.com/bifurcation/mint v0.0.0-20190129141059-83ba9bc2ead9 // indirect
github.com/dustin/go-humanize v1.0.0
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/go-acme/lego v2.4.0+incompatible
github.com/google/uuid v1.1.1
github.com/gorilla/websocket v1.4.0
github.com/hashicorp/go-syslog v1.0.0
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a
github.com/klauspost/cpuid v1.2.0
github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f // indirect
github.com/lucas-clemente/quic-go v0.7.1-0.20190305092557-966b00dd3dc9
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced // indirect
github.com/mholt/certmagic v0.0.0-20190319183800-ee1543e2f234
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.1
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
golang.org/x/net v0.0.0-20190328230028-74de082e2cca
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v2 v2.2.2
)
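For illustration, a minimal sketch of a custom build against the new module path; the plugin import is hypothetical, and caddymain.Run is assumed to be the usual entry point for custom Caddy 1.x binaries. With modules, go build resolves these requirements from go.mod rather than from the removed vendor folder.

package main

import (
	"github.com/mholt/caddy/caddy/caddymain"
	// Hypothetical plugin import; plugins register themselves in their init().
	// _ "github.com/example/caddy-plugin"
)

func main() {
	// Parses flags, loads the Caddyfile, and runs the server.
	caddymain.Run()
}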

go.sum (new file, 84 lines)

@@ -0,0 +1,84 @@
github.com/bifurcation/mint v0.0.0-20190129141059-83ba9bc2ead9 h1:cJwkHhcmnrWFPCg8eUPm7JJCnhnF93lFUe2ukQnXvJ4=
github.com/bifurcation/mint v0.0.0-20190129141059-83ba9bc2ead9/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU=
github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-acme/lego v2.3.1-0.20190318164254-3684cc738d37+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M=
github.com/go-acme/lego v2.4.0+incompatible h1:+BTLUfLtDc5qQauyiTCXH6lupEUOCvXyGlEjdeU0YQI=
github.com/go-acme/lego v2.4.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a h1:BcF8coBl0QFVhe8vAMMlD+CV8EISiu9MGKLoj6ZEyJA=
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f h1:sSeNEkJrs+0F9TUau0CgWTTNEwF23HST3Eq0A+QIx+A=
github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04=
github.com/lucas-clemente/quic-go v0.7.1-0.20190305092557-966b00dd3dc9 h1:a63dIU38jDMOGFGAFlxjcLCCngoJ0UjHk6EGgUZ3oiM=
github.com/lucas-clemente/quic-go v0.7.1-0.20190305092557-966b00dd3dc9/go.mod h1:m0oFjJxxNCH6+RsoooGlifXEGnJRIA+WiXXCc9kyiok=
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced h1:zqEC1GJZFbGZA0tRyNZqRjep92K5fujFtFsu5ZW7Aug=
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58=
github.com/marten-seemann/qtls v0.2.0 h1:SnGwbmSUjODZ3PPCG6N0GX0w30yvndyFmoNY2pbgW+s=
github.com/marten-seemann/qtls v0.2.0/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/mholt/certmagic v0.0.0-20190319183800-ee1543e2f234 h1:6biVHmhBIOQCzdOClOJ97D/Ip9oH8TgFftq2bj/93YI=
github.com/mholt/certmagic v0.0.0-20190319183800-ee1543e2f234/go.mod h1:KvmxBmeVqj88J9Z9us/x04Yp/YYfMtmGkhQFWewFe6U=
github.com/miekg/dns v1.1.3 h1:1g0r1IvskvgL8rR+AcHzUA+oFmGcQlaIm4IqakufeMM=
github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4 h1:S9YlS71UNJIyS61OqGAmLXv3w5zclSidN+qwr80XxKs=
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190328230028-74de082e2cca h1:hyA6yiAgbUwuWqtscNvWAI7U1CtlaD1KilQ6iudt1aI=
golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,437 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
metaClient = &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}
subscribeClient = &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
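For context, a minimal sketch of how a caller would use this (formerly vendored) package, based only on the functions shown in this file; the attribute name is made up.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		log.Fatal("not running on Google Compute Engine")
	}
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	// Undefined metadata is reported as NotDefinedError, not as an empty value.
	attr, err := metadata.InstanceAttributeValue("my-attribute")
	if _, undefined := err.(metadata.NotDefinedError); undefined {
		attr = "(not set)"
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println(project, attr)
}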
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
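A short sketch of the Subscribe loop described above, watching one hypothetical instance attribute until the callback errors or the value is deleted.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Blocks until fn returns a non-nil error or the value is deleted (ok == false).
	err := metadata.Subscribe("instance/attributes/my-attribute", func(v string, ok bool) error {
		if !ok {
			fmt.Println("attribute was deleted")
			return nil
		}
		fmt.Println("attribute changed to:", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}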
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Andreas Auernhammer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,197 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// Package chacha implements some low-level functions of the
// ChaCha cipher family.
package chacha // import "github.com/aead/chacha20/chacha"
import (
"encoding/binary"
"errors"
"math"
)
const (
// NonceSize is the size of the ChaCha20 nonce in bytes.
NonceSize = 8
// INonceSize is the size of the IETF-ChaCha20 nonce in bytes.
INonceSize = 12
// XNonceSize is the size of the XChaCha20 nonce in bytes.
XNonceSize = 24
// KeySize is the size of the key in bytes.
KeySize = 32
)
var (
useSSE2 bool
useSSSE3 bool
useAVX bool
useAVX2 bool
)
var (
errKeySize = errors.New("chacha20/chacha: bad key length")
errInvalidNonce = errors.New("chacha20/chacha: bad nonce length")
)
func setup(state *[64]byte, nonce, key []byte) (err error) {
if len(key) != KeySize {
err = errKeySize
return
}
var Nonce [16]byte
switch len(nonce) {
case NonceSize:
copy(Nonce[8:], nonce)
initialize(state, key, &Nonce)
case INonceSize:
copy(Nonce[4:], nonce)
initialize(state, key, &Nonce)
case XNonceSize:
var tmpKey [32]byte
var hNonce [16]byte
copy(hNonce[:], nonce[:16])
copy(tmpKey[:], key)
HChaCha20(&tmpKey, &hNonce, &tmpKey)
copy(Nonce[8:], nonce[16:])
initialize(state, tmpKey[:], &Nonce)
// BUG(aead): A "good" compiler may optimize this zeroing away.
// But using the provided key directly instead of tmpKey
// would change the caller's key (and probably confuse users).
for i := range tmpKey {
tmpKey[i] = 0
}
default:
err = errInvalidNonce
}
return
}
// XORKeyStream crypts bytes from src to dst using the given nonce and key.
// The length of the nonce determines the version of ChaCha20:
// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period.
// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period.
// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period.
// The rounds argument specifies the number of rounds performed for keystream
// generation - valid values are 8, 12 or 20. The src and dst may be the same slice
// but otherwise should not overlap. If len(dst) < len(src) this function panics.
// If the nonce is neither 64, 96 nor 192 bits long, this function panics.
func XORKeyStream(dst, src, nonce, key []byte, rounds int) {
if rounds != 20 && rounds != 12 && rounds != 8 {
panic("chacha20/chacha: bad number of rounds")
}
if len(dst) < len(src) {
panic("chacha20/chacha: dst buffer is to small")
}
if len(nonce) == INonceSize && uint64(len(src)) > (1<<38) {
panic("chacha20/chacha: src is too large")
}
var block, state [64]byte
if err := setup(&state, nonce, key); err != nil {
panic(err)
}
xorKeyStream(dst, src, &block, &state, rounds)
}
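A minimal usage sketch of the one-shot function above, using the IETF 12-byte nonce and 20 rounds; the all-zero key and nonce are placeholders, not something to use in practice.

package main

import (
	"fmt"

	"github.com/aead/chacha20/chacha"
)

func main() {
	key := make([]byte, chacha.KeySize)      // 32-byte key (placeholder zeros)
	nonce := make([]byte, chacha.INonceSize) // 12-byte nonce selects the RFC 7539 variant
	plaintext := []byte("hello, chacha")
	ciphertext := make([]byte, len(plaintext))

	chacha.XORKeyStream(ciphertext, plaintext, nonce, key, 20)
	fmt.Printf("%x\n", ciphertext)
}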
// Cipher implements ChaCha20/r (XChaCha20/r) for a given number of rounds r.
type Cipher struct {
state, block [64]byte
off int
rounds int // 20 for ChaCha20
noncesize int
}
// NewCipher returns a new *chacha.Cipher implementing the ChaCha20/r or XChaCha20/r
// (r = 8, 12 or 20) stream cipher. The nonce must be unique for one key for all time.
// The length of the nonce determines the version of ChaCha20:
// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period.
// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period.
// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period.
// If the nonce is neither 64, 96 nor 192 bits long, a non-nil error is returned.
func NewCipher(nonce, key []byte, rounds int) (*Cipher, error) {
if rounds != 20 && rounds != 12 && rounds != 8 {
panic("chacha20/chacha: bad number of rounds")
}
c := new(Cipher)
if err := setup(&(c.state), nonce, key); err != nil {
return nil, err
}
c.rounds = rounds
if len(nonce) == INonceSize {
c.noncesize = INonceSize
} else {
c.noncesize = NonceSize
}
return c, nil
}
// XORKeyStream crypts bytes from src to dst. Src and dst may be the same slice
// but otherwise should not overlap. If len(dst) < len(src) the function panics.
func (c *Cipher) XORKeyStream(dst, src []byte) {
if len(dst) < len(src) {
panic("chacha20/chacha: dst buffer is to small")
}
if c.off > 0 {
n := len(c.block[c.off:])
if len(src) <= n {
for i, v := range src {
dst[i] = v ^ c.block[c.off]
c.off++
}
if c.off == 64 {
c.off = 0
}
return
}
for i, v := range c.block[c.off:] {
dst[i] = src[i] ^ v
}
src = src[n:]
dst = dst[n:]
c.off = 0
}
// check for counter overflow
blocksToXOR := len(src) / 64
if len(src)%64 != 0 {
blocksToXOR++
}
var overflow bool
if c.noncesize == INonceSize {
overflow = binary.LittleEndian.Uint32(c.state[48:]) > math.MaxUint32-uint32(blocksToXOR)
} else {
overflow = binary.LittleEndian.Uint64(c.state[48:]) > math.MaxUint64-uint64(blocksToXOR)
}
if overflow {
panic("chacha20/chacha: counter overflow")
}
c.off += xorKeyStream(dst, src, &(c.block), &(c.state), c.rounds)
}
// SetCounter skips ctr * 64 byte blocks. SetCounter(0) resets the cipher.
// This function always skips the unused keystream of the current 64 byte block.
func (c *Cipher) SetCounter(ctr uint64) {
if c.noncesize == INonceSize {
binary.LittleEndian.PutUint32(c.state[48:], uint32(ctr))
} else {
binary.LittleEndian.PutUint64(c.state[48:], ctr)
}
c.off = 0
}
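And a sketch of the streaming Cipher above, using SetCounter to skip the first 64-byte keystream block (again with placeholder key material).

package main

import (
	"fmt"
	"log"

	"github.com/aead/chacha20/chacha"
)

func main() {
	key := make([]byte, chacha.KeySize)     // placeholder zeros
	nonce := make([]byte, chacha.NonceSize) // 8-byte nonce selects plain ChaCha20
	c, err := chacha.NewCipher(nonce, key, 20)
	if err != nil {
		log.Fatal(err)
	}
	c.SetCounter(1) // skip the first 64-byte block of keystream

	msg := []byte("stream me")
	out := make([]byte, len(msg))
	c.XORKeyStream(out, msg)
	fmt.Printf("%x\n", out)
}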
// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key.
// It can be used as a key-derivation-function (KDF).
func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20(out, nonce, key) }


@@ -1,406 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build amd64,!gccgo,!appengine,!nacl
#include "const.s"
#include "macro.s"
#define TWO 0(SP)
#define C16 32(SP)
#define C8 64(SP)
#define STATE_0 96(SP)
#define STATE_1 128(SP)
#define STATE_2 160(SP)
#define STATE_3 192(SP)
#define TMP_0 224(SP)
#define TMP_1 256(SP)
// func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int
TEXT ·xorKeyStreamAVX2(SB), 4, $320-80
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ block+48(FP), BX
MOVQ state+56(FP), AX
MOVQ rounds+64(FP), DX
MOVQ src_len+32(FP), CX
MOVQ SP, R8
ADDQ $32, SP
ANDQ $-32, SP
VMOVDQU 0(AX), Y2
VMOVDQU 32(AX), Y3
VPERM2I128 $0x22, Y2, Y0, Y0
VPERM2I128 $0x33, Y2, Y1, Y1
VPERM2I128 $0x22, Y3, Y2, Y2
VPERM2I128 $0x33, Y3, Y3, Y3
TESTQ CX, CX
JZ done
VMOVDQU ·one_AVX2<>(SB), Y4
VPADDD Y4, Y3, Y3
VMOVDQA Y0, STATE_0
VMOVDQA Y1, STATE_1
VMOVDQA Y2, STATE_2
VMOVDQA Y3, STATE_3
VMOVDQU ·rol16_AVX2<>(SB), Y4
VMOVDQU ·rol8_AVX2<>(SB), Y5
VMOVDQU ·two_AVX2<>(SB), Y6
VMOVDQA Y4, Y14
VMOVDQA Y5, Y15
VMOVDQA Y4, C16
VMOVDQA Y5, C8
VMOVDQA Y6, TWO
CMPQ CX, $64
JBE between_0_and_64
CMPQ CX, $192
JBE between_64_and_192
CMPQ CX, $320
JBE between_192_and_320
CMPQ CX, $448
JBE between_320_and_448
at_least_512:
VMOVDQA Y0, Y4
VMOVDQA Y1, Y5
VMOVDQA Y2, Y6
VPADDQ TWO, Y3, Y7
VMOVDQA Y0, Y8
VMOVDQA Y1, Y9
VMOVDQA Y2, Y10
VPADDQ TWO, Y7, Y11
VMOVDQA Y0, Y12
VMOVDQA Y1, Y13
VMOVDQA Y2, Y14
VPADDQ TWO, Y11, Y15
MOVQ DX, R9
chacha_loop_512:
VMOVDQA Y8, TMP_0
CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y8, C16, C8)
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y8, C16, C8)
VMOVDQA TMP_0, Y8
VMOVDQA Y0, TMP_0
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y0, C16, C8)
CHACHA_QROUND_AVX(Y12, Y13, Y14, Y15, Y0, C16, C8)
CHACHA_SHUFFLE_AVX(Y1, Y2, Y3)
CHACHA_SHUFFLE_AVX(Y5, Y6, Y7)
CHACHA_SHUFFLE_AVX(Y9, Y10, Y11)
CHACHA_SHUFFLE_AVX(Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y12, Y13, Y14, Y15, Y0, C16, C8)
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y0, C16, C8)
VMOVDQA TMP_0, Y0
VMOVDQA Y8, TMP_0
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y8, C16, C8)
CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y8, C16, C8)
VMOVDQA TMP_0, Y8
CHACHA_SHUFFLE_AVX(Y3, Y2, Y1)
CHACHA_SHUFFLE_AVX(Y7, Y6, Y5)
CHACHA_SHUFFLE_AVX(Y11, Y10, Y9)
CHACHA_SHUFFLE_AVX(Y15, Y14, Y13)
SUBQ $2, R9
JA chacha_loop_512
VMOVDQA Y12, TMP_0
VMOVDQA Y13, TMP_1
VPADDD STATE_0, Y0, Y0
VPADDD STATE_1, Y1, Y1
VPADDD STATE_2, Y2, Y2
VPADDD STATE_3, Y3, Y3
XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13)
VMOVDQA STATE_0, Y0
VMOVDQA STATE_1, Y1
VMOVDQA STATE_2, Y2
VMOVDQA STATE_3, Y3
VPADDQ TWO, Y3, Y3
VPADDD Y0, Y4, Y4
VPADDD Y1, Y5, Y5
VPADDD Y2, Y6, Y6
VPADDD Y3, Y7, Y7
XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13)
VPADDQ TWO, Y3, Y3
VPADDD Y0, Y8, Y8
VPADDD Y1, Y9, Y9
VPADDD Y2, Y10, Y10
VPADDD Y3, Y11, Y11
XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
VPADDQ TWO, Y3, Y3
VPADDD TMP_0, Y0, Y12
VPADDD TMP_1, Y1, Y13
VPADDD Y2, Y14, Y14
VPADDD Y3, Y15, Y15
VPADDQ TWO, Y3, Y3
CMPQ CX, $512
JB less_than_512
XOR_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5)
VMOVDQA Y3, STATE_3
ADDQ $512, SI
ADDQ $512, DI
SUBQ $512, CX
CMPQ CX, $448
JA at_least_512
TESTQ CX, CX
JZ done
VMOVDQA C16, Y14
VMOVDQA C8, Y15
CMPQ CX, $64
JBE between_0_and_64
CMPQ CX, $192
JBE between_64_and_192
CMPQ CX, $320
JBE between_192_and_320
JMP between_320_and_448
less_than_512:
XOR_UPPER_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5)
EXTRACT_LOWER(BX, Y12, Y13, Y14, Y15, Y4)
ADDQ $448, SI
ADDQ $448, DI
SUBQ $448, CX
JMP finalize
between_320_and_448:
VMOVDQA Y0, Y4
VMOVDQA Y1, Y5
VMOVDQA Y2, Y6
VPADDQ TWO, Y3, Y7
VMOVDQA Y0, Y8
VMOVDQA Y1, Y9
VMOVDQA Y2, Y10
VPADDQ TWO, Y7, Y11
MOVQ DX, R9
chacha_loop_384:
CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y1, Y2, Y3)
CHACHA_SHUFFLE_AVX(Y5, Y6, Y7)
CHACHA_SHUFFLE_AVX(Y9, Y10, Y11)
CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y3, Y2, Y1)
CHACHA_SHUFFLE_AVX(Y7, Y6, Y5)
CHACHA_SHUFFLE_AVX(Y11, Y10, Y9)
SUBQ $2, R9
JA chacha_loop_384
VPADDD STATE_0, Y0, Y0
VPADDD STATE_1, Y1, Y1
VPADDD STATE_2, Y2, Y2
VPADDD STATE_3, Y3, Y3
XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13)
VMOVDQA STATE_0, Y0
VMOVDQA STATE_1, Y1
VMOVDQA STATE_2, Y2
VMOVDQA STATE_3, Y3
VPADDQ TWO, Y3, Y3
VPADDD Y0, Y4, Y4
VPADDD Y1, Y5, Y5
VPADDD Y2, Y6, Y6
VPADDD Y3, Y7, Y7
XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13)
VPADDQ TWO, Y3, Y3
VPADDD Y0, Y8, Y8
VPADDD Y1, Y9, Y9
VPADDD Y2, Y10, Y10
VPADDD Y3, Y11, Y11
VPADDQ TWO, Y3, Y3
CMPQ CX, $384
JB less_than_384
XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
SUBQ $384, CX
TESTQ CX, CX
JE done
ADDQ $384, SI
ADDQ $384, DI
JMP between_0_and_64
less_than_384:
XOR_UPPER_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12)
ADDQ $320, SI
ADDQ $320, DI
SUBQ $320, CX
JMP finalize
between_192_and_320:
VMOVDQA Y0, Y4
VMOVDQA Y1, Y5
VMOVDQA Y2, Y6
VMOVDQA Y3, Y7
VMOVDQA Y0, Y8
VMOVDQA Y1, Y9
VMOVDQA Y2, Y10
VPADDQ TWO, Y3, Y11
MOVQ DX, R9
chacha_loop_256:
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y5, Y6, Y7)
CHACHA_SHUFFLE_AVX(Y9, Y10, Y11)
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y7, Y6, Y5)
CHACHA_SHUFFLE_AVX(Y11, Y10, Y9)
SUBQ $2, R9
JA chacha_loop_256
VPADDD Y0, Y4, Y4
VPADDD Y1, Y5, Y5
VPADDD Y2, Y6, Y6
VPADDD Y3, Y7, Y7
VPADDQ TWO, Y3, Y3
XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
VPADDD Y0, Y8, Y8
VPADDD Y1, Y9, Y9
VPADDD Y2, Y10, Y10
VPADDD Y3, Y11, Y11
VPADDQ TWO, Y3, Y3
CMPQ CX, $256
JB less_than_256
XOR_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13)
SUBQ $256, CX
TESTQ CX, CX
JE done
ADDQ $256, SI
ADDQ $256, DI
JMP between_0_and_64
less_than_256:
XOR_UPPER_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13)
EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12)
ADDQ $192, SI
ADDQ $192, DI
SUBQ $192, CX
JMP finalize
between_64_and_192:
VMOVDQA Y0, Y4
VMOVDQA Y1, Y5
VMOVDQA Y2, Y6
VMOVDQA Y3, Y7
MOVQ DX, R9
chacha_loop_128:
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y5, Y6, Y7)
CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
CHACHA_SHUFFLE_AVX(Y7, Y6, Y5)
SUBQ $2, R9
JA chacha_loop_128
VPADDD Y0, Y4, Y4
VPADDD Y1, Y5, Y5
VPADDD Y2, Y6, Y6
VPADDD Y3, Y7, Y7
VPADDQ TWO, Y3, Y3
CMPQ CX, $128
JB less_than_128
XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
SUBQ $128, CX
TESTQ CX, CX
JE done
ADDQ $128, SI
ADDQ $128, DI
JMP between_0_and_64
less_than_128:
XOR_UPPER_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
EXTRACT_LOWER(BX, Y4, Y5, Y6, Y7, Y13)
ADDQ $64, SI
ADDQ $64, DI
SUBQ $64, CX
JMP finalize
between_0_and_64:
VMOVDQA X0, X4
VMOVDQA X1, X5
VMOVDQA X2, X6
VMOVDQA X3, X7
MOVQ DX, R9
chacha_loop_64:
CHACHA_QROUND_AVX(X4, X5, X6, X7, X13, X14, X15)
CHACHA_SHUFFLE_AVX(X5, X6, X7)
CHACHA_QROUND_AVX(X4, X5, X6, X7, X13, X14, X15)
CHACHA_SHUFFLE_AVX(X7, X6, X5)
SUBQ $2, R9
JA chacha_loop_64
VPADDD X0, X4, X4
VPADDD X1, X5, X5
VPADDD X2, X6, X6
VPADDD X3, X7, X7
VMOVDQU ·one<>(SB), X0
VPADDQ X0, X3, X3
CMPQ CX, $64
JB less_than_64
XOR_AVX(DI, SI, 0, X4, X5, X6, X7, X13)
SUBQ $64, CX
JMP done
less_than_64:
VMOVDQU X4, 0(BX)
VMOVDQU X5, 16(BX)
VMOVDQU X6, 32(BX)
VMOVDQU X7, 48(BX)
finalize:
XORQ R11, R11
XORQ R12, R12
MOVQ CX, BP
xor_loop:
MOVB 0(SI), R11
MOVB 0(BX), R12
XORQ R11, R12
MOVB R12, 0(DI)
INCQ SI
INCQ BX
INCQ DI
DECQ BP
JA xor_loop
done:
VMOVDQU X3, 48(AX)
VZEROUPPER
MOVQ R8, SP
MOVQ CX, ret+72(FP)
RET


@@ -1,60 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build 386,!gccgo,!appengine,!nacl
package chacha
import (
"encoding/binary"
"golang.org/x/sys/cpu"
)
func init() {
useSSE2 = cpu.X86.HasSSE2
useSSSE3 = cpu.X86.HasSSSE3
useAVX = false
useAVX2 = false
}
func initialize(state *[64]byte, key []byte, nonce *[16]byte) {
binary.LittleEndian.PutUint32(state[0:], sigma[0])
binary.LittleEndian.PutUint32(state[4:], sigma[1])
binary.LittleEndian.PutUint32(state[8:], sigma[2])
binary.LittleEndian.PutUint32(state[12:], sigma[3])
copy(state[16:], key[:])
copy(state[48:], nonce[:])
}
// This function is implemented in chacha_386.s
//go:noescape
func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
// This function is implemented in chacha_386.s
//go:noescape
func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
// This function is implemented in chacha_386.s
//go:noescape
func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
switch {
case useSSSE3:
hChaCha20SSSE3(out, nonce, key)
case useSSE2:
hChaCha20SSE2(out, nonce, key)
default:
hChaCha20Generic(out, nonce, key)
}
}
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
if useSSE2 {
return xorKeyStreamSSE2(dst, src, block, state, rounds)
} else {
return xorKeyStreamGeneric(dst, src, block, state, rounds)
}
}


@@ -1,163 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build 386,!gccgo,!appengine,!nacl
#include "const.s"
#include "macro.s"
// FINALIZE xors len bytes from src and block using
// the temp. registers t0 and t1 and writes the result
// to dst.
#define FINALIZE(dst, src, block, len, t0, t1) \
XORL t0, t0; \
XORL t1, t1; \
FINALIZE_LOOP:; \
MOVB 0(src), t0; \
MOVB 0(block), t1; \
XORL t0, t1; \
MOVB t1, 0(dst); \
INCL src; \
INCL block; \
INCL dst; \
DECL len; \
JG FINALIZE_LOOP \
#define Dst DI
#define Nonce AX
#define Key BX
#define Rounds DX
// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
TEXT ·hChaCha20SSE2(SB), 4, $0-12
MOVL out+0(FP), Dst
MOVL nonce+4(FP), Nonce
MOVL key+8(FP), Key
MOVOU ·sigma<>(SB), X0
MOVOU 0*16(Key), X1
MOVOU 1*16(Key), X2
MOVOU 0*16(Nonce), X3
MOVL $20, Rounds
chacha_loop:
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
CHACHA_SHUFFLE_SSE(X1, X2, X3)
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
CHACHA_SHUFFLE_SSE(X3, X2, X1)
SUBL $2, Rounds
JNZ chacha_loop
MOVOU X0, 0*16(Dst)
MOVOU X3, 1*16(Dst)
RET
// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
TEXT ·hChaCha20SSSE3(SB), 4, $0-12
MOVL out+0(FP), Dst
MOVL nonce+4(FP), Nonce
MOVL key+8(FP), Key
MOVOU ·sigma<>(SB), X0
MOVOU 0*16(Key), X1
MOVOU 1*16(Key), X2
MOVOU 0*16(Nonce), X3
MOVL $20, Rounds
MOVOU ·rol16<>(SB), X5
MOVOU ·rol8<>(SB), X6
chacha_loop:
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
CHACHA_SHUFFLE_SSE(X1, X2, X3)
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
CHACHA_SHUFFLE_SSE(X3, X2, X1)
SUBL $2, Rounds
JNZ chacha_loop
MOVOU X0, 0*16(Dst)
MOVOU X3, 1*16(Dst)
RET
#undef Dst
#undef Nonce
#undef Key
#undef Rounds
#define State AX
#define Dst DI
#define Src SI
#define Len DX
#define Tmp0 BX
#define Tmp1 BP
// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
TEXT ·xorKeyStreamSSE2(SB), 4, $0-40
MOVL dst_base+0(FP), Dst
MOVL src_base+12(FP), Src
MOVL state+28(FP), State
MOVL src_len+16(FP), Len
MOVL $0, ret+36(FP) // Number of bytes written to the keystream buffer - 0 iff len mod 64 == 0
MOVOU 0*16(State), X0
MOVOU 1*16(State), X1
MOVOU 2*16(State), X2
MOVOU 3*16(State), X3
TESTL Len, Len
JZ DONE
GENERATE_KEYSTREAM:
MOVO X0, X4
MOVO X1, X5
MOVO X2, X6
MOVO X3, X7
MOVL rounds+32(FP), Tmp0
CHACHA_LOOP:
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
CHACHA_SHUFFLE_SSE(X5, X6, X7)
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
CHACHA_SHUFFLE_SSE(X7, X6, X5)
SUBL $2, Tmp0
JA CHACHA_LOOP
MOVOU 0*16(State), X0 // Restore X0 from state
PADDL X0, X4
PADDL X1, X5
PADDL X2, X6
PADDL X3, X7
MOVOU ·one<>(SB), X0
PADDQ X0, X3
CMPL Len, $64
JL BUFFER_KEYSTREAM
XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X0)
MOVOU 0*16(State), X0 // Restore X0 from state
ADDL $64, Src
ADDL $64, Dst
SUBL $64, Len
JZ DONE
JMP GENERATE_KEYSTREAM // There is at least one more plaintext byte
BUFFER_KEYSTREAM:
MOVL block+24(FP), State
MOVOU X4, 0(State)
MOVOU X5, 16(State)
MOVOU X6, 32(State)
MOVOU X7, 48(State)
MOVL Len, ret+36(FP) // Number of bytes written to the keystream buffer - 0 < Len < 64
FINALIZE(Dst, Src, State, Len, Tmp0, Tmp1)
DONE:
MOVL state+28(FP), State
MOVOU X3, 3*16(State)
RET
#undef State
#undef Dst
#undef Src
#undef Len
#undef Tmp0
#undef Tmp1


@@ -1,76 +0,0 @@
// Copyright (c) 2017 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine,!nacl
package chacha
import "golang.org/x/sys/cpu"
func init() {
useSSE2 = cpu.X86.HasSSE2
useSSSE3 = cpu.X86.HasSSSE3
useAVX = cpu.X86.HasAVX
useAVX2 = cpu.X86.HasAVX2
}
// This function is implemented in chacha_amd64.s
//go:noescape
func initialize(state *[64]byte, key []byte, nonce *[16]byte)
// This function is implemented in chacha_amd64.s
//go:noescape
func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
// This function is implemented in chacha_amd64.s
//go:noescape
func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
// This function is implemented in chachaAVX2_amd64.s
//go:noescape
func hChaCha20AVX(out *[32]byte, nonce *[16]byte, key *[32]byte)
// This function is implemented in chacha_amd64.s
//go:noescape
func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
// This function is implemented in chacha_amd64.s
//go:noescape
func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
// This function is implemented in chacha_amd64.s
//go:noescape
func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int
// This function is implemented in chachaAVX2_amd64.s
//go:noescape
func xorKeyStreamAVX2(dst, src []byte, block, state *[64]byte, rounds int) int
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
switch {
case useAVX:
hChaCha20AVX(out, nonce, key)
case useSSSE3:
hChaCha20SSSE3(out, nonce, key)
case useSSE2:
hChaCha20SSE2(out, nonce, key)
default:
hChaCha20Generic(out, nonce, key)
}
}
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
switch {
case useAVX2:
return xorKeyStreamAVX2(dst, src, block, state, rounds)
case useAVX:
return xorKeyStreamAVX(dst, src, block, state, rounds)
case useSSSE3:
return xorKeyStreamSSSE3(dst, src, block, state, rounds)
case useSSE2:
return xorKeyStreamSSE2(dst, src, block, state, rounds)
default:
return xorKeyStreamGeneric(dst, src, block, state, rounds)
}
}

File diff suppressed because it is too large.


@@ -1,319 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
package chacha
import "encoding/binary"
var sigma = [4]uint32{0x61707865, 0x3320646e, 0x79622d32, 0x6b206574}
func xorKeyStreamGeneric(dst, src []byte, block, state *[64]byte, rounds int) int {
for len(src) >= 64 {
chachaGeneric(block, state, rounds)
for i, v := range block {
dst[i] = src[i] ^ v
}
src = src[64:]
dst = dst[64:]
}
n := len(src)
if n > 0 {
chachaGeneric(block, state, rounds)
for i, v := range src {
dst[i] = v ^ block[i]
}
}
return n
}
func chachaGeneric(dst *[64]byte, state *[64]byte, rounds int) {
v00 := binary.LittleEndian.Uint32(state[0:])
v01 := binary.LittleEndian.Uint32(state[4:])
v02 := binary.LittleEndian.Uint32(state[8:])
v03 := binary.LittleEndian.Uint32(state[12:])
v04 := binary.LittleEndian.Uint32(state[16:])
v05 := binary.LittleEndian.Uint32(state[20:])
v06 := binary.LittleEndian.Uint32(state[24:])
v07 := binary.LittleEndian.Uint32(state[28:])
v08 := binary.LittleEndian.Uint32(state[32:])
v09 := binary.LittleEndian.Uint32(state[36:])
v10 := binary.LittleEndian.Uint32(state[40:])
v11 := binary.LittleEndian.Uint32(state[44:])
v12 := binary.LittleEndian.Uint32(state[48:])
v13 := binary.LittleEndian.Uint32(state[52:])
v14 := binary.LittleEndian.Uint32(state[56:])
v15 := binary.LittleEndian.Uint32(state[60:])
s00, s01, s02, s03, s04, s05, s06, s07 := v00, v01, v02, v03, v04, v05, v06, v07
s08, s09, s10, s11, s12, s13, s14, s15 := v08, v09, v10, v11, v12, v13, v14, v15
for i := 0; i < rounds; i += 2 {
v00 += v04
v12 ^= v00
v12 = (v12 << 16) | (v12 >> 16)
v08 += v12
v04 ^= v08
v04 = (v04 << 12) | (v04 >> 20)
v00 += v04
v12 ^= v00
v12 = (v12 << 8) | (v12 >> 24)
v08 += v12
v04 ^= v08
v04 = (v04 << 7) | (v04 >> 25)
v01 += v05
v13 ^= v01
v13 = (v13 << 16) | (v13 >> 16)
v09 += v13
v05 ^= v09
v05 = (v05 << 12) | (v05 >> 20)
v01 += v05
v13 ^= v01
v13 = (v13 << 8) | (v13 >> 24)
v09 += v13
v05 ^= v09
v05 = (v05 << 7) | (v05 >> 25)
v02 += v06
v14 ^= v02
v14 = (v14 << 16) | (v14 >> 16)
v10 += v14
v06 ^= v10
v06 = (v06 << 12) | (v06 >> 20)
v02 += v06
v14 ^= v02
v14 = (v14 << 8) | (v14 >> 24)
v10 += v14
v06 ^= v10
v06 = (v06 << 7) | (v06 >> 25)
v03 += v07
v15 ^= v03
v15 = (v15 << 16) | (v15 >> 16)
v11 += v15
v07 ^= v11
v07 = (v07 << 12) | (v07 >> 20)
v03 += v07
v15 ^= v03
v15 = (v15 << 8) | (v15 >> 24)
v11 += v15
v07 ^= v11
v07 = (v07 << 7) | (v07 >> 25)
v00 += v05
v15 ^= v00
v15 = (v15 << 16) | (v15 >> 16)
v10 += v15
v05 ^= v10
v05 = (v05 << 12) | (v05 >> 20)
v00 += v05
v15 ^= v00
v15 = (v15 << 8) | (v15 >> 24)
v10 += v15
v05 ^= v10
v05 = (v05 << 7) | (v05 >> 25)
v01 += v06
v12 ^= v01
v12 = (v12 << 16) | (v12 >> 16)
v11 += v12
v06 ^= v11
v06 = (v06 << 12) | (v06 >> 20)
v01 += v06
v12 ^= v01
v12 = (v12 << 8) | (v12 >> 24)
v11 += v12
v06 ^= v11
v06 = (v06 << 7) | (v06 >> 25)
v02 += v07
v13 ^= v02
v13 = (v13 << 16) | (v13 >> 16)
v08 += v13
v07 ^= v08
v07 = (v07 << 12) | (v07 >> 20)
v02 += v07
v13 ^= v02
v13 = (v13 << 8) | (v13 >> 24)
v08 += v13
v07 ^= v08
v07 = (v07 << 7) | (v07 >> 25)
v03 += v04
v14 ^= v03
v14 = (v14 << 16) | (v14 >> 16)
v09 += v14
v04 ^= v09
v04 = (v04 << 12) | (v04 >> 20)
v03 += v04
v14 ^= v03
v14 = (v14 << 8) | (v14 >> 24)
v09 += v14
v04 ^= v09
v04 = (v04 << 7) | (v04 >> 25)
}
v00 += s00
v01 += s01
v02 += s02
v03 += s03
v04 += s04
v05 += s05
v06 += s06
v07 += s07
v08 += s08
v09 += s09
v10 += s10
v11 += s11
v12 += s12
v13 += s13
v14 += s14
v15 += s15
s12++
binary.LittleEndian.PutUint32(state[48:], s12)
if s12 == 0 { // indicates overflow
s13++
binary.LittleEndian.PutUint32(state[52:], s13)
}
binary.LittleEndian.PutUint32(dst[0:], v00)
binary.LittleEndian.PutUint32(dst[4:], v01)
binary.LittleEndian.PutUint32(dst[8:], v02)
binary.LittleEndian.PutUint32(dst[12:], v03)
binary.LittleEndian.PutUint32(dst[16:], v04)
binary.LittleEndian.PutUint32(dst[20:], v05)
binary.LittleEndian.PutUint32(dst[24:], v06)
binary.LittleEndian.PutUint32(dst[28:], v07)
binary.LittleEndian.PutUint32(dst[32:], v08)
binary.LittleEndian.PutUint32(dst[36:], v09)
binary.LittleEndian.PutUint32(dst[40:], v10)
binary.LittleEndian.PutUint32(dst[44:], v11)
binary.LittleEndian.PutUint32(dst[48:], v12)
binary.LittleEndian.PutUint32(dst[52:], v13)
binary.LittleEndian.PutUint32(dst[56:], v14)
binary.LittleEndian.PutUint32(dst[60:], v15)
}
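The unrolled loop in chachaGeneric repeats one primitive eight times per iteration: four column quarter-rounds followed by four diagonal quarter-rounds. As a reading aid, here is a hedged sketch of that quarter-round using hypothetical helper names (rotl32, quarterRound) that do not appear in the vendored source; the main function checks it against the RFC 7539 quarter-round test vector.

package main

import "fmt"

// rotl32 is the (x<<n)|(x>>(32-n)) rotation pattern repeated throughout chachaGeneric.
func rotl32(x uint32, n uint) uint32 { return (x << n) | (x >> (32 - n)) }

// quarterRound is the ChaCha quarter-round that the unrolled loop expands:
// chachaGeneric applies it to the columns (v00,v04,v08,v12)..(v03,v07,v11,v15)
// and then to the diagonals (v00,v05,v10,v15)..(v03,v04,v09,v14).
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = rotl32(d^a, 16)
	c += d
	b = rotl32(b^c, 12)
	a += b
	d = rotl32(d^a, 8)
	c += d
	b = rotl32(b^c, 7)
	return a, b, c, d
}

func main() {
	// Quarter-round test vector from RFC 7539, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d) // ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}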
func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) {
v00 := sigma[0]
v01 := sigma[1]
v02 := sigma[2]
v03 := sigma[3]
v04 := binary.LittleEndian.Uint32(key[0:])
v05 := binary.LittleEndian.Uint32(key[4:])
v06 := binary.LittleEndian.Uint32(key[8:])
v07 := binary.LittleEndian.Uint32(key[12:])
v08 := binary.LittleEndian.Uint32(key[16:])
v09 := binary.LittleEndian.Uint32(key[20:])
v10 := binary.LittleEndian.Uint32(key[24:])
v11 := binary.LittleEndian.Uint32(key[28:])
v12 := binary.LittleEndian.Uint32(nonce[0:])
v13 := binary.LittleEndian.Uint32(nonce[4:])
v14 := binary.LittleEndian.Uint32(nonce[8:])
v15 := binary.LittleEndian.Uint32(nonce[12:])
for i := 0; i < 20; i += 2 {
v00 += v04
v12 ^= v00
v12 = (v12 << 16) | (v12 >> 16)
v08 += v12
v04 ^= v08
v04 = (v04 << 12) | (v04 >> 20)
v00 += v04
v12 ^= v00
v12 = (v12 << 8) | (v12 >> 24)
v08 += v12
v04 ^= v08
v04 = (v04 << 7) | (v04 >> 25)
v01 += v05
v13 ^= v01
v13 = (v13 << 16) | (v13 >> 16)
v09 += v13
v05 ^= v09
v05 = (v05 << 12) | (v05 >> 20)
v01 += v05
v13 ^= v01
v13 = (v13 << 8) | (v13 >> 24)
v09 += v13
v05 ^= v09
v05 = (v05 << 7) | (v05 >> 25)
v02 += v06
v14 ^= v02
v14 = (v14 << 16) | (v14 >> 16)
v10 += v14
v06 ^= v10
v06 = (v06 << 12) | (v06 >> 20)
v02 += v06
v14 ^= v02
v14 = (v14 << 8) | (v14 >> 24)
v10 += v14
v06 ^= v10
v06 = (v06 << 7) | (v06 >> 25)
v03 += v07
v15 ^= v03
v15 = (v15 << 16) | (v15 >> 16)
v11 += v15
v07 ^= v11
v07 = (v07 << 12) | (v07 >> 20)
v03 += v07
v15 ^= v03
v15 = (v15 << 8) | (v15 >> 24)
v11 += v15
v07 ^= v11
v07 = (v07 << 7) | (v07 >> 25)
v00 += v05
v15 ^= v00
v15 = (v15 << 16) | (v15 >> 16)
v10 += v15
v05 ^= v10
v05 = (v05 << 12) | (v05 >> 20)
v00 += v05
v15 ^= v00
v15 = (v15 << 8) | (v15 >> 24)
v10 += v15
v05 ^= v10
v05 = (v05 << 7) | (v05 >> 25)
v01 += v06
v12 ^= v01
v12 = (v12 << 16) | (v12 >> 16)
v11 += v12
v06 ^= v11
v06 = (v06 << 12) | (v06 >> 20)
v01 += v06
v12 ^= v01
v12 = (v12 << 8) | (v12 >> 24)
v11 += v12
v06 ^= v11
v06 = (v06 << 7) | (v06 >> 25)
v02 += v07
v13 ^= v02
v13 = (v13 << 16) | (v13 >> 16)
v08 += v13
v07 ^= v08
v07 = (v07 << 12) | (v07 >> 20)
v02 += v07
v13 ^= v02
v13 = (v13 << 8) | (v13 >> 24)
v08 += v13
v07 ^= v08
v07 = (v07 << 7) | (v07 >> 25)
v03 += v04
v14 ^= v03
v14 = (v14 << 16) | (v14 >> 16)
v09 += v14
v04 ^= v09
v04 = (v04 << 12) | (v04 >> 20)
v03 += v04
v14 ^= v03
v14 = (v14 << 8) | (v14 >> 24)
v09 += v14
v04 ^= v09
v04 = (v04 << 7) | (v04 >> 25)
}
binary.LittleEndian.PutUint32(out[0:], v00)
binary.LittleEndian.PutUint32(out[4:], v01)
binary.LittleEndian.PutUint32(out[8:], v02)
binary.LittleEndian.PutUint32(out[12:], v03)
binary.LittleEndian.PutUint32(out[16:], v12)
binary.LittleEndian.PutUint32(out[20:], v13)
binary.LittleEndian.PutUint32(out[24:], v14)
binary.LittleEndian.PutUint32(out[28:], v15)
}

View file

@@ -1,33 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build !amd64,!386 gccgo appengine nacl
package chacha
import "encoding/binary"
func init() {
useSSE2 = false
useSSSE3 = false
useAVX = false
useAVX2 = false
}
func initialize(state *[64]byte, key []byte, nonce *[16]byte) {
binary.LittleEndian.PutUint32(state[0:], sigma[0])
binary.LittleEndian.PutUint32(state[4:], sigma[1])
binary.LittleEndian.PutUint32(state[8:], sigma[2])
binary.LittleEndian.PutUint32(state[12:], sigma[3])
copy(state[16:], key[:])
copy(state[48:], nonce[:])
}
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
return xorKeyStreamGeneric(dst, src, block, state, rounds)
}
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
hChaCha20Generic(out, nonce, key)
}

View file

@@ -1,53 +0,0 @@
// Copyright (c) 2018 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build 386,!gccgo,!appengine,!nacl amd64,!gccgo,!appengine,!nacl
#include "textflag.h"
DATA ·sigma<>+0x00(SB)/4, $0x61707865
DATA ·sigma<>+0x04(SB)/4, $0x3320646e
DATA ·sigma<>+0x08(SB)/4, $0x79622d32
DATA ·sigma<>+0x0C(SB)/4, $0x6b206574
GLOBL ·sigma<>(SB), (NOPTR+RODATA), $16 // The 4 ChaCha initialization constants
// SSE2/SSE3/AVX constants
DATA ·one<>+0x00(SB)/8, $1
DATA ·one<>+0x08(SB)/8, $0
GLOBL ·one<>(SB), (NOPTR+RODATA), $16 // The constant 1 as 128 bit value
DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302
DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
GLOBL ·rol16<>(SB), (NOPTR+RODATA), $16 // The PSHUFB 16 bit left rotate constant
DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003
DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
GLOBL ·rol8<>(SB), (NOPTR+RODATA), $16 // The PSHUFB 8 bit left rotate constant
// AVX2 constants
DATA ·one_AVX2<>+0x00(SB)/8, $0
DATA ·one_AVX2<>+0x08(SB)/8, $0
DATA ·one_AVX2<>+0x10(SB)/8, $1
DATA ·one_AVX2<>+0x18(SB)/8, $0
GLOBL ·one_AVX2<>(SB), (NOPTR+RODATA), $32 // The constant 1 as 256 bit value
DATA ·two_AVX2<>+0x00(SB)/8, $2
DATA ·two_AVX2<>+0x08(SB)/8, $0
DATA ·two_AVX2<>+0x10(SB)/8, $2
DATA ·two_AVX2<>+0x18(SB)/8, $0
GLOBL ·two_AVX2<>(SB), (NOPTR+RODATA), $32
DATA ·rol16_AVX2<>+0x00(SB)/8, $0x0504070601000302
DATA ·rol16_AVX2<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
DATA ·rol16_AVX2<>+0x10(SB)/8, $0x0504070601000302
DATA ·rol16_AVX2<>+0x18(SB)/8, $0x0D0C0F0E09080B0A
GLOBL ·rol16_AVX2<>(SB), (NOPTR+RODATA), $32 // The VPSHUFB 16 bit left rotate constant
DATA ·rol8_AVX2<>+0x00(SB)/8, $0x0605040702010003
DATA ·rol8_AVX2<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
DATA ·rol8_AVX2<>+0x10(SB)/8, $0x0605040702010003
DATA ·rol8_AVX2<>+0x18(SB)/8, $0x0E0D0C0F0A09080B
GLOBL ·rol8_AVX2<>(SB), (NOPTR+RODATA), $32 // The VPSHUFB 8 bit left rotate constant
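The rol16/rol8 values above are byte-shuffle masks for PSHUFB/VPSHUFB rather than arithmetic constants. A hedged Go sketch (hypothetical names, not part of the vendored source) of why the 16-bit mask rotates each little-endian 32-bit lane:

package main

import (
	"encoding/binary"
	"fmt"
)

// rol16Mask spells out ·rol16<> byte by byte. PSHUFB computes dst[i] = src[mask[i]],
// so each little-endian 32-bit lane (b0,b1,b2,b3) becomes (b2,b3,b0,b1),
// which is a left rotation by 16 bits.
var rol16Mask = [16]byte{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}

func main() {
	var src, dst [16]byte
	binary.LittleEndian.PutUint32(src[0:], 0x11223344)
	for i, m := range rol16Mask {
		dst[i] = src[m]
	}
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(dst[0:])) // 0x33441122
}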

View file

@@ -1,163 +0,0 @@
// Copyright (c) 2018 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build 386,!gccgo,!appengine,!nacl amd64,!gccgo,!appengine,!nacl
// ROTL_SSE rotates all 4 32 bit values of the XMM register v
// left by n bits using SSE2 instructions (0 <= n <= 32).
// The XMM register t is used as a temp. register.
#define ROTL_SSE(n, t, v) \
MOVO v, t; \
PSLLL $n, t; \
PSRLL $(32-n), v; \
PXOR t, v
// ROTL_AVX rotates all 4/8 32 bit values of the AVX/AVX2 register v
// left by n bits using AVX/AVX2 instructions (0 <= n <= 32).
// The AVX/AVX2 register t is used as a temp. register.
#define ROTL_AVX(n, t, v) \
VPSLLD $n, v, t; \
VPSRLD $(32-n), v, v; \
VPXOR v, t, v
// CHACHA_QROUND_SSE2 performs a ChaCha quarter-round using the
// 4 XMM registers v0, v1, v2 and v3. It uses only ROTL_SSE for
// rotations. The XMM register t is used as a temp. register.
#define CHACHA_QROUND_SSE2(v0, v1, v2, v3, t) \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE(16, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE(12, t, v1); \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE(8, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE(7, t, v1)
// CHACHA_QROUND_SSSE3 performs a ChaCha quarter-round using the
// 4 XMM registers v0, v1, v2 and v3. It uses PSHUFB for 8/16 bit
// rotations. The XMM register t is used as a temp. register.
//
// r16 holds the PSHUFB constant for a 16 bit left rotate.
// r8 holds the PSHUFB constant for a 8 bit left rotate.
#define CHACHA_QROUND_SSSE3(v0, v1, v2, v3, t, r16, r8) \
PADDL v1, v0; \
PXOR v0, v3; \
PSHUFB r16, v3; \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE(12, t, v1); \
PADDL v1, v0; \
PXOR v0, v3; \
PSHUFB r8, v3; \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE(7, t, v1)
// CHACHA_QROUND_AVX performs a ChaCha quarter-round using the
// 4 AVX/AVX2 registers v0, v1, v2 and v3. It uses VPSHUFB for 8/16 bit
// rotations. The AVX/AVX2 register t is used as a temp. register.
//
// r16 holds the VPSHUFB constant for a 16 bit left rotate.
// r8 holds the VPSHUFB constant for a 8 bit left rotate.
#define CHACHA_QROUND_AVX(v0, v1, v2, v3, t, r16, r8) \
VPADDD v0, v1, v0; \
VPXOR v3, v0, v3; \
VPSHUFB r16, v3, v3; \
VPADDD v2, v3, v2; \
VPXOR v1, v2, v1; \
ROTL_AVX(12, t, v1); \
VPADDD v0, v1, v0; \
VPXOR v3, v0, v3; \
VPSHUFB r8, v3, v3; \
VPADDD v2, v3, v2; \
VPXOR v1, v2, v1; \
ROTL_AVX(7, t, v1)
// CHACHA_SHUFFLE_SSE performs a ChaCha shuffle using the
// 3 XMM registers v1, v2 and v3. The inverse shuffle is
// performed by switching v1 and v3: CHACHA_SHUFFLE_SSE(v3, v2, v1).
#define CHACHA_SHUFFLE_SSE(v1, v2, v3) \
PSHUFL $0x39, v1, v1; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v3, v3
// CHACHA_SHUFFLE_AVX performs a ChaCha shuffle using the
// 3 AVX/AVX2 registers v1, v2 and v3. The inverse shuffle is
// performed by switching v1 and v3: CHACHA_SHUFFLE_AVX(v3, v2, v1).
#define CHACHA_SHUFFLE_AVX(v1, v2, v3) \
VPSHUFD $0x39, v1, v1; \
VPSHUFD $0x4E, v2, v2; \
VPSHUFD $0x93, v3, v3
// XOR_SSE extracts 4x16 byte vectors from src at
// off, xors all vectors with the corresponding XMM
// register (v0 - v3) and writes the result to dst
// at off.
// The XMM register t is used as a temp. register.
#define XOR_SSE(dst, src, off, v0, v1, v2, v3, t) \
MOVOU 0+off(src), t; \
PXOR v0, t; \
MOVOU t, 0+off(dst); \
MOVOU 16+off(src), t; \
PXOR v1, t; \
MOVOU t, 16+off(dst); \
MOVOU 32+off(src), t; \
PXOR v2, t; \
MOVOU t, 32+off(dst); \
MOVOU 48+off(src), t; \
PXOR v3, t; \
MOVOU t, 48+off(dst)
// XOR_AVX extracts 4x16 byte vectors from src at
// off, xors all vectors with the corresponding AVX
// register (v0 - v3) and writes the result to dst
// at off.
// The XMM register t is used as a temp. register.
#define XOR_AVX(dst, src, off, v0, v1, v2, v3, t) \
VPXOR 0+off(src), v0, t; \
VMOVDQU t, 0+off(dst); \
VPXOR 16+off(src), v1, t; \
VMOVDQU t, 16+off(dst); \
VPXOR 32+off(src), v2, t; \
VMOVDQU t, 32+off(dst); \
VPXOR 48+off(src), v3, t; \
VMOVDQU t, 48+off(dst)
#define XOR_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \
VMOVDQU (0+off)(src), t0; \
VPERM2I128 $32, v1, v0, t1; \
VPXOR t0, t1, t0; \
VMOVDQU t0, (0+off)(dst); \
VMOVDQU (32+off)(src), t0; \
VPERM2I128 $32, v3, v2, t1; \
VPXOR t0, t1, t0; \
VMOVDQU t0, (32+off)(dst); \
VMOVDQU (64+off)(src), t0; \
VPERM2I128 $49, v1, v0, t1; \
VPXOR t0, t1, t0; \
VMOVDQU t0, (64+off)(dst); \
VMOVDQU (96+off)(src), t0; \
VPERM2I128 $49, v3, v2, t1; \
VPXOR t0, t1, t0; \
VMOVDQU t0, (96+off)(dst)
#define XOR_UPPER_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \
VMOVDQU (0+off)(src), t0; \
VPERM2I128 $32, v1, v0, t1; \
VPXOR t0, t1, t0; \
VMOVDQU t0, (0+off)(dst); \
VMOVDQU (32+off)(src), t0; \
VPERM2I128 $32, v3, v2, t1; \
VPXOR t0, t1, t0; \
	VMOVDQU t0, (32+off)(dst)
#define EXTRACT_LOWER(dst, v0, v1, v2, v3, t0) \
VPERM2I128 $49, v1, v0, t0; \
VMOVDQU t0, 0(dst); \
VPERM2I128 $49, v3, v2, t0; \
VMOVDQU t0, 32(dst)

View file

@@ -1,41 +0,0 @@
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// Package chacha20 implements the ChaCha20 / XChaCha20 stream cipher.
// Notice that one specific key-nonce combination must be unique for all time.
//
// There are three versions of ChaCha20:
// - ChaCha20 with a 64 bit nonce (en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
// - ChaCha20 with a 96 bit nonce (en/decrypt up to 2^32 * 64 bytes (~256 GB) for one key-nonce combination)
// - XChaCha20 with a 192 bit nonce (en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
package chacha20 // import "github.com/aead/chacha20"
import (
"crypto/cipher"
"github.com/aead/chacha20/chacha"
)
// XORKeyStream crypts bytes from src to dst using the given nonce and key.
// The length of the nonce determines the version of ChaCha20:
// - 8 bytes: ChaCha20 with a 64 bit nonce and a 2^64 * 64 byte period.
// - 12 bytes: ChaCha20 as defined in RFC 7539 and a 2^32 * 64 byte period.
// - 24 bytes: XChaCha20 with a 192 bit nonce and a 2^64 * 64 byte period.
// Src and dst may be the same slice but otherwise should not overlap.
// If len(dst) < len(src) this function panics.
// If the nonce is neither 64, 96 nor 192 bits long, this function panics.
func XORKeyStream(dst, src, nonce, key []byte) {
chacha.XORKeyStream(dst, src, nonce, key, 20)
}
// NewCipher returns a new cipher.Stream implementing a ChaCha20 version.
// The nonce must be unique for one key for all time.
// The length of the nonce determines the version of ChaCha20:
// - 8 bytes: ChaCha20 with a 64 bit nonce and a 2^64 * 64 byte period.
// - 12 bytes: ChaCha20 as defined in RFC 7539 and a 2^32 * 64 byte period.
// - 24 bytes: XChaCha20 with a 192 bit nonce and a 2^64 * 64 byte period.
// If the nonce is neither 64, 96 nor 192 bits long, a non-nil error is returned.
func NewCipher(nonce, key []byte) (cipher.Stream, error) {
return chacha.NewCipher(nonce, key, 20)
}
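A minimal usage sketch built only from the two exported functions shown above; the nonce length picks the variant, and applying the same keystream twice with one key/nonce pair round-trips the data (the example program is illustrative and not part of the original diff):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/aead/chacha20"
)

func main() {
	key := make([]byte, 32)   // 256-bit key
	nonce := make([]byte, 12) // 12-byte nonce selects the RFC 7539 variant
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	plaintext := []byte("hello, chacha20")
	ciphertext := make([]byte, len(plaintext))
	chacha20.XORKeyStream(ciphertext, plaintext, nonce, key)

	// XOR-ing the keystream again with the same key and nonce decrypts.
	decrypted := make([]byte, len(ciphertext))
	chacha20.XORKeyStream(decrypted, ciphertext, nonce, key)
	fmt.Printf("%s\n", decrypted) // hello, chacha20
}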

View file

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,406 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package template implements data-driven templates for generating textual output.
To generate HTML output, see package html/template, which has the same interface
as this package but automatically secures HTML output against certain attacks.
Templates are executed by applying them to a data structure. Annotations in the
template refer to elements of the data structure (typically a field of a struct
or a key in a map) to control execution and derive values to be displayed.
Execution of the template walks the structure and sets the cursor, represented
by a period '.' and called "dot", to the value at the current location in the
structure as execution proceeds.
The input text for a template is UTF-8-encoded text in any format.
"Actions"--data evaluations or control structures--are delimited by
"{{" and "}}"; all text outside actions is copied to the output unchanged.
Actions may not span newlines, although comments can.
Once parsed, a template may be executed safely in parallel.
Here is a trivial example that prints "17 items are made of wool".
type Inventory struct {
Material string
Count uint
}
sweaters := Inventory{"wool", 17}
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
if err != nil { panic(err) }
err = tmpl.Execute(os.Stdout, sweaters)
if err != nil { panic(err) }
More intricate examples appear below.
Actions
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
data, defined in detail below.
*/
// {{/* a comment */}}
// A comment; discarded. May contain newlines.
// Comments do not nest and must start and end at the
// delimiters, as shown here.
/*
{{pipeline}}
The default textual representation of the value of the pipeline
is copied to the output.
{{if pipeline}} T1 {{end}}
If the value of the pipeline is empty, no output is generated;
otherwise, T1 is executed. The empty values are false, 0, any
nil pointer or interface value, and any array, slice, map, or
string of length zero.
Dot is unaffected.
{{if pipeline}} T1 {{else}} T0 {{end}}
If the value of the pipeline is empty, T0 is executed;
otherwise, T1 is executed. Dot is unaffected.
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
To simplify the appearance of if-else chains, the else action
of an if may include another if directly; the effect is exactly
the same as writing
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
{{range pipeline}} T1 {{end}}
The value of the pipeline must be an array, slice, map, or channel.
If the value of the pipeline has length zero, nothing is output;
otherwise, dot is set to the successive elements of the array,
slice, or map and T1 is executed. If the value is a map and the
keys are of basic type with a defined order ("comparable"), the
elements will be visited in sorted key order.
{{range pipeline}} T1 {{else}} T0 {{end}}
The value of the pipeline must be an array, slice, map, or channel.
If the value of the pipeline has length zero, dot is unaffected and
T0 is executed; otherwise, dot is set to the successive elements
of the array, slice, or map and T1 is executed.
{{template "name"}}
The template with the specified name is executed with nil data.
{{template "name" pipeline}}
The template with the specified name is executed with dot set
to the value of the pipeline.
{{with pipeline}} T1 {{end}}
If the value of the pipeline is empty, no output is generated;
otherwise, dot is set to the value of the pipeline and T1 is
executed.
{{with pipeline}} T1 {{else}} T0 {{end}}
If the value of the pipeline is empty, dot is unaffected and T0
is executed; otherwise, dot is set to the value of the pipeline
and T1 is executed.
Arguments
An argument is a simple value, denoted by one of the following.
- A boolean, string, character, integer, floating-point, imaginary
or complex constant in Go syntax. These behave like Go's untyped
constants, although raw strings may not span newlines.
- The keyword nil, representing an untyped Go nil.
- The character '.' (period):
.
The result is the value of dot.
- A variable name, which is a (possibly empty) alphanumeric string
preceded by a dollar sign, such as
$piOver2
or
$
The result is the value of the variable.
Variables are described below.
- The name of a field of the data, which must be a struct, preceded
by a period, such as
.Field
The result is the value of the field. Field invocations may be
chained:
.Field1.Field2
Fields can also be evaluated on variables, including chaining:
$x.Field1.Field2
- The name of a key of the data, which must be a map, preceded
by a period, such as
.Key
The result is the map element value indexed by the key.
Key invocations may be chained and combined with fields to any
depth:
.Field1.Key1.Field2.Key2
	  Although the key must be an alphanumeric identifier, unlike field
	  names, keys do not need to start with an upper case letter.
Keys can also be evaluated on variables, including chaining:
$x.key1.key2
- The name of a niladic method of the data, preceded by a period,
such as
.Method
The result is the value of invoking the method with dot as the
receiver, dot.Method(). Such a method must have one return value (of
any type) or two return values, the second of which is an error.
If it has two and the returned error is non-nil, execution terminates
and an error is returned to the caller as the value of Execute.
Method invocations may be chained and combined with fields and keys
to any depth:
.Field1.Key1.Method1.Field2.Key2.Method2
Methods can also be evaluated on variables, including chaining:
$x.Method1.Field
- The name of a niladic function, such as
fun
The result is the value of invoking the function, fun(). The return
types and values behave as in methods. Functions and function
names are described below.
	- A parenthesized instance of one of the above, for grouping. The result
may be accessed by a field or map key invocation.
print (.F1 arg1) (.F2 arg2)
(.StructValuedMethod "arg").Field
Arguments may evaluate to any type; if they are pointers the implementation
automatically indirects to the base type when required.
If an evaluation yields a function value, such as a function-valued
field of a struct, the function is not invoked automatically, but it
can be used as a truth value for an if action and the like. To invoke
it, use the call function, defined below.
A pipeline is a possibly chained sequence of "commands". A command is a simple
value (argument) or a function or method call, possibly with multiple arguments:
Argument
The result is the value of evaluating the argument.
.Method [Argument...]
The method can be alone or the last element of a chain but,
unlike methods in the middle of a chain, it can take arguments.
The result is the value of calling the method with the
arguments:
dot.Method(Argument1, etc.)
functionName [Argument...]
The result is the value of calling the function associated
with the name:
function(Argument1, etc.)
Functions and function names are described below.
Pipelines
A pipeline may be "chained" by separating a sequence of commands with pipeline
characters '|'. In a chained pipeline, the result of each command is
passed as the last argument of the following command. The output of the final
command in the pipeline is the value of the pipeline.
The output of a command will be either one value or two values, the second of
which has type error. If that second value is present and evaluates to
non-nil, execution terminates and the error is returned to the caller of
Execute.
Variables
A pipeline inside an action may initialize a variable to capture the result.
The initialization has syntax
$variable := pipeline
where $variable is the name of the variable. An action that declares a
variable produces no output.
If a "range" action initializes a variable, the variable is set to the
successive elements of the iteration. Also, a "range" may declare two
variables, separated by a comma:
range $index, $element := pipeline
in which case $index and $element are set to the successive values of the
array/slice index or map key and element, respectively. Note that if there is
only one variable, it is assigned the element; this is opposite to the
convention in Go range clauses.
A variable's scope extends to the "end" action of the control structure ("if",
"with", or "range") in which it is declared, or to the end of the template if
there is no such control structure. A template invocation does not inherit
variables from the point of its invocation.
When execution begins, $ is set to the data argument passed to Execute, that is,
to the starting value of dot.
Examples
Here are some example one-line templates demonstrating pipelines and variables.
All produce the quoted word "output":
{{"\"output\""}}
A string constant.
{{`"output"`}}
A raw string constant.
{{printf "%q" "output"}}
A function call.
{{"output" | printf "%q"}}
A function call whose final argument comes from the previous
command.
{{printf "%q" (print "out" "put")}}
A parenthesized argument.
{{"put" | printf "%s%s" "out" | printf "%q"}}
A more elaborate call.
{{"output" | printf "%s" | printf "%q"}}
A longer chain.
{{with "output"}}{{printf "%q" .}}{{end}}
A with action using dot.
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
A with action that creates and uses a variable.
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
A with action that uses the variable in another action.
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
The same, but pipelined.
Functions
During execution functions are found in two function maps: first in the
template, then in the global function map. By default, no functions are defined
in the template but the Funcs method can be used to add them.
Predefined global functions are named as follows.
and
Returns the boolean AND of its arguments by returning the
first empty argument or the last argument, that is,
"and x y" behaves as "if x then y else x". All the
arguments are evaluated.
call
Returns the result of calling the first argument, which
must be a function, with the remaining arguments as parameters.
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
Y is a func-valued field, map entry, or the like.
The first argument must be the result of an evaluation
that yields a value of function type (as distinct from
a predefined function such as print). The function must
return either one or two result values, the second of which
is of type error. If the arguments don't match the function
or the returned error value is non-nil, execution stops.
html
Returns the escaped HTML equivalent of the textual
representation of its arguments.
index
Returns the result of indexing its first argument by the
following arguments. Thus "index x 1 2 3" is, in Go syntax,
x[1][2][3]. Each indexed item must be a map, slice, or array.
js
Returns the escaped JavaScript equivalent of the textual
representation of its arguments.
len
Returns the integer length of its argument.
not
Returns the boolean negation of its single argument.
or
Returns the boolean OR of its arguments by returning the
first non-empty argument or the last argument, that is,
"or x y" behaves as "if x then x else y". All the
arguments are evaluated.
print
An alias for fmt.Sprint
printf
An alias for fmt.Sprintf
println
An alias for fmt.Sprintln
urlquery
Returns the escaped value of the textual representation of
its arguments in a form suitable for embedding in a URL query.
The boolean functions take any zero value to be false and a non-zero
value to be true.
There is also a set of binary comparison operators defined as
functions:
eq
Returns the boolean truth of arg1 == arg2
ne
Returns the boolean truth of arg1 != arg2
lt
Returns the boolean truth of arg1 < arg2
le
Returns the boolean truth of arg1 <= arg2
gt
Returns the boolean truth of arg1 > arg2
ge
Returns the boolean truth of arg1 >= arg2
For simpler multi-way equality tests, eq (only) accepts two or more
arguments and compares the second and subsequent to the first,
returning in effect
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
(Unlike with || in Go, however, eq is a function call and all the
arguments will be evaluated.)
The comparison functions work on basic types only (or named basic
types, such as "type Celsius float32"). They implement the Go rules
for comparison of values, except that size and exact type are
ignored, so any integer value, signed or unsigned, may be compared
with any other integer value. (The arithmetic value is compared,
not the bit pattern, so all negative integers are less than all
unsigned integers.) However, as usual, one may not compare an int
with a float32 and so on.
Associated templates
Each template is named by a string specified when it is created. Also, each
template is associated with zero or more other templates that it may invoke by
name; such associations are transitive and form a name space of templates.
A template may use a template invocation to instantiate another associated
template; see the explanation of the "template" action above. The name must be
that of a template associated with the template that contains the invocation.
Nested template definitions
When parsing a template, another template may be defined and associated with the
template being parsed. Template definitions must appear at the top level of the
template, much like global variables in a Go program.
The syntax of such definitions is to surround each template declaration with a
"define" and "end" action.
The define action names the template being created by providing a string
constant. Here is a simple example:
`{{define "T1"}}ONE{{end}}
{{define "T2"}}TWO{{end}}
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
{{template "T3"}}`
This defines two templates, T1 and T2, and a third T3 that invokes the other two
when it is executed. Finally it invokes T3. If executed this template will
produce the text
ONE TWO
By construction, a template may reside in only one association. If it's
necessary to have a template addressable from multiple associations, the
template definition must be parsed multiple times to create distinct *Template
values, or must be copied with the Clone or AddParseTree method.
Parse may be called multiple times to assemble the various associated templates;
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
related templates stored in files.
A template may be executed directly or through ExecuteTemplate, which executes
an associated template identified by name. To invoke our example above, we
might write,
err := tmpl.Execute(os.Stdout, "no data needed")
if err != nil {
log.Fatalf("execution failed: %s", err)
}
or to invoke a particular template explicitly by name,
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
if err != nil {
log.Fatalf("execution failed: %s", err)
}
*/
package template
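The doc comment above describes actions, pipelines, variables, and functions in prose. A short hedged sketch pulling a few of those pieces together, assuming this fork keeps the same surface as the standard library's text/template (the inventory data mirrors the trivial example earlier in the comment):

package main

import (
	"os"
	"strings"

	"github.com/alecthomas/template"
)

func main() {
	// Custom functions must be installed before Parse so the parser accepts "upper".
	funcs := template.FuncMap{"upper": strings.ToUpper}

	const src = `{{define "row"}}{{.Material | upper}}: {{.Count}}{{end}}{{range .}}{{template "row" .}}
{{end}}`

	tmpl := template.Must(template.New("inventory").Funcs(funcs).Parse(src))

	data := []struct {
		Material string
		Count    uint
	}{{"wool", 17}, {"cotton", 3}}

	// Prints "WOOL: 17" and "COTTON: 3", one per line.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}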

View file

@@ -1,845 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"fmt"
"io"
"reflect"
"runtime"
"sort"
"strings"
"github.com/alecthomas/template/parse"
)
// state represents the state of an execution. It's not part of the
// template so that multiple executions of the same template
// can execute in parallel.
type state struct {
tmpl *Template
wr io.Writer
node parse.Node // current node, for errors
vars []variable // push-down stack of variable values.
}
// variable holds the dynamic value of a variable such as $, $x etc.
type variable struct {
name string
value reflect.Value
}
// push pushes a new variable on the stack.
func (s *state) push(name string, value reflect.Value) {
s.vars = append(s.vars, variable{name, value})
}
// mark returns the length of the variable stack.
func (s *state) mark() int {
return len(s.vars)
}
// pop pops the variable stack up to the mark.
func (s *state) pop(mark int) {
s.vars = s.vars[0:mark]
}
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
func (s *state) setVar(n int, value reflect.Value) {
s.vars[len(s.vars)-n].value = value
}
// varValue returns the value of the named variable.
func (s *state) varValue(name string) reflect.Value {
for i := s.mark() - 1; i >= 0; i-- {
if s.vars[i].name == name {
return s.vars[i].value
}
}
s.errorf("undefined variable: %s", name)
return zero
}
var zero reflect.Value
// at marks the state to be on node n, for error reporting.
func (s *state) at(node parse.Node) {
s.node = node
}
// doublePercent returns the string with %'s replaced by %%, if necessary,
// so it can be used safely inside a Printf format string.
func doublePercent(str string) string {
if strings.Contains(str, "%") {
str = strings.Replace(str, "%", "%%", -1)
}
return str
}
// errorf formats the error and terminates processing.
func (s *state) errorf(format string, args ...interface{}) {
name := doublePercent(s.tmpl.Name())
if s.node == nil {
format = fmt.Sprintf("template: %s: %s", name, format)
} else {
location, context := s.tmpl.ErrorContext(s.node)
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
}
panic(fmt.Errorf(format, args...))
}
// errRecover is the handler that turns panics into returns from the top
// level of Parse.
func errRecover(errp *error) {
e := recover()
if e != nil {
switch err := e.(type) {
case runtime.Error:
panic(e)
case error:
*errp = err
default:
panic(e)
}
}
}
// ExecuteTemplate applies the template associated with t that has the given name
// to the specified data object and writes the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel.
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
tmpl := t.tmpl[name]
if tmpl == nil {
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
}
return tmpl.Execute(wr, data)
}
// Execute applies a parsed template to the specified data object,
// and writes the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel.
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
defer errRecover(&err)
value := reflect.ValueOf(data)
state := &state{
tmpl: t,
wr: wr,
vars: []variable{{"$", value}},
}
t.init()
if t.Tree == nil || t.Root == nil {
var b bytes.Buffer
for name, tmpl := range t.tmpl {
if tmpl.Tree == nil || tmpl.Root == nil {
continue
}
if b.Len() > 0 {
b.WriteString(", ")
}
fmt.Fprintf(&b, "%q", name)
}
var s string
if b.Len() > 0 {
s = "; defined templates are: " + b.String()
}
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
}
state.walk(value, t.Root)
return
}
// Walk functions step through the major pieces of the template structure,
// generating output as they go.
func (s *state) walk(dot reflect.Value, node parse.Node) {
s.at(node)
switch node := node.(type) {
case *parse.ActionNode:
// Do not pop variables so they persist until next end.
// Also, if the action declares variables, don't print the result.
val := s.evalPipeline(dot, node.Pipe)
if len(node.Pipe.Decl) == 0 {
s.printValue(node, val)
}
case *parse.IfNode:
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
case *parse.ListNode:
for _, node := range node.Nodes {
s.walk(dot, node)
}
case *parse.RangeNode:
s.walkRange(dot, node)
case *parse.TemplateNode:
s.walkTemplate(dot, node)
case *parse.TextNode:
if _, err := s.wr.Write(node.Text); err != nil {
s.errorf("%s", err)
}
case *parse.WithNode:
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
default:
s.errorf("unknown node: %s", node)
}
}
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
// are identical in behavior except that 'with' sets dot.
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
defer s.pop(s.mark())
val := s.evalPipeline(dot, pipe)
truth, ok := isTrue(val)
if !ok {
s.errorf("if/with can't use %v", val)
}
if truth {
if typ == parse.NodeWith {
s.walk(val, list)
} else {
s.walk(dot, list)
}
} else if elseList != nil {
s.walk(dot, elseList)
}
}
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value.
func isTrue(val reflect.Value) (truth, ok bool) {
if !val.IsValid() {
// Something like var x interface{}, never set. It's a form of nil.
return false, true
}
switch val.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
truth = val.Len() > 0
case reflect.Bool:
truth = val.Bool()
case reflect.Complex64, reflect.Complex128:
truth = val.Complex() != 0
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
truth = !val.IsNil()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
truth = val.Int() != 0
case reflect.Float32, reflect.Float64:
truth = val.Float() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
truth = val.Uint() != 0
case reflect.Struct:
truth = true // Struct values are always true.
default:
return
}
return truth, true
}
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
s.at(r)
defer s.pop(s.mark())
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
// mark top of stack before any variables in the body are pushed.
mark := s.mark()
oneIteration := func(index, elem reflect.Value) {
// Set top var (lexically the second if there are two) to the element.
if len(r.Pipe.Decl) > 0 {
s.setVar(1, elem)
}
// Set next var (lexically the first if there are two) to the index.
if len(r.Pipe.Decl) > 1 {
s.setVar(2, index)
}
s.walk(elem, r.List)
s.pop(mark)
}
switch val.Kind() {
case reflect.Array, reflect.Slice:
if val.Len() == 0 {
break
}
for i := 0; i < val.Len(); i++ {
oneIteration(reflect.ValueOf(i), val.Index(i))
}
return
case reflect.Map:
if val.Len() == 0 {
break
}
for _, key := range sortKeys(val.MapKeys()) {
oneIteration(key, val.MapIndex(key))
}
return
case reflect.Chan:
if val.IsNil() {
break
}
i := 0
for ; ; i++ {
elem, ok := val.Recv()
if !ok {
break
}
oneIteration(reflect.ValueOf(i), elem)
}
if i == 0 {
break
}
return
case reflect.Invalid:
break // An invalid value is likely a nil map, etc. and acts like an empty map.
default:
s.errorf("range can't iterate over %v", val)
}
if r.ElseList != nil {
s.walk(dot, r.ElseList)
}
}
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
s.at(t)
tmpl := s.tmpl.tmpl[t.Name]
if tmpl == nil {
s.errorf("template %q not defined", t.Name)
}
// Variables declared by the pipeline persist.
dot = s.evalPipeline(dot, t.Pipe)
newState := *s
newState.tmpl = tmpl
// No dynamic scoping: template invocations inherit no variables.
newState.vars = []variable{{"$", dot}}
newState.walk(dot, tmpl.Root)
}
// Eval functions evaluate pipelines, commands, and their elements and extract
// values from the data structure by examining fields, calling methods, and so on.
// The printing of those values happens only through walk functions.
// evalPipeline returns the value acquired by evaluating a pipeline. If the
// pipeline has a variable declaration, the variable will be pushed on the
// stack. Callers should therefore pop the stack after they are finished
// executing commands depending on the pipeline value.
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
if pipe == nil {
return
}
s.at(pipe)
for _, cmd := range pipe.Cmds {
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
// If the object has type interface{}, dig down one level to the thing inside.
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
value = reflect.ValueOf(value.Interface()) // lovely!
}
}
for _, variable := range pipe.Decl {
s.push(variable.Ident[0], value)
}
return value
}
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
if len(args) > 1 || final.IsValid() {
s.errorf("can't give argument to non-function %s", args[0])
}
}
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
firstWord := cmd.Args[0]
switch n := firstWord.(type) {
case *parse.FieldNode:
return s.evalFieldNode(dot, n, cmd.Args, final)
case *parse.ChainNode:
return s.evalChainNode(dot, n, cmd.Args, final)
case *parse.IdentifierNode:
// Must be a function.
return s.evalFunction(dot, n, cmd, cmd.Args, final)
case *parse.PipeNode:
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
return s.evalPipeline(dot, n)
case *parse.VariableNode:
return s.evalVariableNode(dot, n, cmd.Args, final)
}
s.at(firstWord)
s.notAFunction(cmd.Args, final)
switch word := firstWord.(type) {
case *parse.BoolNode:
return reflect.ValueOf(word.True)
case *parse.DotNode:
return dot
case *parse.NilNode:
s.errorf("nil is not a command")
case *parse.NumberNode:
return s.idealConstant(word)
case *parse.StringNode:
return reflect.ValueOf(word.Text)
}
s.errorf("can't evaluate command %q", firstWord)
panic("not reached")
}
// idealConstant is called to return the value of a number in a context where
// we don't know the type. In that case, the syntax of the number tells us
// its type, and we use Go rules to resolve. Note there is no such thing as
// a uint ideal constant in this situation - the value must be of int type.
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
// These are ideal constants but we don't know the type
// and we have no context. (If it was a method argument,
// we'd know what we need.) The syntax guides us to some extent.
s.at(constant)
switch {
case constant.IsComplex:
return reflect.ValueOf(constant.Complex128) // incontrovertible.
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
return reflect.ValueOf(constant.Float64)
case constant.IsInt:
n := int(constant.Int64)
if int64(n) != constant.Int64 {
s.errorf("%s overflows int", constant.Text)
}
return reflect.ValueOf(n)
case constant.IsUint:
s.errorf("%s overflows int", constant.Text)
}
return zero
}
func isHexConstant(s string) bool {
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
}
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
s.at(field)
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
}
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
s.at(chain)
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
pipe := s.evalArg(dot, nil, chain.Node)
if len(chain.Field) == 0 {
s.errorf("internal error: no fields in evalChainNode")
}
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
}
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
s.at(variable)
value := s.varValue(variable.Ident[0])
if len(variable.Ident) == 1 {
s.notAFunction(args, final)
return value
}
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
}
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
// dot is the environment in which to evaluate arguments, while
// receiver is the value being walked along the chain.
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
n := len(ident)
for i := 0; i < n-1; i++ {
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
}
// Now if it's a method, it gets the arguments.
return s.evalField(dot, ident[n-1], node, args, final, receiver)
}
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
s.at(node)
name := node.Ident
function, ok := findFunction(name, s.tmpl)
if !ok {
s.errorf("%q is not a defined function", name)
}
return s.evalCall(dot, function, cmd, name, args, final)
}
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
// The 'final' argument represents the return value from the preceding
// value of the pipeline, if any.
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
if !receiver.IsValid() {
return zero
}
typ := receiver.Type()
receiver, _ = indirect(receiver)
// Unless it's an interface, need to get to a value of type *T to guarantee
// we see all methods of T and *T.
ptr := receiver
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
ptr = ptr.Addr()
}
if method := ptr.MethodByName(fieldName); method.IsValid() {
return s.evalCall(dot, method, node, fieldName, args, final)
}
hasArgs := len(args) > 1 || final.IsValid()
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
receiver, isNil := indirect(receiver)
if isNil {
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
}
switch receiver.Kind() {
case reflect.Struct:
tField, ok := receiver.Type().FieldByName(fieldName)
if ok {
field := receiver.FieldByIndex(tField.Index)
if tField.PkgPath != "" { // field is unexported
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
}
// If it's a function, we must call it.
if hasArgs {
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
}
return field
}
s.errorf("%s is not a field of struct type %s", fieldName, typ)
case reflect.Map:
// If it's a map, attempt to use the field name as a key.
nameVal := reflect.ValueOf(fieldName)
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
if hasArgs {
s.errorf("%s is not a method but has arguments", fieldName)
}
return receiver.MapIndex(nameVal)
}
}
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
panic("not reached")
}
var (
errorType = reflect.TypeOf((*error)(nil)).Elem()
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
// as the function itself.
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
if args != nil {
args = args[1:] // Zeroth arg is function name/node; not passed to function.
}
typ := fun.Type()
numIn := len(args)
if final.IsValid() {
numIn++
}
numFixed := len(args)
if typ.IsVariadic() {
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
if numIn < numFixed {
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
}
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
}
if !goodFunc(typ) {
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
}
// Build the arg list.
argv := make([]reflect.Value, numIn)
// Args must be evaluated. Fixed args first.
i := 0
for ; i < numFixed && i < len(args); i++ {
argv[i] = s.evalArg(dot, typ.In(i), args[i])
}
// Now the ... args.
if typ.IsVariadic() {
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
for ; i < len(args); i++ {
argv[i] = s.evalArg(dot, argType, args[i])
}
}
// Add final value if necessary.
if final.IsValid() {
t := typ.In(typ.NumIn() - 1)
if typ.IsVariadic() {
t = t.Elem()
}
argv[i] = s.validateType(final, t)
}
result := fun.Call(argv)
// If we have an error that is not nil, stop execution and return that error to the caller.
if len(result) == 2 && !result[1].IsNil() {
s.at(node)
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
}
return result[0]
}
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
switch typ.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return true
}
return false
}
// validateType guarantees that the value is valid and assignable to the type.
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
if !value.IsValid() {
if typ == nil || canBeNil(typ) {
// An untyped nil interface{}. Accept as a proper nil value.
return reflect.Zero(typ)
}
s.errorf("invalid value; expected %s", typ)
}
if typ != nil && !value.Type().AssignableTo(typ) {
if value.Kind() == reflect.Interface && !value.IsNil() {
value = value.Elem()
if value.Type().AssignableTo(typ) {
return value
}
// fallthrough
}
// Does one dereference or indirection work? We could do more, as we
// do with method receivers, but that gets messy and method receivers
// are much more constrained, so it makes more sense there than here.
// Besides, one is almost always all you need.
switch {
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
value = value.Elem()
if !value.IsValid() {
s.errorf("dereference of nil pointer of type %s", typ)
}
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
value = value.Addr()
default:
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
}
}
return value
}
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
switch arg := n.(type) {
case *parse.DotNode:
return s.validateType(dot, typ)
case *parse.NilNode:
if canBeNil(typ) {
return reflect.Zero(typ)
}
s.errorf("cannot assign nil to %s", typ)
case *parse.FieldNode:
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
case *parse.VariableNode:
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
case *parse.PipeNode:
return s.validateType(s.evalPipeline(dot, arg), typ)
case *parse.IdentifierNode:
return s.evalFunction(dot, arg, arg, nil, zero)
case *parse.ChainNode:
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
}
switch typ.Kind() {
case reflect.Bool:
return s.evalBool(typ, n)
case reflect.Complex64, reflect.Complex128:
return s.evalComplex(typ, n)
case reflect.Float32, reflect.Float64:
return s.evalFloat(typ, n)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return s.evalInteger(typ, n)
case reflect.Interface:
if typ.NumMethod() == 0 {
return s.evalEmptyInterface(dot, n)
}
case reflect.String:
return s.evalString(typ, n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return s.evalUnsignedInteger(typ, n)
}
s.errorf("can't handle %s for arg of type %s", n, typ)
panic("not reached")
}
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
if n, ok := n.(*parse.BoolNode); ok {
value := reflect.New(typ).Elem()
value.SetBool(n.True)
return value
}
s.errorf("expected bool; found %s", n)
panic("not reached")
}
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
if n, ok := n.(*parse.StringNode); ok {
value := reflect.New(typ).Elem()
value.SetString(n.Text)
return value
}
s.errorf("expected string; found %s", n)
panic("not reached")
}
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
value := reflect.New(typ).Elem()
value.SetInt(n.Int64)
return value
}
s.errorf("expected integer; found %s", n)
panic("not reached")
}
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
value := reflect.New(typ).Elem()
value.SetUint(n.Uint64)
return value
}
s.errorf("expected unsigned integer; found %s", n)
panic("not reached")
}
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
s.at(n)
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
value := reflect.New(typ).Elem()
value.SetFloat(n.Float64)
return value
}
s.errorf("expected float; found %s", n)
panic("not reached")
}
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
value := reflect.New(typ).Elem()
value.SetComplex(n.Complex128)
return value
}
s.errorf("expected complex; found %s", n)
panic("not reached")
}
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
s.at(n)
switch n := n.(type) {
case *parse.BoolNode:
return reflect.ValueOf(n.True)
case *parse.DotNode:
return dot
case *parse.FieldNode:
return s.evalFieldNode(dot, n, nil, zero)
case *parse.IdentifierNode:
return s.evalFunction(dot, n, n, nil, zero)
case *parse.NilNode:
// NilNode is handled in evalArg, the only place that calls here.
s.errorf("evalEmptyInterface: nil (can't happen)")
case *parse.NumberNode:
return s.idealConstant(n)
case *parse.StringNode:
return reflect.ValueOf(n.Text)
case *parse.VariableNode:
return s.evalVariableNode(dot, n, nil, zero)
case *parse.PipeNode:
return s.evalPipeline(dot, n)
}
s.errorf("can't handle assignment of %s to empty interface argument", n)
panic("not reached")
}
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
if v.IsNil() {
return v, true
}
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
break
}
}
return v, false
}
// printValue writes the textual representation of the value to the output of
// the template.
func (s *state) printValue(n parse.Node, v reflect.Value) {
s.at(n)
iface, ok := printableValue(v)
if !ok {
s.errorf("can't print %s of type %s", n, v.Type())
}
fmt.Fprint(s.wr, iface)
}
// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (interface{}, bool) {
if v.Kind() == reflect.Ptr {
v, _ = indirect(v) // fmt.Fprint handles nil.
}
if !v.IsValid() {
return "<no value>", true
}
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
v = v.Addr()
} else {
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil, false
}
}
}
return v.Interface(), true
}
// Types to help sort the keys in a map for reproducible output.
type rvs []reflect.Value
func (x rvs) Len() int { return len(x) }
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
type rvInts struct{ rvs }
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
type rvUints struct{ rvs }
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
type rvFloats struct{ rvs }
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
type rvStrings struct{ rvs }
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
func sortKeys(v []reflect.Value) []reflect.Value {
if len(v) <= 1 {
return v
}
switch v[0].Kind() {
case reflect.Float32, reflect.Float64:
sort.Sort(rvFloats{v})
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
sort.Sort(rvInts{v})
case reflect.String:
sort.Sort(rvStrings{v})
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
sort.Sort(rvUints{v})
}
return v
}
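// Illustration (not part of the original file): sortKeys is what makes {{range}} over
// a map visit keys in sorted order, so output is reproducible. A minimal sketch using
// the standard library text/template, which this vendored copy mirrors.
package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("demo").Parse("{{range $k, $v := .}}{{$k}}={{$v}} {{end}}\n"))
	scores := map[string]int{"charlie": 3, "alpha": 1, "bravo": 2}
	// Prints "alpha=1 bravo=2 charlie=3 " regardless of Go's map iteration order.
	if err := t.Execute(os.Stdout, scores); err != nil {
		panic(err)
	}
}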

View file

@ -1,598 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"errors"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}
var builtins = FuncMap{
"and": and,
"call": call,
"html": HTMLEscaper,
"index": index,
"js": JSEscaper,
"len": length,
"not": not,
"or": or,
"print": fmt.Sprint,
"printf": fmt.Sprintf,
"println": fmt.Sprintln,
"urlquery": URLQueryEscaper,
// Comparisons
"eq": eq, // ==
"ge": ge, // >=
"gt": gt, // >
"le": le, // <=
"lt": lt, // <
"ne": ne, // !=
}
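// Illustration (not part of the original file): callers merge their own FuncMap with
// the builtins above via Funcs. A minimal sketch using the standard library
// text/template, which this vendored copy mirrors; "upper" and "atoi" are example
// names, not builtins.
package main

import (
	"os"
	"strconv"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"upper": strings.ToUpper,
		// A second, error-typed result is allowed; a non-nil error aborts
		// execution, as the FuncMap comment above describes.
		"atoi": strconv.Atoi,
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(`{{upper "go"}} {{atoi "42"}}` + "\n"))
	if err := t.Execute(os.Stdout, nil); err != nil { // prints "GO 42"
		panic(err)
	}
}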
var builtinFuncs = createValueFuncs(builtins)
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
m := make(map[string]reflect.Value)
addValueFuncs(m, funcMap)
return m
}
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
for name, fn := range in {
v := reflect.ValueOf(fn)
if v.Kind() != reflect.Func {
panic("value for " + name + " not a function")
}
if !goodFunc(v.Type()) {
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
}
out[name] = v
}
}
// addFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
for name, fn := range in {
out[name] = fn
}
}
// goodFunc checks that the function or method has the right result signature.
func goodFunc(typ reflect.Type) bool {
// We allow functions with 1 result or 2 results where the second is an error.
switch {
case typ.NumOut() == 1:
return true
case typ.NumOut() == 2 && typ.Out(1) == errorType:
return true
}
return false
}
// findFunction looks for a function in the template's function map, then in the global map.
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
if tmpl != nil && tmpl.common != nil {
if fn := tmpl.execFuncs[name]; fn.IsValid() {
return fn, true
}
}
if fn := builtinFuncs[name]; fn.IsValid() {
return fn, true
}
return reflect.Value{}, false
}
// Indexing.
// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
v := reflect.ValueOf(item)
for _, i := range indices {
index := reflect.ValueOf(i)
var isNil bool
if v, isNil = indirect(v); isNil {
return nil, fmt.Errorf("index of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.String:
var x int64
switch index.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x = index.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x = int64(index.Uint())
default:
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
}
if x < 0 || x >= int64(v.Len()) {
return nil, fmt.Errorf("index out of range: %d", x)
}
v = v.Index(int(x))
case reflect.Map:
if !index.IsValid() {
index = reflect.Zero(v.Type().Key())
}
if !index.Type().AssignableTo(v.Type().Key()) {
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
}
if x := v.MapIndex(index); x.IsValid() {
v = x
} else {
v = reflect.Zero(v.Type().Elem())
}
default:
return nil, fmt.Errorf("can't index item of type %s", v.Type())
}
}
return v.Interface(), nil
}
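// Illustration (not part of the original file): the index builtin above in use. A
// minimal sketch using the standard library text/template, which this vendored copy
// mirrors.
package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string][]int{"primes": {2, 3, 5, 7}}
	// {{index .primes 2}} is data["primes"][2] in Go syntax, i.e. 5.
	t := template.Must(template.New("demo").Parse("third prime: {{index .primes 2}}\n"))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}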
// Length
// length returns the length of the item, with an error if it has no defined length.
func length(item interface{}) (int, error) {
v, isNil := indirect(reflect.ValueOf(item))
if isNil {
return 0, fmt.Errorf("len of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
return v.Len(), nil
}
return 0, fmt.Errorf("len of type %s", v.Type())
}
// Function invocation
// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
v := reflect.ValueOf(fn)
typ := v.Type()
if typ.Kind() != reflect.Func {
return nil, fmt.Errorf("non-function of type %s", typ)
}
if !goodFunc(typ) {
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
}
numIn := typ.NumIn()
var dddType reflect.Type
if typ.IsVariadic() {
if len(args) < numIn-1 {
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
}
dddType = typ.In(numIn - 1).Elem()
} else {
if len(args) != numIn {
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
}
}
argv := make([]reflect.Value, len(args))
for i, arg := range args {
value := reflect.ValueOf(arg)
// Compute the expected type. Clumsy because of variadics.
var argType reflect.Type
if !typ.IsVariadic() || i < numIn-1 {
argType = typ.In(i)
} else {
argType = dddType
}
if !value.IsValid() && canBeNil(argType) {
value = reflect.Zero(argType)
}
if !value.Type().AssignableTo(argType) {
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
}
argv[i] = value
}
result := v.Call(argv)
if len(result) == 2 && !result[1].IsNil() {
return result[0].Interface(), result[1].Interface().(error)
}
return result[0].Interface(), nil
}
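// Illustration (not part of the original file): the call builtin above invoking a
// function value found in the data. A minimal sketch using the standard library
// text/template, which this vendored copy mirrors; the Add key is hypothetical.
package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string]interface{}{
		"Add": func(a, b int) int { return a + b },
	}
	// call treats .Add as a function and applies it to 3 and 4; prints "sum: 7".
	t := template.Must(template.New("demo").Parse("sum: {{call .Add 3 4}}\n"))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}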
// Boolean logic.
func truth(a interface{}) bool {
t, _ := isTrue(reflect.ValueOf(a))
return t
}
// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
if !truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if !truth(arg0) {
break
}
}
return arg0
}
// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
if truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if truth(arg0) {
break
}
}
return arg0
}
// not returns the Boolean negation of its argument.
func not(arg interface{}) (truth bool) {
truth, _ = isTrue(reflect.ValueOf(arg))
return !truth
}
// Comparison.
// TODO: Perhaps allow comparison between signed and unsigned integers.
var (
errBadComparisonType = errors.New("invalid type for comparison")
errBadComparison = errors.New("incompatible types for comparison")
errNoComparison = errors.New("missing argument for comparison")
)
type kind int
const (
invalidKind kind = iota
boolKind
complexKind
intKind
floatKind
integerKind
stringKind
uintKind
)
func basicKind(v reflect.Value) (kind, error) {
switch v.Kind() {
case reflect.Bool:
return boolKind, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intKind, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintKind, nil
case reflect.Float32, reflect.Float64:
return floatKind, nil
case reflect.Complex64, reflect.Complex128:
return complexKind, nil
case reflect.String:
return stringKind, nil
}
return invalidKind, errBadComparisonType
}
// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
if len(arg2) == 0 {
return false, errNoComparison
}
for _, arg := range arg2 {
v2 := reflect.ValueOf(arg)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind:
truth = v1.Bool() == v2.Bool()
case complexKind:
truth = v1.Complex() == v2.Complex()
case floatKind:
truth = v1.Float() == v2.Float()
case intKind:
truth = v1.Int() == v2.Int()
case stringKind:
truth = v1.String() == v2.String()
case uintKind:
truth = v1.Uint() == v2.Uint()
default:
panic("invalid kind")
}
}
if truth {
return true, nil
}
}
return false, nil
}
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
// != is the inverse of ==.
equal, err := eq(arg1, arg2)
return !equal, err
}
// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
v2 := reflect.ValueOf(arg2)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind, complexKind:
return false, errBadComparisonType
case floatKind:
truth = v1.Float() < v2.Float()
case intKind:
truth = v1.Int() < v2.Int()
case stringKind:
truth = v1.String() < v2.String()
case uintKind:
truth = v1.Uint() < v2.Uint()
default:
panic("invalid kind")
}
}
return truth, nil
}
// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
// <= is < or ==.
lessThan, err := lt(arg1, arg2)
if lessThan || err != nil {
return lessThan, err
}
return eq(arg1, arg2)
}
// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
// > is the inverse of <=.
lessOrEqual, err := le(arg1, arg2)
if err != nil {
return false, err
}
return !lessOrEqual, nil
}
// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
// >= is the inverse of <.
lessThan, err := lt(arg1, arg2)
if err != nil {
return false, err
}
return !lessThan, nil
}
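// Illustration (not part of the original file): the comparison builtins above are
// ordinary template functions. A minimal sketch using the standard library
// text/template, which this vendored copy mirrors.
package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("demo").Parse("{{if ge .Count 10}}many{{else}}{{.Count}}{{end}}\n"))
	for _, n := range []int{3, 12} {
		// Prints "3", then "many".
		if err := t.Execute(os.Stdout, map[string]int{"Count": n}); err != nil {
			panic(err)
		}
	}
}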
// HTML escaping.
var (
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp = []byte("&amp;")
htmlLt = []byte("&lt;")
htmlGt = []byte("&gt;")
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
last := 0
for i, c := range b {
var html []byte
switch c {
case '"':
html = htmlQuot
case '\'':
html = htmlApos
case '&':
html = htmlAmp
case '<':
html = htmlLt
case '>':
html = htmlGt
default:
continue
}
w.Write(b[last:i])
w.Write(html)
last = i + 1
}
w.Write(b[last:])
}
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexAny(s, `'"&<>`) < 0 {
return s
}
var b bytes.Buffer
HTMLEscape(&b, []byte(s))
return b.String()
}
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
return HTMLEscapeString(evalArgs(args))
}
// JavaScript escaping.
var (
jsLowUni = []byte(`\u00`)
hex = []byte("0123456789ABCDEF")
jsBackslash = []byte(`\\`)
jsApos = []byte(`\'`)
jsQuot = []byte(`\"`)
jsLt = []byte(`\x3C`)
jsGt = []byte(`\x3E`)
)
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
last := 0
for i := 0; i < len(b); i++ {
c := b[i]
if !jsIsSpecial(rune(c)) {
// fast path: nothing to do
continue
}
w.Write(b[last:i])
if c < utf8.RuneSelf {
// Quotes, slashes and angle brackets get quoted.
// Control characters get written as \u00XX.
switch c {
case '\\':
w.Write(jsBackslash)
case '\'':
w.Write(jsApos)
case '"':
w.Write(jsQuot)
case '<':
w.Write(jsLt)
case '>':
w.Write(jsGt)
default:
w.Write(jsLowUni)
t, b := c>>4, c&0x0f
w.Write(hex[t : t+1])
w.Write(hex[b : b+1])
}
} else {
// Unicode rune.
r, size := utf8.DecodeRune(b[i:])
if unicode.IsPrint(r) {
w.Write(b[i : i+size])
} else {
fmt.Fprintf(w, "\\u%04X", r)
}
i += size - 1
}
last = i + 1
}
w.Write(b[last:])
}
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexFunc(s, jsIsSpecial) < 0 {
return s
}
var b bytes.Buffer
JSEscape(&b, []byte(s))
return b.String()
}
func jsIsSpecial(r rune) bool {
switch r {
case '\\', '\'', '"', '<', '>':
return true
}
return r < ' ' || utf8.RuneSelf <= r
}
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
return JSEscapeString(evalArgs(args))
}
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
return url.QueryEscape(evalArgs(args))
}
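// Illustration (not part of the original file): the escapers above are also usable as
// plain functions. A minimal sketch using the standard library text/template, which
// this vendored copy mirrors.
package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">&</a>`)) // &lt;a href=&#34;x&#34;&gt;&amp;&lt;/a&gt;
	fmt.Println(template.JSEscapeString(`alert("hi")`))         // alert(\"hi\")
	fmt.Println(template.URLQueryEscaper("a b&c"))              // a+b%26c
}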
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
// fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
ok := false
var s string
// Fast path for simple common case.
if len(args) == 1 {
s, ok = args[0].(string)
}
if !ok {
for i, arg := range args {
a, ok := printableValue(reflect.ValueOf(arg))
if ok {
args[i] = a
} // else let fmt do its thing
}
s = fmt.Sprint(args...)
}
return s
}

View file

@ -1,108 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Helper functions to make constructing templates easier.
package template
import (
"fmt"
"io/ioutil"
"path/filepath"
)
// Functions and methods to parse templates.
// Must is a helper that wraps a call to a function returning (*Template, error)
// and panics if the error is non-nil. It is intended for use in variable
// initializations such as
// var t = template.Must(template.New("name").Parse("text"))
func Must(t *Template, err error) *Template {
if err != nil {
panic(err)
}
return t
}
// ParseFiles creates a new Template and parses the template definitions from
// the named files. The returned template's name will have the (base) name and
// (parsed) contents of the first file. There must be at least one file.
// If an error occurs, parsing stops and the returned *Template is nil.
func ParseFiles(filenames ...string) (*Template, error) {
return parseFiles(nil, filenames...)
}
// ParseFiles parses the named files and associates the resulting templates with
// t. If an error occurs, parsing stops and the returned template is nil;
// otherwise it is t. There must be at least one file.
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
return parseFiles(t, filenames...)
}
// parseFiles is the helper for the method and function. If the argument
// template is nil, it is created from the first file.
func parseFiles(t *Template, filenames ...string) (*Template, error) {
if len(filenames) == 0 {
// Not really a problem, but be consistent.
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
}
for _, filename := range filenames {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
s := string(b)
name := filepath.Base(filename)
// First template becomes return value if not already defined,
// and we use that one for subsequent New calls to associate
// all the templates together. Also, if this file has the same name
// as t, this file becomes the contents of t, so
// t, err := New(name).Funcs(xxx).ParseFiles(name)
// works. Otherwise we create a new template associated with t.
var tmpl *Template
if t == nil {
t = New(name)
}
if name == t.Name() {
tmpl = t
} else {
tmpl = t.New(name)
}
_, err = tmpl.Parse(s)
if err != nil {
return nil, err
}
}
return t, nil
}
// ParseGlob creates a new Template and parses the template definitions from the
// files identified by the pattern, which must match at least one file. The
// returned template will have the (base) name and (parsed) contents of the
// first file matched by the pattern. ParseGlob is equivalent to calling
// ParseFiles with the list of files matched by the pattern.
func ParseGlob(pattern string) (*Template, error) {
return parseGlob(nil, pattern)
}
// ParseGlob parses the template definitions in the files identified by the
// pattern and associates the resulting templates with t. The pattern is
// processed by filepath.Glob and must match at least one file. ParseGlob is
// equivalent to calling t.ParseFiles with the list of files matched by the
// pattern.
func (t *Template) ParseGlob(pattern string) (*Template, error) {
return parseGlob(t, pattern)
}
// parseGlob is the implementation of the function and method ParseGlob.
func parseGlob(t *Template, pattern string) (*Template, error) {
filenames, err := filepath.Glob(pattern)
if err != nil {
return nil, err
}
if len(filenames) == 0 {
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
}
return parseFiles(t, filenames...)
}
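// Illustration (not part of the original file): typical use of Must with ParseGlob or
// ParseFiles. A minimal sketch using the standard library text/template, which this
// vendored copy mirrors; the templates/ directory and layout.tmpl name are
// hypothetical.
package main

import (
	"os"
	"text/template"
)

func main() {
	// Must panics at startup if the pattern matches no files or a file fails to parse.
	tmpls := template.Must(template.ParseGlob("templates/*.tmpl"))
	// Execute one of the parsed templates by its base file name.
	if err := tmpls.ExecuteTemplate(os.Stdout, "layout.tmpl", nil); err != nil {
		panic(err)
	}
}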

View file

@ -1,556 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parse
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// item represents a token or text string returned from the scanner.
type item struct {
typ itemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
val string // The value of this item.
}
func (i item) String() string {
switch {
case i.typ == itemEOF:
return "EOF"
case i.typ == itemError:
return i.val
case i.typ > itemKeyword:
return fmt.Sprintf("<%s>", i.val)
case len(i.val) > 10:
return fmt.Sprintf("%.10q...", i.val)
}
return fmt.Sprintf("%q", i.val)
}
// itemType identifies the type of lex items.
type itemType int
const (
itemError itemType = iota // error occurred; value is text of error
itemBool // boolean constant
itemChar // printable ASCII character; grab bag for comma etc.
itemCharConstant // character constant
itemComplex // complex constant (1+2i); imaginary is just a number
itemColonEquals // colon-equals (':=') introducing a declaration
itemEOF
itemField // alphanumeric identifier starting with '.'
itemIdentifier // alphanumeric identifier not starting with '.'
itemLeftDelim // left action delimiter
itemLeftParen // '(' inside action
itemNumber // simple number, including imaginary
itemPipe // pipe symbol
itemRawString // raw quoted string (includes quotes)
itemRightDelim // right action delimiter
itemElideNewline // elide newline after right delim
itemRightParen // ')' inside action
itemSpace // run of spaces separating arguments
itemString // quoted string (includes quotes)
itemText // plain text
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
// Keywords appear after all the rest.
itemKeyword // used only to delimit the keywords
itemDot // the cursor, spelled '.'
itemDefine // define keyword
itemElse // else keyword
itemEnd // end keyword
itemIf // if keyword
itemNil // the untyped nil constant, easiest to treat as a keyword
itemRange // range keyword
itemTemplate // template keyword
itemWith // with keyword
)
var key = map[string]itemType{
".": itemDot,
"define": itemDefine,
"else": itemElse,
"end": itemEnd,
"if": itemIf,
"range": itemRange,
"nil": itemNil,
"template": itemTemplate,
"with": itemWith,
}
const eof = -1
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn
// lexer holds the state of the scanner.
type lexer struct {
name string // the name of the input; used only for error reports
input string // the string being scanned
leftDelim string // start of action
rightDelim string // end of action
state stateFn // the next lexing function to enter
pos Pos // current position in the input
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
items chan item // channel of scanned items
parenDepth int // nesting depth of ( ) exprs
}
// next returns the next rune in the input.
func (l *lexer) next() rune {
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
l.width = Pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
l.pos -= l.width
}
// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
l.items <- item{t, l.start, l.input[l.start:l.pos]}
l.start = l.pos
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
l.start = l.pos
}
// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
return true
}
l.backup()
return false
}
// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
for strings.IndexRune(valid, l.next()) >= 0 {
}
l.backup()
}
// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
return 1 + strings.Count(l.input[:l.lastPos], "\n")
}
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
return nil
}
// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
item := <-l.items
l.lastPos = item.pos
return item
}
// lex creates a new scanner for the input string.
func lex(name, input, left, right string) *lexer {
if left == "" {
left = leftDelim
}
if right == "" {
right = rightDelim
}
l := &lexer{
name: name,
input: input,
leftDelim: left,
rightDelim: right,
items: make(chan item),
}
go l.run()
return l
}
// run runs the state machine for the lexer.
func (l *lexer) run() {
for l.state = lexText; l.state != nil; {
l.state = l.state(l)
}
}
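// Illustration (not part of the original file): the lexer above uses the
// state-function pattern, with run driving stateFn transitions in a goroutine and
// emit delivering items over a channel. A minimal, self-contained sketch of the same
// pattern for a toy input of words and spaces; all names here are hypothetical and
// unrelated to the template lexer.
package main

import "fmt"

type tok struct{ kind, val string }

type toyLexer struct {
	input string
	pos   int
	out   chan tok
}

type toyState func(*toyLexer) toyState

func lexWord(l *toyLexer) toyState {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	if l.pos > start {
		l.out <- tok{"word", l.input[start:l.pos]}
	}
	if l.pos == len(l.input) {
		return nil // no more input; the run loop stops and the channel is closed
	}
	return lexSpace
}

func lexSpace(l *toyLexer) toyState {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] == ' ' {
		l.pos++
	}
	l.out <- tok{"space", l.input[start:l.pos]}
	return lexWord
}

func main() {
	l := &toyLexer{input: "lex me now", out: make(chan tok)}
	go func() { // the equivalent of run above
		for state := lexWord; state != nil; {
			state = state(l)
		}
		close(l.out)
	}()
	for t := range l.out {
		fmt.Printf("%-5s %q\n", t.kind, t.val)
	}
}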
// state functions
const (
leftDelim = "{{"
rightDelim = "}}"
leftComment = "/*"
rightComment = "*/"
)
// lexText scans until an opening action delimiter, "{{".
func lexText(l *lexer) stateFn {
for {
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
if l.pos > l.start {
l.emit(itemText)
}
return lexLeftDelim
}
if l.next() == eof {
break
}
}
// Correctly reached EOF.
if l.pos > l.start {
l.emit(itemText)
}
l.emit(itemEOF)
return nil
}
// lexLeftDelim scans the left delimiter, which is known to be present.
func lexLeftDelim(l *lexer) stateFn {
l.pos += Pos(len(l.leftDelim))
if strings.HasPrefix(l.input[l.pos:], leftComment) {
return lexComment
}
l.emit(itemLeftDelim)
l.parenDepth = 0
return lexInsideAction
}
// lexComment scans a comment. The left comment marker is known to be present.
func lexComment(l *lexer) stateFn {
l.pos += Pos(len(leftComment))
i := strings.Index(l.input[l.pos:], rightComment)
if i < 0 {
return l.errorf("unclosed comment")
}
l.pos += Pos(i + len(rightComment))
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
return l.errorf("comment ends before closing delimiter")
}
l.pos += Pos(len(l.rightDelim))
l.ignore()
return lexText
}
// lexRightDelim scans the right delimiter, which is known to be present.
func lexRightDelim(l *lexer) stateFn {
l.pos += Pos(len(l.rightDelim))
l.emit(itemRightDelim)
if l.peek() == '\\' {
l.pos++
l.emit(itemElideNewline)
}
return lexText
}
// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
// Either number, quoted string, or identifier.
// Spaces separate arguments; runs of spaces turn into itemSpace.
// Pipe symbols separate and are emitted.
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
if l.parenDepth == 0 {
return lexRightDelim
}
return l.errorf("unclosed left paren")
}
switch r := l.next(); {
case r == eof || isEndOfLine(r):
return l.errorf("unclosed action")
case isSpace(r):
return lexSpace
case r == ':':
if l.next() != '=' {
return l.errorf("expected :=")
}
l.emit(itemColonEquals)
case r == '|':
l.emit(itemPipe)
case r == '"':
return lexQuote
case r == '`':
return lexRawQuote
case r == '$':
return lexVariable
case r == '\'':
return lexChar
case r == '.':
// special look-ahead for ".field" so we don't break l.backup().
if l.pos < Pos(len(l.input)) {
r := l.input[l.pos]
if r < '0' || '9' < r {
return lexField
}
}
fallthrough // '.' can start a number.
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
l.backup()
return lexNumber
case isAlphaNumeric(r):
l.backup()
return lexIdentifier
case r == '(':
l.emit(itemLeftParen)
l.parenDepth++
return lexInsideAction
case r == ')':
l.emit(itemRightParen)
l.parenDepth--
if l.parenDepth < 0 {
return l.errorf("unexpected right paren %#U", r)
}
return lexInsideAction
case r <= unicode.MaxASCII && unicode.IsPrint(r):
l.emit(itemChar)
return lexInsideAction
default:
return l.errorf("unrecognized character in action: %#U", r)
}
return lexInsideAction
}
// lexSpace scans a run of space characters.
// One space has already been seen.
func lexSpace(l *lexer) stateFn {
for isSpace(l.peek()) {
l.next()
}
l.emit(itemSpace)
return lexInsideAction
}
// lexIdentifier scans an alphanumeric.
func lexIdentifier(l *lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case isAlphaNumeric(r):
// absorb.
default:
l.backup()
word := l.input[l.start:l.pos]
if !l.atTerminator() {
return l.errorf("bad character %#U", r)
}
switch {
case key[word] > itemKeyword:
l.emit(key[word])
case word[0] == '.':
l.emit(itemField)
case word == "true", word == "false":
l.emit(itemBool)
default:
l.emit(itemIdentifier)
}
break Loop
}
}
return lexInsideAction
}
// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
return lexFieldOrVariable(l, itemField)
}
// lexVariable scans a Variable: $Alphanumeric.
// The $ has been scanned.
func lexVariable(l *lexer) stateFn {
if l.atTerminator() { // Nothing interesting follows -> "$".
l.emit(itemVariable)
return lexInsideAction
}
return lexFieldOrVariable(l, itemVariable)
}
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
if typ == itemVariable {
l.emit(itemVariable)
} else {
l.emit(itemDot)
}
return lexInsideAction
}
var r rune
for {
r = l.next()
if !isAlphaNumeric(r) {
l.backup()
break
}
}
if !l.atTerminator() {
return l.errorf("bad character %#U", r)
}
l.emit(typ)
return lexInsideAction
}
// atTerminator reports whether the input is at a valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
r := l.peek()
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '|', ':', ')', '(':
return true
}
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
// succeed but should fail) but only in extremely rare cases caused by willfully
// bad choice of delimiter.
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
return true
}
return false
}
// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != eof && r != '\n' {
break
}
fallthrough
case eof, '\n':
return l.errorf("unterminated character constant")
case '\'':
break Loop
}
}
l.emit(itemCharConstant)
return lexInsideAction
}
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
if !l.scanNumber() {
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
}
if sign := l.peek(); sign == '+' || sign == '-' {
// Complex: 1+2i. No spaces, must end in 'i'.
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
}
l.emit(itemComplex)
} else {
l.emit(itemNumber)
}
return lexInsideAction
}
func (l *lexer) scanNumber() bool {
// Optional leading sign.
l.accept("+-")
// Is it hex?
digits := "0123456789"
if l.accept("0") && l.accept("xX") {
digits = "0123456789abcdefABCDEF"
}
l.acceptRun(digits)
if l.accept(".") {
l.acceptRun(digits)
}
if l.accept("eE") {
l.accept("+-")
l.acceptRun("0123456789")
}
// Is it imaginary?
l.accept("i")
// Next thing mustn't be alphanumeric.
if isAlphaNumeric(l.peek()) {
l.next()
return false
}
return true
}
// lexQuote scans a quoted string.
func lexQuote(l *lexer) stateFn {
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != eof && r != '\n' {
break
}
fallthrough
case eof, '\n':
return l.errorf("unterminated quoted string")
case '"':
break Loop
}
}
l.emit(itemString)
return lexInsideAction
}
// lexRawQuote scans a raw quoted string.
func lexRawQuote(l *lexer) stateFn {
Loop:
for {
switch l.next() {
case eof, '\n':
return l.errorf("unterminated raw quoted string")
case '`':
break Loop
}
}
l.emit(itemRawString)
return lexInsideAction
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

View file

@ -1,834 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Parse nodes.
package parse
import (
"bytes"
"fmt"
"strconv"
"strings"
)
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
// A Node is an element in the parse tree. The interface is trivial.
// The interface contains an unexported method so that only
// types local to this package can satisfy it.
type Node interface {
Type() NodeType
String() string
// Copy does a deep copy of the Node and all its components.
// To avoid type assertions, some XxxNodes also have specialized
// CopyXxx methods that return *XxxNode.
Copy() Node
Position() Pos // byte position of start of node in full original input string
// tree returns the containing *Tree.
// It is unexported so all implementations of Node are in this package.
tree() *Tree
}
// NodeType identifies the type of a parse tree node.
type NodeType int
// Pos represents a byte position in the original input text from which
// this template was parsed.
type Pos int
func (p Pos) Position() Pos {
return p
}
// Type returns itself and provides an easy default implementation
// for embedding in a Node. Embedded in all non-trivial Nodes.
func (t NodeType) Type() NodeType {
return t
}
const (
NodeText NodeType = iota // Plain text.
NodeAction // A non-control action such as a field evaluation.
NodeBool // A boolean constant.
NodeChain // A sequence of field accesses.
NodeCommand // An element of a pipeline.
NodeDot // The cursor, dot.
nodeElse // An else action. Not added to tree.
nodeEnd // An end action. Not added to tree.
NodeField // A field or method name.
NodeIdentifier // An identifier; always a function name.
NodeIf // An if action.
NodeList // A list of Nodes.
NodeNil // An untyped nil constant.
NodeNumber // A numerical constant.
NodePipe // A pipeline of commands.
NodeRange // A range action.
NodeString // A string constant.
NodeTemplate // A template invocation action.
NodeVariable // A $ variable.
NodeWith // A with action.
)
// Nodes.
// ListNode holds a sequence of nodes.
type ListNode struct {
NodeType
Pos
tr *Tree
Nodes []Node // The element nodes in lexical order.
}
func (t *Tree) newList(pos Pos) *ListNode {
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
}
func (l *ListNode) append(n Node) {
l.Nodes = append(l.Nodes, n)
}
func (l *ListNode) tree() *Tree {
return l.tr
}
func (l *ListNode) String() string {
b := new(bytes.Buffer)
for _, n := range l.Nodes {
fmt.Fprint(b, n)
}
return b.String()
}
func (l *ListNode) CopyList() *ListNode {
if l == nil {
return l
}
n := l.tr.newList(l.Pos)
for _, elem := range l.Nodes {
n.append(elem.Copy())
}
return n
}
func (l *ListNode) Copy() Node {
return l.CopyList()
}
// TextNode holds plain text.
type TextNode struct {
NodeType
Pos
tr *Tree
Text []byte // The text; may span newlines.
}
func (t *Tree) newText(pos Pos, text string) *TextNode {
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
}
func (t *TextNode) String() string {
return fmt.Sprintf(textFormat, t.Text)
}
func (t *TextNode) tree() *Tree {
return t.tr
}
func (t *TextNode) Copy() Node {
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
}
// PipeNode holds a pipeline with optional declaration
type PipeNode struct {
NodeType
Pos
tr *Tree
Line int // The line number in the input (deprecated; kept for compatibility)
Decl []*VariableNode // Variable declarations in lexical order.
Cmds []*CommandNode // The commands in lexical order.
}
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
}
func (p *PipeNode) append(command *CommandNode) {
p.Cmds = append(p.Cmds, command)
}
func (p *PipeNode) String() string {
s := ""
if len(p.Decl) > 0 {
for i, v := range p.Decl {
if i > 0 {
s += ", "
}
s += v.String()
}
s += " := "
}
for i, c := range p.Cmds {
if i > 0 {
s += " | "
}
s += c.String()
}
return s
}
func (p *PipeNode) tree() *Tree {
return p.tr
}
func (p *PipeNode) CopyPipe() *PipeNode {
if p == nil {
return p
}
var decl []*VariableNode
for _, d := range p.Decl {
decl = append(decl, d.Copy().(*VariableNode))
}
n := p.tr.newPipeline(p.Pos, p.Line, decl)
for _, c := range p.Cmds {
n.append(c.Copy().(*CommandNode))
}
return n
}
func (p *PipeNode) Copy() Node {
return p.CopyPipe()
}
// ActionNode holds an action (something bounded by delimiters).
// Control actions have their own nodes; ActionNode represents simple
// ones such as field evaluations and parenthesized pipelines.
type ActionNode struct {
NodeType
Pos
tr *Tree
Line int // The line number in the input (deprecated; kept for compatibility)
Pipe *PipeNode // The pipeline in the action.
}
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
}
func (a *ActionNode) String() string {
return fmt.Sprintf("{{%s}}", a.Pipe)
}
func (a *ActionNode) tree() *Tree {
return a.tr
}
func (a *ActionNode) Copy() Node {
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
}
// CommandNode holds a command (a pipeline inside an evaluating action).
type CommandNode struct {
NodeType
Pos
tr *Tree
Args []Node // Arguments in lexical order: Identifier, field, or constant.
}
func (t *Tree) newCommand(pos Pos) *CommandNode {
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
}
func (c *CommandNode) append(arg Node) {
c.Args = append(c.Args, arg)
}
func (c *CommandNode) String() string {
s := ""
for i, arg := range c.Args {
if i > 0 {
s += " "
}
if arg, ok := arg.(*PipeNode); ok {
s += "(" + arg.String() + ")"
continue
}
s += arg.String()
}
return s
}
func (c *CommandNode) tree() *Tree {
return c.tr
}
func (c *CommandNode) Copy() Node {
if c == nil {
return c
}
n := c.tr.newCommand(c.Pos)
for _, c := range c.Args {
n.append(c.Copy())
}
return n
}
// IdentifierNode holds an identifier.
type IdentifierNode struct {
NodeType
Pos
tr *Tree
Ident string // The identifier's name.
}
// NewIdentifier returns a new IdentifierNode with the given identifier name.
func NewIdentifier(ident string) *IdentifierNode {
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
}
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
// Chained for convenience.
// TODO: fix one day?
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
i.Pos = pos
return i
}
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
// Chained for convenience.
// TODO: fix one day?
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
i.tr = t
return i
}
func (i *IdentifierNode) String() string {
return i.Ident
}
func (i *IdentifierNode) tree() *Tree {
return i.tr
}
func (i *IdentifierNode) Copy() Node {
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
}
// VariableNode holds a list of variable names, possibly with chained field
// accesses. The dollar sign is part of the (first) name.
type VariableNode struct {
NodeType
Pos
tr *Tree
Ident []string // Variable name and fields in lexical order.
}
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
}
func (v *VariableNode) String() string {
s := ""
for i, id := range v.Ident {
if i > 0 {
s += "."
}
s += id
}
return s
}
func (v *VariableNode) tree() *Tree {
return v.tr
}
func (v *VariableNode) Copy() Node {
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
}
// DotNode holds the special identifier '.'.
type DotNode struct {
NodeType
Pos
tr *Tree
}
func (t *Tree) newDot(pos Pos) *DotNode {
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
}
func (d *DotNode) Type() NodeType {
// Override method on embedded NodeType for API compatibility.
// TODO: Not really a problem; could change API without effect but
// api tool complains.
return NodeDot
}
func (d *DotNode) String() string {
return "."
}
func (d *DotNode) tree() *Tree {
return d.tr
}
func (d *DotNode) Copy() Node {
return d.tr.newDot(d.Pos)
}
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
type NilNode struct {
NodeType
Pos
tr *Tree
}
func (t *Tree) newNil(pos Pos) *NilNode {
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
}
func (n *NilNode) Type() NodeType {
// Override method on embedded NodeType for API compatibility.
// TODO: Not really a problem; could change API without effect but
// api tool complains.
return NodeNil
}
func (n *NilNode) String() string {
return "nil"
}
func (n *NilNode) tree() *Tree {
return n.tr
}
func (n *NilNode) Copy() Node {
return n.tr.newNil(n.Pos)
}
// FieldNode holds a field (identifier starting with '.').
// The names may be chained ('.x.y').
// The period is dropped from each ident.
type FieldNode struct {
NodeType
Pos
tr *Tree
Ident []string // The identifiers in lexical order.
}
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
}
func (f *FieldNode) String() string {
s := ""
for _, id := range f.Ident {
s += "." + id
}
return s
}
func (f *FieldNode) tree() *Tree {
return f.tr
}
func (f *FieldNode) Copy() Node {
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
}
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
// The names may be chained ('.x.y').
// The periods are dropped from each ident.
type ChainNode struct {
NodeType
Pos
tr *Tree
Node Node
Field []string // The identifiers in lexical order.
}
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
}
// Add adds the named field (which should start with a period) to the end of the chain.
func (c *ChainNode) Add(field string) {
if len(field) == 0 || field[0] != '.' {
panic("no dot in field")
}
field = field[1:] // Remove leading dot.
if field == "" {
panic("empty field")
}
c.Field = append(c.Field, field)
}
func (c *ChainNode) String() string {
s := c.Node.String()
if _, ok := c.Node.(*PipeNode); ok {
s = "(" + s + ")"
}
for _, field := range c.Field {
s += "." + field
}
return s
}
func (c *ChainNode) tree() *Tree {
return c.tr
}
func (c *ChainNode) Copy() Node {
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
}
// BoolNode holds a boolean constant.
type BoolNode struct {
NodeType
Pos
tr *Tree
True bool // The value of the boolean constant.
}
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
}
func (b *BoolNode) String() string {
if b.True {
return "true"
}
return "false"
}
func (b *BoolNode) tree() *Tree {
return b.tr
}
func (b *BoolNode) Copy() Node {
return b.tr.newBool(b.Pos, b.True)
}
// NumberNode holds a number: signed or unsigned integer, float, or complex.
// The value is parsed and stored under all the types that can represent the value.
// This simulates in a small amount of code the behavior of Go's ideal constants.
type NumberNode struct {
NodeType
Pos
tr *Tree
IsInt bool // Number has an integral value.
IsUint bool // Number has an unsigned integral value.
IsFloat bool // Number has a floating-point value.
IsComplex bool // Number is complex.
Int64 int64 // The signed integer value.
Uint64 uint64 // The unsigned integer value.
Float64 float64 // The floating-point value.
Complex128 complex128 // The complex value.
Text string // The original textual representation from the input.
}
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
switch typ {
case itemCharConstant:
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
if err != nil {
return nil, err
}
if tail != "'" {
return nil, fmt.Errorf("malformed character constant: %s", text)
}
n.Int64 = int64(rune)
n.IsInt = true
n.Uint64 = uint64(rune)
n.IsUint = true
n.Float64 = float64(rune) // odd but those are the rules.
n.IsFloat = true
return n, nil
case itemComplex:
// fmt.Sscan can parse the pair, so let it do the work.
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
return nil, err
}
n.IsComplex = true
n.simplifyComplex()
return n, nil
}
// Imaginary constants can only be complex unless they are zero.
if len(text) > 0 && text[len(text)-1] == 'i' {
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
if err == nil {
n.IsComplex = true
n.Complex128 = complex(0, f)
n.simplifyComplex()
return n, nil
}
}
// Do integer test first so we get 0x123 etc.
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
if err == nil {
n.IsUint = true
n.Uint64 = u
}
i, err := strconv.ParseInt(text, 0, 64)
if err == nil {
n.IsInt = true
n.Int64 = i
if i == 0 {
n.IsUint = true // in case of -0.
n.Uint64 = u
}
}
// If an integer extraction succeeded, promote the float.
if n.IsInt {
n.IsFloat = true
n.Float64 = float64(n.Int64)
} else if n.IsUint {
n.IsFloat = true
n.Float64 = float64(n.Uint64)
} else {
f, err := strconv.ParseFloat(text, 64)
if err == nil {
n.IsFloat = true
n.Float64 = f
// If a floating-point extraction succeeded, extract the int if needed.
if !n.IsInt && float64(int64(f)) == f {
n.IsInt = true
n.Int64 = int64(f)
}
if !n.IsUint && float64(uint64(f)) == f {
n.IsUint = true
n.Uint64 = uint64(f)
}
}
}
if !n.IsInt && !n.IsUint && !n.IsFloat {
return nil, fmt.Errorf("illegal number syntax: %q", text)
}
return n, nil
}
// simplifyComplex pulls out any other types that are represented by the complex number.
// These all require that the imaginary part be zero.
func (n *NumberNode) simplifyComplex() {
n.IsFloat = imag(n.Complex128) == 0
if n.IsFloat {
n.Float64 = real(n.Complex128)
n.IsInt = float64(int64(n.Float64)) == n.Float64
if n.IsInt {
n.Int64 = int64(n.Float64)
}
n.IsUint = float64(uint64(n.Float64)) == n.Float64
if n.IsUint {
n.Uint64 = uint64(n.Float64)
}
}
}
func (n *NumberNode) String() string {
return n.Text
}
func (n *NumberNode) tree() *Tree {
return n.tr
}
func (n *NumberNode) Copy() Node {
nn := new(NumberNode)
*nn = *n // Easy, fast, correct.
return nn
}
// StringNode holds a string constant. The value has been "unquoted".
type StringNode struct {
NodeType
Pos
tr *Tree
Quoted string // The original text of the string, with quotes.
Text string // The string, after quote processing.
}
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
}
func (s *StringNode) String() string {
return s.Quoted
}
func (s *StringNode) tree() *Tree {
return s.tr
}
func (s *StringNode) Copy() Node {
return s.tr.newString(s.Pos, s.Quoted, s.Text)
}
// endNode represents an {{end}} action.
// It does not appear in the final parse tree.
type endNode struct {
NodeType
Pos
tr *Tree
}
func (t *Tree) newEnd(pos Pos) *endNode {
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
}
func (e *endNode) String() string {
return "{{end}}"
}
func (e *endNode) tree() *Tree {
return e.tr
}
func (e *endNode) Copy() Node {
return e.tr.newEnd(e.Pos)
}
// elseNode represents an {{else}} action. Does not appear in the final tree.
type elseNode struct {
NodeType
Pos
tr *Tree
Line int // The line number in the input (deprecated; kept for compatibility)
}
func (t *Tree) newElse(pos Pos, line int) *elseNode {
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
}
func (e *elseNode) Type() NodeType {
return nodeElse
}
func (e *elseNode) String() string {
return "{{else}}"
}
func (e *elseNode) tree() *Tree {
return e.tr
}
func (e *elseNode) Copy() Node {
return e.tr.newElse(e.Pos, e.Line)
}
// BranchNode is the common representation of if, range, and with.
type BranchNode struct {
NodeType
Pos
tr *Tree
Line int // The line number in the input (deprecated; kept for compatibility)
Pipe *PipeNode // The pipeline to be evaluated.
List *ListNode // What to execute if the value is non-empty.
ElseList *ListNode // What to execute if the value is empty (nil if absent).
}
func (b *BranchNode) String() string {
name := ""
switch b.NodeType {
case NodeIf:
name = "if"
case NodeRange:
name = "range"
case NodeWith:
name = "with"
default:
panic("unknown branch type")
}
if b.ElseList != nil {
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
}
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
}
func (b *BranchNode) tree() *Tree {
return b.tr
}
func (b *BranchNode) Copy() Node {
switch b.NodeType {
case NodeIf:
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
case NodeRange:
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
case NodeWith:
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
default:
panic("unknown branch type")
}
}
// IfNode represents an {{if}} action and its commands.
type IfNode struct {
BranchNode
}
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}
func (i *IfNode) Copy() Node {
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
}
// RangeNode represents a {{range}} action and its commands.
type RangeNode struct {
BranchNode
}
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}
func (r *RangeNode) Copy() Node {
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
}
// WithNode represents a {{with}} action and its commands.
type WithNode struct {
BranchNode
}
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}
func (w *WithNode) Copy() Node {
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
}
// TemplateNode represents a {{template}} action.
type TemplateNode struct {
NodeType
Pos
tr *Tree
Line int // The line number in the input (deprecated; kept for compatibility)
Name string // The name of the template (unquoted).
Pipe *PipeNode // The command to evaluate as dot for the template.
}
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
}
func (t *TemplateNode) String() string {
if t.Pipe == nil {
return fmt.Sprintf("{{template %q}}", t.Name)
}
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
}
func (t *TemplateNode) tree() *Tree {
return t.tr
}
func (t *TemplateNode) Copy() Node {
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
}

View file

@ -1,700 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parse builds parse trees for templates as defined by text/template
// and html/template. Clients should use those packages to construct templates
// rather than this one, which provides shared internal data structures not
// intended for general use.
package parse
import (
"bytes"
"fmt"
"runtime"
"strconv"
"strings"
)
// Tree is the representation of a single parsed template.
type Tree struct {
Name string // name of the template represented by the tree.
ParseName string // name of the top-level template during parsing, for error messages.
Root *ListNode // top-level root of the tree.
text string // text parsed to create the template (or its parent)
// Parsing only; cleared after parse.
funcs []map[string]interface{}
lex *lexer
token [3]item // three-token lookahead for parser.
peekCount int
vars []string // variables defined at the moment.
}
// Copy returns a copy of the Tree. Any parsing state is discarded.
func (t *Tree) Copy() *Tree {
if t == nil {
return nil
}
return &Tree{
Name: t.Name,
ParseName: t.ParseName,
Root: t.Root.CopyList(),
text: t.text,
}
}
// Parse returns a map from template name to parse.Tree, created by parsing the
// templates described in the argument string. The top-level template will be
// given the specified name. If an error is encountered, parsing stops and an
// empty map is returned with the error.
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
treeSet = make(map[string]*Tree)
t := New(name)
t.text = text
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
return
}
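// Illustration (not part of the original file): Parse can be called directly to
// inspect a template's parse tree. A minimal sketch using the standard library's
// text/template/parse package, which this vendored copy mirrors.
package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	trees, err := parse.Parse("demo", "Hello, {{.Name}}!{{if .Admin}} (admin){{end}}", "", "")
	if err != nil {
		panic(err)
	}
	// The top-level template is stored in the returned map under its own name.
	for _, n := range trees["demo"].Root.Nodes {
		fmt.Printf("%T  %s\n", n, n)
	}
}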
// next returns the next token.
func (t *Tree) next() item {
if t.peekCount > 0 {
t.peekCount--
} else {
t.token[0] = t.lex.nextItem()
}
return t.token[t.peekCount]
}
// backup backs the input stream up one token.
func (t *Tree) backup() {
t.peekCount++
}
// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *Tree) backup2(t1 item) {
t.token[1] = t1
t.peekCount = 2
}
// backup3 backs the input stream up three tokens
// The zeroth token is already there.
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
t.token[1] = t1
t.token[2] = t2
t.peekCount = 3
}
// peek returns but does not consume the next token.
func (t *Tree) peek() item {
if t.peekCount > 0 {
return t.token[t.peekCount-1]
}
t.peekCount = 1
t.token[0] = t.lex.nextItem()
return t.token[0]
}
// nextNonSpace returns the next non-space token.
func (t *Tree) nextNonSpace() (token item) {
for {
token = t.next()
if token.typ != itemSpace {
break
}
}
return token
}
// peekNonSpace returns but does not consume the next non-space token.
func (t *Tree) peekNonSpace() (token item) {
for {
token = t.next()
if token.typ != itemSpace {
break
}
}
t.backup()
return token
}
// Parsing.
// New allocates a new parse tree with the given name.
func New(name string, funcs ...map[string]interface{}) *Tree {
return &Tree{
Name: name,
funcs: funcs,
}
}
// ErrorContext returns a textual representation of the location of the node in the input text.
// The receiver is only used when the node does not have a pointer to the tree inside,
// which can occur in old code.
func (t *Tree) ErrorContext(n Node) (location, context string) {
pos := int(n.Position())
tree := n.tree()
if tree == nil {
tree = t
}
text := tree.text[:pos]
byteNum := strings.LastIndex(text, "\n")
if byteNum == -1 {
byteNum = pos // On first line.
} else {
byteNum++ // After the newline.
byteNum = pos - byteNum
}
lineNum := 1 + strings.Count(text, "\n")
context = n.String()
if len(context) > 20 {
context = fmt.Sprintf("%.20s...", context)
}
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
}
// errorf formats the error and terminates processing.
func (t *Tree) errorf(format string, args ...interface{}) {
t.Root = nil
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
panic(fmt.Errorf(format, args...))
}
// error terminates processing.
func (t *Tree) error(err error) {
t.errorf("%s", err)
}
// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected itemType, context string) item {
token := t.nextNonSpace()
if token.typ != expected {
t.unexpected(token, context)
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
token := t.nextNonSpace()
if token.typ != expected1 && token.typ != expected2 {
t.unexpected(token, context)
}
return token
}
// unexpected complains about the token and terminates processing.
func (t *Tree) unexpected(token item, context string) {
t.errorf("unexpected %s in %s", token, context)
}
// recover is the handler that turns panics into returns from the top level of Parse.
func (t *Tree) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
panic(e)
}
if t != nil {
t.stopParse()
}
*errp = e.(error)
}
return
}
// startParse initializes the parser, using the lexer.
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
t.Root = nil
t.lex = lex
t.vars = []string{"$"}
t.funcs = funcs
}
// stopParse terminates parsing.
func (t *Tree) stopParse() {
t.lex = nil
t.vars = nil
t.funcs = nil
}
// Parse parses the template definition string to construct a representation of
// the template for execution. If either action delimiter string is empty, the
// default ("{{" or "}}") is used. Embedded template definitions are added to
// the treeSet map.
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
defer t.recover(&err)
t.ParseName = t.Name
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
t.text = text
t.parse(treeSet)
t.add(treeSet)
t.stopParse()
return t, nil
}
// add adds tree to the treeSet.
func (t *Tree) add(treeSet map[string]*Tree) {
tree := treeSet[t.Name]
if tree == nil || IsEmptyTree(tree.Root) {
treeSet[t.Name] = t
return
}
if !IsEmptyTree(t.Root) {
t.errorf("template: multiple definition of template %q", t.Name)
}
}
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
func IsEmptyTree(n Node) bool {
switch n := n.(type) {
case nil:
return true
case *ActionNode:
case *IfNode:
case *ListNode:
for _, node := range n.Nodes {
if !IsEmptyTree(node) {
return false
}
}
return true
case *RangeNode:
case *TemplateNode:
case *TextNode:
return len(bytes.TrimSpace(n.Text)) == 0
case *WithNode:
default:
panic("unknown node: " + n.String())
}
return false
}
// parse is the top-level parser for a template, essentially the same
// as itemList except it also parses {{define}} actions.
// It runs to EOF.
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
t.Root = t.newList(t.peek().pos)
for t.peek().typ != itemEOF {
if t.peek().typ == itemLeftDelim {
delim := t.next()
if t.nextNonSpace().typ == itemDefine {
newT := New("definition") // name will be updated once we know it.
newT.text = t.text
newT.ParseName = t.ParseName
newT.startParse(t.funcs, t.lex)
newT.parseDefinition(treeSet)
continue
}
t.backup2(delim)
}
n := t.textOrAction()
if n.Type() == nodeEnd {
t.errorf("unexpected %s", n)
}
t.Root.append(n)
}
return nil
}
// parseDefinition parses a {{define}} ... {{end}} template definition and
// installs the definition in the treeSet map. The "define" keyword has already
// been scanned.
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
const context = "define clause"
name := t.expectOneOf(itemString, itemRawString, context)
var err error
t.Name, err = strconv.Unquote(name.val)
if err != nil {
t.error(err)
}
t.expect(itemRightDelim, context)
var end Node
t.Root, end = t.itemList()
if end.Type() != nodeEnd {
t.errorf("unexpected %s in %s", end, context)
}
t.add(treeSet)
t.stopParse()
}
// itemList:
// textOrAction*
// Terminates at {{end}} or {{else}}, returned separately.
func (t *Tree) itemList() (list *ListNode, next Node) {
list = t.newList(t.peekNonSpace().pos)
for t.peekNonSpace().typ != itemEOF {
n := t.textOrAction()
switch n.Type() {
case nodeEnd, nodeElse:
return list, n
}
list.append(n)
}
t.errorf("unexpected EOF")
return
}
// textOrAction:
// text | action
func (t *Tree) textOrAction() Node {
switch token := t.nextNonSpace(); token.typ {
case itemElideNewline:
return t.elideNewline()
case itemText:
return t.newText(token.pos, token.val)
case itemLeftDelim:
return t.action()
default:
t.unexpected(token, "input")
}
return nil
}
// elideNewline:
// Remove newlines trailing rightDelim if \\ is present.
func (t *Tree) elideNewline() Node {
token := t.peek()
if token.typ != itemText {
t.unexpected(token, "input")
return nil
}
t.next()
stripped := strings.TrimLeft(token.val, "\n\r")
diff := len(token.val) - len(stripped)
if diff > 0 {
// This is a bit nasty. We mutate the token in-place to remove
// preceding newlines.
token.pos += Pos(diff)
token.val = stripped
}
return t.newText(token.pos, token.val)
}
// Action:
// control
// command ("|" command)*
// Left delim is past. Now get actions.
// First word could be a keyword such as range.
func (t *Tree) action() (n Node) {
switch token := t.nextNonSpace(); token.typ {
case itemElse:
return t.elseControl()
case itemEnd:
return t.endControl()
case itemIf:
return t.ifControl()
case itemRange:
return t.rangeControl()
case itemTemplate:
return t.templateControl()
case itemWith:
return t.withControl()
}
t.backup()
// Do not pop variables; they persist until "end".
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
}
// Pipeline:
// declarations? command ('|' command)*
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
var decl []*VariableNode
pos := t.peekNonSpace().pos
// Are there declarations?
for {
if v := t.peekNonSpace(); v.typ == itemVariable {
t.next()
// Since space is a token, we need 3-token look-ahead here in the worst case:
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
// argument variable rather than a declaration. So remember the token
// adjacent to the variable so we can push it back if necessary.
tokenAfterVariable := t.peek()
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
t.nextNonSpace()
variable := t.newVariable(v.pos, v.val)
decl = append(decl, variable)
t.vars = append(t.vars, v.val)
if next.typ == itemChar && next.val == "," {
if context == "range" && len(decl) < 2 {
continue
}
t.errorf("too many declarations in %s", context)
}
} else if tokenAfterVariable.typ == itemSpace {
t.backup3(v, tokenAfterVariable)
} else {
t.backup2(v)
}
}
break
}
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
for {
switch token := t.nextNonSpace(); token.typ {
case itemRightDelim, itemRightParen:
if len(pipe.Cmds) == 0 {
t.errorf("missing value for %s", context)
}
if token.typ == itemRightParen {
t.backup()
}
return
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
t.backup()
pipe.append(t.command())
default:
t.unexpected(token, context)
}
}
}
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
defer t.popVars(len(t.vars))
line = t.lex.lineNumber()
pipe = t.pipeline(context)
var next Node
list, next = t.itemList()
switch next.Type() {
case nodeEnd: //done
case nodeElse:
if allowElseIf {
// Special case for "else if". If the "else" is followed immediately by an "if",
// the elseControl will have left the "if" token pending. Treat
// {{if a}}_{{else if b}}_{{end}}
// as
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
// is assumed. This technique works even for long if-else-if chains.
// TODO: Should we allow else-if in with and range?
if t.peek().typ == itemIf {
t.next() // Consume the "if" token.
elseList = t.newList(next.Position())
elseList.append(t.ifControl())
// Do not consume the next item - only one {{end}} required.
break
}
}
elseList, next = t.itemList()
if next.Type() != nodeEnd {
t.errorf("expected end; found %s", next)
}
}
return pipe.Position(), line, pipe, list, elseList
}
// If:
// {{if pipeline}} itemList {{end}}
// {{if pipeline}} itemList {{else}} itemList {{end}}
// If keyword is past.
func (t *Tree) ifControl() Node {
return t.newIf(t.parseControl(true, "if"))
}
// Range:
// {{range pipeline}} itemList {{end}}
// {{range pipeline}} itemList {{else}} itemList {{end}}
// Range keyword is past.
func (t *Tree) rangeControl() Node {
return t.newRange(t.parseControl(false, "range"))
}
// With:
// {{with pipeline}} itemList {{end}}
// {{with pipeline}} itemList {{else}} itemList {{end}}
// If keyword is past.
func (t *Tree) withControl() Node {
return t.newWith(t.parseControl(false, "with"))
}
// End:
// {{end}}
// End keyword is past.
func (t *Tree) endControl() Node {
return t.newEnd(t.expect(itemRightDelim, "end").pos)
}
// Else:
// {{else}}
// Else keyword is past.
func (t *Tree) elseControl() Node {
// Special case for "else if".
peek := t.peekNonSpace()
if peek.typ == itemIf {
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
return t.newElse(peek.pos, t.lex.lineNumber())
}
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
}
// Template:
// {{template stringValue pipeline}}
// Template keyword is past. The name must be something that can evaluate
// to a string.
func (t *Tree) templateControl() Node {
var name string
token := t.nextNonSpace()
switch token.typ {
case itemString, itemRawString:
s, err := strconv.Unquote(token.val)
if err != nil {
t.error(err)
}
name = s
default:
t.unexpected(token, "template invocation")
}
var pipe *PipeNode
if t.nextNonSpace().typ != itemRightDelim {
t.backup()
// Do not pop variables; they persist until "end".
pipe = t.pipeline("template")
}
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
}
// command:
// operand (space operand)*
// space-separated arguments up to a pipeline character or right delimiter.
// we consume the pipe character but leave the right delim to terminate the action.
func (t *Tree) command() *CommandNode {
cmd := t.newCommand(t.peekNonSpace().pos)
for {
t.peekNonSpace() // skip leading spaces.
operand := t.operand()
if operand != nil {
cmd.append(operand)
}
switch token := t.next(); token.typ {
case itemSpace:
continue
case itemError:
t.errorf("%s", token.val)
case itemRightDelim, itemRightParen:
t.backup()
case itemPipe:
default:
t.errorf("unexpected %s in operand; missing space?", token)
}
break
}
if len(cmd.Args) == 0 {
t.errorf("empty command")
}
return cmd
}
// operand:
// term .Field*
// An operand is a space-separated component of a command,
// a term possibly followed by field accesses.
// A nil return means the next item is not an operand.
func (t *Tree) operand() Node {
node := t.term()
if node == nil {
return nil
}
if t.peek().typ == itemField {
chain := t.newChain(t.peek().pos, node)
for t.peek().typ == itemField {
chain.Add(t.next().val)
}
// Compatibility with original API: If the term is of type NodeField
// or NodeVariable, just put more fields on the original.
// Otherwise, keep the Chain node.
// TODO: Switch to Chains always when we can.
switch node.Type() {
case NodeField:
node = t.newField(chain.Position(), chain.String())
case NodeVariable:
node = t.newVariable(chain.Position(), chain.String())
default:
node = chain
}
}
return node
}
// term:
// literal (number, string, nil, boolean)
// function (identifier)
// .
// .Field
// $
// '(' pipeline ')'
// A term is a simple "expression".
// A nil return means the next item is not a term.
func (t *Tree) term() Node {
switch token := t.nextNonSpace(); token.typ {
case itemError:
t.errorf("%s", token.val)
case itemIdentifier:
if !t.hasFunction(token.val) {
t.errorf("function %q not defined", token.val)
}
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
case itemDot:
return t.newDot(token.pos)
case itemNil:
return t.newNil(token.pos)
case itemVariable:
return t.useVar(token.pos, token.val)
case itemField:
return t.newField(token.pos, token.val)
case itemBool:
return t.newBool(token.pos, token.val == "true")
case itemCharConstant, itemComplex, itemNumber:
number, err := t.newNumber(token.pos, token.val, token.typ)
if err != nil {
t.error(err)
}
return number
case itemLeftParen:
pipe := t.pipeline("parenthesized pipeline")
if token := t.next(); token.typ != itemRightParen {
t.errorf("unclosed right paren: unexpected %s", token)
}
return pipe
case itemString, itemRawString:
s, err := strconv.Unquote(token.val)
if err != nil {
t.error(err)
}
return t.newString(token.pos, token.val, s)
}
t.backup()
return nil
}
// hasFunction reports if a function name exists in the Tree's maps.
func (t *Tree) hasFunction(name string) bool {
for _, funcMap := range t.funcs {
if funcMap == nil {
continue
}
if funcMap[name] != nil {
return true
}
}
return false
}
// popVars trims the variable list to the specified length
func (t *Tree) popVars(n int) {
t.vars = t.vars[:n]
}
// useVar returns a node for a variable reference. It errors if the
// variable is not defined.
func (t *Tree) useVar(pos Pos, name string) Node {
v := t.newVariable(pos, name)
for _, varName := range t.vars {
if varName == v.Ident[0] {
return v
}
}
t.errorf("undefined variable %q", v.Ident[0])
return nil
}
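A minimal sketch of driving this parse package directly, assuming the import path github.com/alecthomas/template/parse (the path referenced by the template package below); the template text here is illustrative only.

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Empty delimiters fall back to the defaults "{{" and "}}".
	trees, err := parse.Parse("page", "Hello, {{.Name}}!", "", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(trees["page"].Root.String()) // re-renders the parsed body
}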

View file

@ -1,218 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"reflect"
"github.com/alecthomas/template/parse"
)
// common holds the information shared by related templates.
type common struct {
tmpl map[string]*Template
// We use two maps, one for parsing and one for execution.
// This separation makes the API cleaner since it doesn't
// expose reflection to the client.
parseFuncs FuncMap
execFuncs map[string]reflect.Value
}
// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by html/template and should be treated
// as unexported by all other clients.
type Template struct {
name string
*parse.Tree
*common
leftDelim string
rightDelim string
}
// New allocates a new template with the given name.
func New(name string) *Template {
return &Template{
name: name,
}
}
// Name returns the name of the template.
func (t *Template) Name() string {
return t.name
}
// New allocates a new template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
func (t *Template) New(name string) *Template {
t.init()
return &Template{
name: name,
common: t.common,
leftDelim: t.leftDelim,
rightDelim: t.rightDelim,
}
}
func (t *Template) init() {
if t.common == nil {
t.common = new(common)
t.tmpl = make(map[string]*Template)
t.parseFuncs = make(FuncMap)
t.execFuncs = make(map[string]reflect.Value)
}
}
// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to Parse in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
nt := t.copy(nil)
nt.init()
nt.tmpl[t.name] = nt
for k, v := range t.tmpl {
if k == t.name { // Already installed.
continue
}
// The associated templates share nt's common structure.
tmpl := v.copy(nt.common)
nt.tmpl[k] = tmpl
}
for k, v := range t.parseFuncs {
nt.parseFuncs[k] = v
}
for k, v := range t.execFuncs {
nt.execFuncs[k] = v
}
return nt, nil
}
// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
nt := New(t.name)
nt.Tree = t.Tree
nt.common = c
nt.leftDelim = t.leftDelim
nt.rightDelim = t.rightDelim
return nt
}
// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
if t.common != nil && t.tmpl[name] != nil {
return nil, fmt.Errorf("template: redefinition of template %q", name)
}
nt := t.New(name)
nt.Tree = tree
t.tmpl[name] = nt
return nt, nil
}
// Templates returns a slice of the templates associated with t, including t
// itself.
func (t *Template) Templates() []*Template {
if t.common == nil {
return nil
}
// Return a slice so we don't expose the map.
m := make([]*Template, 0, len(t.tmpl))
for _, v := range t.tmpl {
m = append(m, v)
}
return m
}
// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
t.leftDelim = left
t.rightDelim = right
return t
}
// Funcs adds the elements of the argument map to the template's function map.
// It panics if a value in the map is not a function with appropriate return
// type. However, it is legal to overwrite elements of the map. The return
// value is the template, so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
t.init()
addValueFuncs(t.execFuncs, funcMap)
addFuncs(t.parseFuncs, funcMap)
return t
}
// Lookup returns the template with the given name that is associated with t,
// or nil if there is no such template.
func (t *Template) Lookup(name string) *Template {
if t.common == nil {
return nil
}
return t.tmpl[name]
}
// Parse parses a string into a template. Nested template definitions will be
// associated with the top-level template t. Parse may be called multiple times
// to parse definitions of templates to associate with t. It is an error if a
// resulting template is non-empty (contains content other than template
// definitions) and would replace a non-empty template with the same name.
// (In multiple calls to Parse with the same receiver template, only one call
// can contain text other than space, comments, and template definitions.)
func (t *Template) Parse(text string) (*Template, error) {
t.init()
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
if err != nil {
return nil, err
}
// Add the newly parsed trees, including the one for t, into our common structure.
for name, tree := range trees {
// If the name we parsed is the name of this template, overwrite this template.
// The associate method checks it's not a redefinition.
tmpl := t
if name != t.name {
tmpl = t.New(name)
}
// Even if t == tmpl, we need to install it in the common.tmpl map.
if replace, err := t.associate(tmpl, tree); err != nil {
return nil, err
} else if replace {
tmpl.Tree = tree
}
tmpl.leftDelim = t.leftDelim
tmpl.rightDelim = t.rightDelim
}
return t, nil
}
// associate installs the new template into the group of templates associated
// with t. It is an error to reuse a name except to overwrite an empty
// template. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
if new.common != t.common {
panic("internal error: associate not common")
}
name := new.name
if old := t.tmpl[name]; old != nil {
oldIsEmpty := parse.IsEmptyTree(old.Root)
newIsEmpty := parse.IsEmptyTree(tree.Root)
if newIsEmpty {
// Whether old is empty or not, new is empty; no reason to replace old.
return false, nil
}
if !oldIsEmpty {
return false, fmt.Errorf("template: redefinition of template %q", name)
}
}
t.tmpl[name] = new
return true, nil
}
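A hedged usage sketch for the Template API above, assuming the import path github.com/alecthomas/template; Must and ExecuteTemplate come from the rest of this fork, mirroring the standard text/template, and the names and data are illustrative only.

package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	base := template.Must(template.New("greet").Parse("Hello, {{.Name}}!\n"))

	// Clone copies the namespace, so new definitions land in the copy only.
	variant := template.Must(base.Clone())
	template.Must(variant.New("bye").Parse("Goodbye, {{.Name}}!\n"))

	data := struct{ Name string }{Name: "Caddy"}
	base.Execute(os.Stdout, data)                   // errors ignored for brevity
	variant.ExecuteTemplate(os.Stdout, "bye", data) // prints the extra definition
}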

View file

@ -1,19 +0,0 @@
Copyright (C) 2014 Alec Thomas
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,83 +0,0 @@
package units
// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
// etc.).
type Base2Bytes int64
// Base-2 byte units.
const (
Kibibyte Base2Bytes = 1024
KiB = Kibibyte
Mebibyte = Kibibyte * 1024
MiB = Mebibyte
Gibibyte = Mebibyte * 1024
GiB = Gibibyte
Tebibyte = Gibibyte * 1024
TiB = Tebibyte
Pebibyte = Tebibyte * 1024
PiB = Pebibyte
Exbibyte = Pebibyte * 1024
EiB = Exbibyte
)
var (
bytesUnitMap = MakeUnitMap("iB", "B", 1024)
oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
)
// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
// and KiB are both 1024.
func ParseBase2Bytes(s string) (Base2Bytes, error) {
n, err := ParseUnit(s, bytesUnitMap)
if err != nil {
n, err = ParseUnit(s, oldBytesUnitMap)
}
return Base2Bytes(n), err
}
func (b Base2Bytes) String() string {
return ToString(int64(b), 1024, "iB", "B")
}
var (
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
)
// MetricBytes are SI byte units (1000 bytes in a kilobyte).
type MetricBytes SI
// SI base-10 byte units.
const (
Kilobyte MetricBytes = 1000
KB = Kilobyte
Megabyte = Kilobyte * 1000
MB = Megabyte
Gigabyte = Megabyte * 1000
GB = Gigabyte
Terabyte = Gigabyte * 1000
TB = Terabyte
Petabyte = Terabyte * 1000
PB = Petabyte
Exabyte = Petabyte * 1000
EB = Exabyte
)
// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
func ParseMetricBytes(s string) (MetricBytes, error) {
n, err := ParseUnit(s, metricBytesUnitMap)
return MetricBytes(n), err
}
func (m MetricBytes) String() string {
return ToString(int64(m), 1000, "B", "B")
}
// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
// respectively. That is, KiB represents 1024 and KB represents 1000.
func ParseStrictBytes(s string) (int64, error) {
n, err := ParseUnit(s, bytesUnitMap)
if err != nil {
n, err = ParseUnit(s, metricBytesUnitMap)
}
return int64(n), err
}
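A small usage sketch for the byte helpers above; the import path github.com/alecthomas/units is assumed, since it is not shown in this diff.

package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	n, _ := units.ParseBase2Bytes("1KB") // base-2 parser: KB and KiB both mean 1024
	fmt.Println(n)                       // 1KiB

	strict, _ := units.ParseStrictBytes("1KB") // strict parser: KB is 1000, KiB is 1024
	fmt.Println(strict)                        // 1000

	fmt.Println(units.Mebibyte * 512) // 512MiB
}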

View file

@ -1,13 +0,0 @@
// Package units provides helpful unit multipliers and functions for Go.
//
// The goal of this package is to have functionality similar to the time [1] package.
//
//
// [1] http://golang.org/pkg/time/
//
// It allows for code like this:
//
// n, err := ParseBase2Bytes("1KB")
// // n == 1024
// n = units.Mebibyte * 512
package units

View file

@ -1,26 +0,0 @@
package units
// SI units.
type SI int64
// SI unit multiples.
const (
Kilo SI = 1000
Mega = Kilo * 1000
Giga = Mega * 1000
Tera = Giga * 1000
Peta = Tera * 1000
Exa = Peta * 1000
)
func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
return map[string]float64{
shortSuffix: 1,
"K" + suffix: float64(scale),
"M" + suffix: float64(scale * scale),
"G" + suffix: float64(scale * scale * scale),
"T" + suffix: float64(scale * scale * scale * scale),
"P" + suffix: float64(scale * scale * scale * scale * scale),
"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
}
}

View file

@ -1,138 +0,0 @@
package units
import (
"errors"
"fmt"
"strings"
)
var (
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
)
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
mn := len(siUnits)
out := make([]string, mn)
for i, m := range siUnits {
if n%scale != 0 || i == 0 && n == 0 {
s := suffix
if i == 0 {
s = baseSuffix
}
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
}
n /= scale
if n == 0 {
break
}
}
return strings.Join(out, "")
}
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
// leadingInt consumes the leading [0-9]* from s.
func leadingInt(s string) (x int64, rem string, err error) {
i := 0
for ; i < len(s); i++ {
c := s[i]
if c < '0' || c > '9' {
break
}
if x >= (1<<63-10)/10 {
// overflow
return 0, "", errLeadingInt
}
x = x*10 + int64(c) - '0'
}
return x, s[i:], nil
}
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
orig := s
f := float64(0)
neg := false
// Consume [-+]?
if s != "" {
c := s[0]
if c == '-' || c == '+' {
neg = c == '-'
s = s[1:]
}
}
// Special case: if all that is left is "0", this is zero.
if s == "0" {
return 0, nil
}
if s == "" {
return 0, errors.New("units: invalid " + orig)
}
for s != "" {
g := float64(0) // this element of the sequence
var x int64
var err error
// The next character must be [0-9.]
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
return 0, errors.New("units: invalid " + orig)
}
// Consume [0-9]*
pl := len(s)
x, s, err = leadingInt(s)
if err != nil {
return 0, errors.New("units: invalid " + orig)
}
g = float64(x)
pre := pl != len(s) // whether we consumed anything before a period
// Consume (\.[0-9]*)?
post := false
if s != "" && s[0] == '.' {
s = s[1:]
pl := len(s)
x, s, err = leadingInt(s)
if err != nil {
return 0, errors.New("units: invalid " + orig)
}
scale := 1.0
for n := pl - len(s); n > 0; n-- {
scale *= 10
}
g += float64(x) / scale
post = pl != len(s)
}
if !pre && !post {
// no digits (e.g. ".s" or "-.s")
return 0, errors.New("units: invalid " + orig)
}
// Consume unit.
i := 0
for ; i < len(s); i++ {
c := s[i]
if c == '.' || ('0' <= c && c <= '9') {
break
}
}
u := s[:i]
s = s[i:]
unit, ok := unitMap[u]
if !ok {
return 0, errors.New("units: unknown unit " + u + " in " + orig)
}
f += g * unit
}
if neg {
f = -f
}
if f < float64(-1<<63) || f > float64(1<<63-1) {
return 0, errors.New("units: overflow parsing unit")
}
return int64(f), nil
}

View file

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Cenk Altı
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,66 +0,0 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Use Retry function for retrying operations that may fail.
// If Retry does not meet your needs,
// copy/paste the function into your project and modify as you wish.
//
// There is also Ticker type similar to time.Ticker.
// You can use it if you need to work with channels.
//
// See Examples section below for usage examples.
package backoff
import "time"
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
// NextBackOff returns the duration to wait before retrying the operation,
// or backoff.Stop to indicate that no more retries should be made.
//
// Example usage:
//
// duration := backoff.NextBackOff();
// if (duration == backoff.Stop) {
// // Do not retry operation.
// } else {
// // Sleep for duration and retry operation.
// }
//
NextBackOff() time.Duration
// Reset to initial state.
Reset()
}
// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}
func (b *ZeroBackOff) Reset() {}
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}
func (b *StopBackOff) Reset() {}
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
Interval time.Duration
}
func (b *ConstantBackOff) Reset() {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
return &ConstantBackOff{Interval: d}
}

View file

@ -1,63 +0,0 @@
package backoff
import (
"context"
"time"
)
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface {
BackOff
Context() context.Context
}
type backOffContext struct {
BackOff
ctx context.Context
}
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext {
if ctx == nil {
panic("nil context")
}
if b, ok := b.(*backOffContext); ok {
return &backOffContext{
BackOff: b.BackOff,
ctx: ctx,
}
}
return &backOffContext{
BackOff: b,
ctx: ctx,
}
}
func ensureContext(b BackOff) BackOffContext {
if cb, ok := b.(BackOffContext); ok {
return cb
}
return WithContext(b, context.Background())
}
func (b *backOffContext) Context() context.Context {
return b.ctx
}
func (b *backOffContext) NextBackOff() time.Duration {
select {
case <-b.ctx.Done():
return Stop
default:
}
next := b.BackOff.NextBackOff()
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
return Stop
}
return next
}

View file

@ -1,153 +0,0 @@
package backoff
import (
"math/rand"
"time"
)
/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.
NextBackOff() is calculated using the following formula:
randomized interval =
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.
For example, given the following parameters:
RetryInterval = 2
RandomizationFactor = 0.5
Multiplier = 2
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: Given the default values listed below, and assuming we go over the
MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
InitialInterval time.Duration
RandomizationFactor float64
Multiplier float64
MaxInterval time.Duration
// After MaxElapsedTime the ExponentialBackOff stops.
// It never stops if MaxElapsedTime == 0.
MaxElapsedTime time.Duration
Clock Clock
currentInterval time.Duration
startTime time.Time
}
// Clock is an interface that returns current time for BackOff.
type Clock interface {
Now() time.Time
}
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
DefaultRandomizationFactor = 0.5
DefaultMultiplier = 1.5
DefaultMaxInterval = 60 * time.Second
DefaultMaxElapsedTime = 15 * time.Minute
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
Multiplier: DefaultMultiplier,
MaxInterval: DefaultMaxInterval,
MaxElapsedTime: DefaultMaxElapsedTime,
Clock: SystemClock,
}
b.Reset()
return b
}
type systemClock struct{}
func (t systemClock) Now() time.Time {
return time.Now()
}
// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}
// Reset the interval back to the initial retry interval and restarts the timer.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
b.startTime = b.Clock.Now()
}
// NextBackOff calculates the next backoff interval using the formula:
// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
return Stop
}
defer b.incrementCurrentInterval()
return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
}
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
return b.Clock.Now().Sub(b.startTime)
}
// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
// Check for overflow, if overflow is detected set the current interval to the max interval.
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
b.currentInterval = b.MaxInterval
} else {
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
}
}
// Returns a random value from the following interval:
// [currentInterval - randomizationFactor*currentInterval, currentInterval + randomizationFactor*currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
var delta = randomizationFactor * float64(currentInterval)
var minInterval = float64(currentInterval) - delta
var maxInterval = float64(currentInterval) + delta
// Get a random value from the range [minInterval, maxInterval].
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
// we want a 33% chance for selecting either 1, 2 or 3.
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
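A short sketch of the ExponentialBackOff policy described above, used on its own; the import path github.com/cenkalti/backoff is an assumption based on the copyright holder in the license shown earlier.

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = 2 * time.Second // stop producing intervals after ~2s

	for {
		d := b.NextBackOff()
		if d == backoff.Stop {
			fmt.Println("giving up")
			return
		}
		fmt.Println("sleeping for", d)
		time.Sleep(d)
	}
}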

View file

@ -1,82 +0,0 @@
package backoff
import "time"
// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error
// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy signals to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)
// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
// RetryNotify calls the notify function with the error and wait duration
// for each failed attempt before sleeping.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
var err error
var next time.Duration
var t *time.Timer
cb := ensureContext(b)
b.Reset()
for {
if err = operation(); err == nil {
return nil
}
if permanent, ok := err.(*PermanentError); ok {
return permanent.Err
}
if next = cb.NextBackOff(); next == Stop {
return err
}
if notify != nil {
notify(err, next)
}
if t == nil {
t = time.NewTimer(next)
defer t.Stop()
} else {
t.Reset(next)
}
select {
case <-cb.Context().Done():
return err
case <-t.C:
}
}
}
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
func (e *PermanentError) Error() string {
return e.Err.Error()
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) *PermanentError {
return &PermanentError{
Err: err,
}
}
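A hedged sketch of RetryNotify from above, retrying a flaky operation; the github.com/cenkalti/backoff import path is assumed as before and the operation is made up.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried after a backoff delay
		}
		return nil // success stops the retry loop
	}

	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	if err := backoff.RetryNotify(op, backoff.NewExponentialBackOff(), notify); err != nil {
		fmt.Println("gave up:", err)
	}
}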

View file

@ -1,82 +0,0 @@
package backoff
import (
"sync"
"time"
)
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOffContext
stop chan struct{}
stopOnce sync.Once
}
// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when the Stop
// method is called or the BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: ensureContext(b),
stop: make(chan struct{}),
}
t.b.Reset()
go t.run()
return t
}
// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
t.stopOnce.Do(func() { close(t.stop) })
}
func (t *Ticker) run() {
c := t.c
defer close(c)
// Ticker is guaranteed to tick at least once.
afterC := t.send(time.Now())
for {
if afterC == nil {
return
}
select {
case tick := <-afterC:
afterC = t.send(tick)
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.b.Context().Done():
return
}
}
}
func (t *Ticker) send(tick time.Time) <-chan time.Time {
select {
case t.c <- tick:
case <-t.stop:
return nil
}
next := t.b.NextBackOff()
if next == Stop {
t.Stop()
return nil
}
return time.After(next)
}
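A minimal sketch of the Ticker above: each receive on C is one attempt, spaced by the backoff policy. Import path assumed as before.

package main

import (
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	attempt := 0
	for range ticker.C {
		attempt++
		if attempt == 3 {
			fmt.Println("succeeded on attempt", attempt)
			break // stop ticking once the work succeeds
		}
		fmt.Println("attempt", attempt, "failed, waiting for next tick")
	}
}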

View file

@ -1,35 +0,0 @@
package backoff
import "time"
/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called
Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
return &backOffTries{delegate: b, maxTries: max}
}
type backOffTries struct {
delegate BackOff
maxTries uint64
numTries uint64
}
func (b *backOffTries) NextBackOff() time.Duration {
if b.maxTries > 0 {
if b.maxTries <= b.numTries {
return Stop
}
b.numTries++
}
return b.delegate.NextBackOff()
}
func (b *backOffTries) Reset() {
b.numTries = 0
b.delegate.Reset()
}
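A short sketch of WithMaxRetries capping the policy above at a fixed number of retries; import path assumed as before.

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	// One initial attempt plus at most four retries.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 4)

	err := backoff.Retry(func() error {
		return errors.New("still failing")
	}, policy)
	fmt.Println(err) // the last error is returned once the retries are exhausted
}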

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -1,9 +0,0 @@
// func HasAESNI() bool
TEXT ·HasAESNI(SB),$0
XORQ AX, AX
INCL AX
CPUID
SHRQ $25, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET

View file

@ -1,6 +0,0 @@
// +build amd64
package aesnicheck
// HasAESNI returns whether AES-NI is supported by the CPU.
func HasAESNI() bool

View file

@ -1,8 +0,0 @@
// +build !amd64
package aesnicheck
// HasAESNI returns whether AES-NI is supported by the CPU.
func HasAESNI() bool {
return false
}

View file

@ -1,22 +0,0 @@
// Command aesnicheck queries the CPU for AES-NI support. If AES-NI is supported,
// aesnicheck will print "supported" and exit with a status of 0. If AES-NI is
// not supported, aesnicheck will print "unsupported" and exit with a status of
// -1.
package main
import (
"fmt"
"os"
"github.com/codahale/aesnicheck"
)
func main() {
if aesnicheck.HasAESNI() {
fmt.Println("supported")
os.Exit(0)
} else {
fmt.Println("unsupported")
os.Exit(-1)
}
}

View file

@ -1,9 +0,0 @@
// Package aesnicheck provides a simple check to see if crypto/aes is using
// AES-NI instructions or if the AES transform is being done in software. AES-NI
// is constant-time, which makes it impervious to cache-level timing attacks. For
// security-conscious deployments on public cloud infrastructure (Amazon EC2,
// Google Compute Engine, Microsoft Azure, etc.) this may be critical.
//
// See http://eprint.iacr.org/2014/248 for details on cross-VM timing attacks on
// AES keys.
package aesnicheck

View file

@ -1,21 +0,0 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<http://www.opensource.org/licenses/mit-license.php>

View file

@ -1,31 +0,0 @@
package humanize
import (
"math/big"
)
// order of magnitude (to a max order)
func oomm(n, b *big.Int, maxmag int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
if mag == maxmag && maxmag >= 0 {
break
}
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
// total order of magnitude
// (same as above, but with no upper limit)
func oom(n, b *big.Int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}

View file

@ -1,173 +0,0 @@
package humanize
import (
"fmt"
"math/big"
"strings"
"unicode"
)
var (
bigIECExp = big.NewInt(1024)
// BigByte is one byte in big.Ints
BigByte = big.NewInt(1)
// BigKiByte is 1,024 bytes in big.Ints
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
// BigMiByte is 1,024 k bytes in big.Ints
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
// BigGiByte is 1,024 m bytes in big.Ints
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
// BigTiByte is 1,024 g bytes in big.Ints
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
// BigPiByte is 1,024 t bytes in big.Ints
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
// BigEiByte is 1,024 p bytes in big.Ints
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
// BigZiByte is 1,024 e bytes in big.Ints
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
// BigYiByte is 1,024 z bytes in big.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)
var (
bigSIExp = big.NewInt(1000)
// BigSIByte is one SI byte in big.Ints
BigSIByte = big.NewInt(1)
// BigKByte is 1,000 SI bytes in big.Ints
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
// BigMByte is 1,000 SI k bytes in big.Ints
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
// BigGByte is 1,000 SI m bytes in big.Ints
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
// BigTByte is 1,000 SI g bytes in big.Ints
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
// BigPByte is 1,000 SI t bytes in big.Ints
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
// BigEByte is 1,000 SI p bytes in big.Ints
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
// BigZByte is 1,000 SI e bytes in big.Ints
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
"b": BigByte,
"kib": BigKiByte,
"kb": BigKByte,
"mib": BigMiByte,
"mb": BigMByte,
"gib": BigGiByte,
"gb": BigGByte,
"tib": BigTiByte,
"tb": BigTByte,
"pib": BigPiByte,
"pb": BigPByte,
"eib": BigEiByte,
"eb": BigEByte,
"zib": BigZiByte,
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
"k": BigKByte,
"mi": BigMiByte,
"m": BigMByte,
"gi": BigGiByte,
"g": BigGByte,
"ti": BigTiByte,
"t": BigTByte,
"pi": BigPiByte,
"p": BigPByte,
"ei": BigEiByte,
"e": BigEByte,
"z": BigZByte,
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
}
var ten = big.NewInt(10)
func humanateBigBytes(s, base *big.Int, sizes []string) string {
if s.Cmp(ten) < 0 {
return fmt.Sprintf("%d B", s)
}
c := (&big.Int{}).Set(s)
val, mag := oomm(c, base, len(sizes)-1)
suffix := sizes[mag]
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// BigBytes produces a human readable representation of an SI size.
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
// BigIBytes produces a human readable representation of an IEC size.
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
// ParseBigBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
val := &big.Rat{}
_, err := fmt.Sscanf(num, "%f", val)
if err != nil {
return nil, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bigBytesSizeTable[extra]; ok {
mv := (&big.Rat{}).SetInt(m)
val.Mul(val, mv)
rv := &big.Int{}
rv.Div(val.Num(), val.Denom())
return rv, nil
}
return nil, fmt.Errorf("unhandled size name: %v", extra)
}

View file

@ -1,143 +0,0 @@
package humanize
import (
"fmt"
"math"
"strconv"
"strings"
"unicode"
)
// IEC Sizes.
// kibis of bits
const (
Byte = 1 << (iota * 10)
KiByte
MiByte
GiByte
TiByte
PiByte
EiByte
)
// SI Sizes.
const (
IByte = 1
KByte = IByte * 1000
MByte = KByte * 1000
GByte = MByte * 1000
TByte = GByte * 1000
PByte = TByte * 1000
EByte = PByte * 1000
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kib": KiByte,
"kb": KByte,
"mib": MiByte,
"mb": MByte,
"gib": GiByte,
"gb": GByte,
"tib": TiByte,
"tb": TByte,
"pib": PiByte,
"pb": PByte,
"eib": EiByte,
"eb": EByte,
// Without suffix
"": Byte,
"ki": KiByte,
"k": KByte,
"mi": MiByte,
"m": MByte,
"gi": GiByte,
"g": GByte,
"ti": TiByte,
"t": TByte,
"pi": PiByte,
"p": PByte,
"ei": EiByte,
"e": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(s, 1000, sizes)
}
// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
f, err := strconv.ParseFloat(num, 64)
if err != nil {
return 0, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bytesSizeTable[extra]; ok {
f *= float64(m)
if f >= math.MaxUint64 {
return 0, fmt.Errorf("too large: %v", s)
}
return uint64(f), nil
}
return 0, fmt.Errorf("unhandled size name: %v", extra)
}
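A quick usage sketch for the byte formatters above; the import path github.com/dustin/go-humanize is assumed (the package name is humanize).

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Bytes(82854982))  // 83 MB (SI, base 1000)
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB (IEC, base 1024)

	n, err := humanize.ParseBytes("42 mib")
	fmt.Println(n, err) // 44040192 <nil>
}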

View file

@ -1,108 +0,0 @@
package humanize
import (
"bytes"
"math"
"math/big"
"strconv"
"strings"
)
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
sign := ""
// math.MinInt64 can't be negated to a usable value, so it has to be special cased.
if v == math.MinInt64 {
return "-9,223,372,036,854,775,808"
}
if v < 0 {
sign = "-"
v = 0 - v
}
parts := []string{"", "", "", "", "", "", ""}
j := len(parts) - 1
for v > 999 {
parts[j] = strconv.FormatInt(v%1000, 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
v = v / 1000
j--
}
parts[j] = strconv.Itoa(int(v))
return sign + strings.Join(parts[j:], ",")
}
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
buf := &bytes.Buffer{}
if v < 0 {
buf.Write([]byte{'-'})
v = 0 - v
}
comma := []byte{','}
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
func BigComma(b *big.Int) string {
sign := ""
if b.Sign() < 0 {
sign = "-"
b.Abs(b)
}
athousand := big.NewInt(1000)
c := (&big.Int{}).Set(b)
_, m := oom(c, athousand)
parts := make([]string, m+1)
j := len(parts) - 1
mod := &big.Int{}
for b.Cmp(athousand) >= 0 {
b.DivMod(b, athousand, mod)
parts[j] = strconv.FormatInt(mod.Int64(), 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
j--
}
parts[j] = strconv.Itoa(int(b.Int64()))
return sign + strings.Join(parts[j:], ",")
}

View file

@ -1,40 +0,0 @@
// +build go1.6
package humanize
import (
"bytes"
"math/big"
"strings"
)
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
buf := &bytes.Buffer{}
if v.Sign() < 0 {
buf.Write([]byte{'-'})
v.Abs(v)
}
comma := []byte{','}
parts := strings.Split(v.Text('f', -1), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}

View file

@ -1,23 +0,0 @@
package humanize
import "strconv"
func stripTrailingZeros(s string) string {
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
offset--
break
}
if s[offset] != '0' {
break
}
offset--
}
return s[:offset+1]
}
// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}

View file

@ -1,8 +0,0 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.
Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like, "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

View file

@ -1,192 +0,0 @@
package humanize
/*
Slightly adapted from the source to fit go-humanize.
Author: https://github.com/gorhill
Source: https://gist.github.com/gorhill/5285193
*/
import (
"math"
"strconv"
)
var (
renderFloatPrecisionMultipliers = [...]float64{
1,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000,
}
renderFloatPrecisionRounders = [...]float64{
0.5,
0.05,
0.005,
0.0005,
0.00005,
0.000005,
0.0000005,
0.00000005,
0.000000005,
0.0000000005,
}
)
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer numbers, FormatInteger(),
// which is convenient for calls within templates.
func FormatFloat(format string, n float64) string {
// Special cases:
// NaN = "NaN"
// +Inf = "+Infinity"
// -Inf = "-Infinity"
if math.IsNaN(n) {
return "NaN"
}
if n > math.MaxFloat64 {
return "Infinity"
}
if n < -math.MaxFloat64 {
return "-Infinity"
}
// default format
precision := 2
decimalStr := "."
thousandStr := ","
positiveStr := ""
negativeStr := "-"
if len(format) > 0 {
format := []rune(format)
// If there is an explicit format directive,
// then default values are these:
precision = 9
thousandStr = ""
// collect indices of meaningful formatting directives
formatIndx := []int{}
for i, char := range format {
if char != '#' && char != '0' {
formatIndx = append(formatIndx, i)
}
}
if len(formatIndx) > 0 {
// Directive at index 0:
// Must be a '+'
// Raise an error if not the case
// index: 0123456789
// +0.000,000
// +000,000.0
// +0000.00
// +0000
if formatIndx[0] == 0 {
if format[formatIndx[0]] != '+' {
panic("RenderFloat(): invalid positive sign directive")
}
positiveStr = "+"
formatIndx = formatIndx[1:]
}
// Two directives:
// First is thousands separator
// Raise an error if not followed by 3-digit
// 0123456789
// 0.000,000
// 000,000.00
if len(formatIndx) == 2 {
if (formatIndx[1] - formatIndx[0]) != 4 {
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
}
thousandStr = string(format[formatIndx[0]])
formatIndx = formatIndx[1:]
}
// One directive:
// Directive is decimal separator
// The number of digit-specifier following the separator indicates wanted precision
// 0123456789
// 0.00
// 000,0000
if len(formatIndx) == 1 {
decimalStr = string(format[formatIndx[0]])
precision = len(format) - formatIndx[0] - 1
}
}
}
// generate sign part
var signStr string
if n >= 0.000000001 {
signStr = positiveStr
} else if n <= -0.000000001 {
signStr = negativeStr
n = -n
} else {
signStr = ""
n = 0.0
}
// split number into integer and fractional parts
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
// generate integer part string
intStr := strconv.FormatInt(int64(intf), 10)
// add thousand separator if required
if len(thousandStr) > 0 {
for i := len(intStr); i > 3; {
i -= 3
intStr = intStr[:i] + thousandStr + intStr[i:]
}
}
// no fractional part, we can leave now
if precision == 0 {
return signStr + intStr
}
// generate fractional part
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
// may need padding
if len(fracStr) < precision {
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
}
return signStr + intStr + decimalStr + fracStr
}
// FormatInteger produces a formatted number as string.
// See FormatFloat.
func FormatInteger(format string, n int) string {
return FormatFloat(format, float64(n))
}
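A small sketch of FormatFloat and FormatInteger with the format strings documented above; import path assumed as before.

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.FormatFloat("#,###.##", 12345.6789)) // thousands separator, two decimals
	fmt.Println(humanize.FormatFloat("", 12345.6789))         // default format, equivalent to "#,###.##"
	fmt.Println(humanize.FormatInteger("#,###.", 12345))      // integer with thousands separator
}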

View file

@ -1,25 +0,0 @@
package humanize
import "strconv"
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
suffix := "th"
switch x % 10 {
case 1:
if x%100 != 11 {
suffix = "st"
}
case 2:
if x%100 != 12 {
suffix = "nd"
}
case 3:
if x%100 != 13 {
suffix = "rd"
}
}
return strconv.Itoa(x) + suffix
}

View file

@ -1,113 +0,0 @@
package humanize
import (
"errors"
"math"
"regexp"
"strconv"
)
var siPrefixTable = map[float64]string{
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
-15: "f", // femto
-12: "p", // pico
-9: "n", // nano
-6: "µ", // micro
-3: "m", // milli
0: "",
3: "k", // kilo
6: "M", // mega
9: "G", // giga
12: "T", // tera
15: "P", // peta
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
}
var revSIPrefixTable = revfmap(siPrefixTable)
// revfmap reverses the map and precomputes the power multiplier
func revfmap(in map[float64]string) map[string]float64 {
rv := map[string]float64{}
for k, v := range in {
rv[v] = math.Pow(10, k)
}
return rv
}
var riParseRegex *regexp.Regexp
func init() {
ri := `^([\-0-9.]+)\s?([`
for _, v := range siPrefixTable {
ri += v
}
ri += `]?)(.*)`
riParseRegex = regexp.MustCompile(ri)
}
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
if input == 0 {
return 0, ""
}
mag := math.Abs(input)
exponent := math.Floor(logn(mag, 10))
exponent = math.Floor(exponent/3) * 3
value := mag / math.Pow(10, exponent)
// Handle special case where value is exactly 1000.0
// Should return 1 M instead of 1000 k
if value == 1000.0 {
exponent += 3
value = mag / math.Pow(10, exponent)
}
value = math.Copysign(value, input)
prefix := siPrefixTable[exponent]
return value, prefix
}
// SI returns a string with default formatting.
//
// SI uses Ftoa to format float value, removing trailing zeros.
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
value, prefix := ComputeSI(input)
return Ftoa(value) + " " + prefix + unit
}
var errInvalid = errors.New("invalid input")
// ParseSI parses an SI string back into the number and unit.
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
found := riParseRegex.FindStringSubmatch(input)
if len(found) != 4 {
return 0, "", errInvalid
}
mag := revSIPrefixTable[found[2]]
unit := found[3]
base, err := strconv.ParseFloat(found[1], 64)
return base * mag, unit, err
}
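
// A small round-trip sketch of the SI helpers above, under the same assumed
// go-humanize import (Ftoa comes from another file of the same package).
package main

import (
    "fmt"

    humanize "github.com/dustin/go-humanize"
)

func main() {
    // ComputeSI picks the nearest power-of-1000 prefix.
    v, prefix := humanize.ComputeSI(2.2345e-12)
    fmt.Println(v, prefix) // ≈ 2.2345 p

    // SI attaches the prefix and unit; ParseSI reverses it.
    s := humanize.SI(1000000, "B")
    fmt.Println(s) // 1 MB
    n, unit, err := humanize.ParseSI(s)
    fmt.Println(n, unit, err) // 1e+06 B <nil>
}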

View file

@ -1,117 +0,0 @@
package humanize
import (
"fmt"
"math"
"sort"
"time"
)
// Seconds-based time units
const (
Day = 24 * time.Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
LongTime = 37 * Year
)
// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
return RelTime(then, time.Now(), "ago", "from now")
}
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s",
// DivBy should be time.Minute so that the duration will be expressed
// in minutes.
type RelTimeMagnitude struct {
D time.Duration
Format string
DivBy time.Duration
}
var defaultMagnitudes = []RelTimeMagnitude{
{time.Second, "now", time.Second},
{2 * time.Second, "1 second %s", 1},
{time.Minute, "%d seconds %s", time.Second},
{2 * time.Minute, "1 minute %s", 1},
{time.Hour, "%d minutes %s", time.Minute},
{2 * time.Hour, "1 hour %s", 1},
{Day, "%d hours %s", time.Hour},
{2 * Day, "1 day %s", 1},
{Week, "%d days %s", Day},
{2 * Week, "1 week %s", 1},
{Month, "%d weeks %s", Week},
{2 * Month, "1 month %s", 1},
{Year, "%d months %s", Month},
{18 * Month, "1 year %s", 1},
{2 * Year, "2 years %s", 1},
{LongTime, "%d years %s", Year},
{math.MaxInt64, "a long while %s", 1},
}
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are applied so that
// the label corresponding to the smaller time is used.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
// CustomRelTime formats a time into a relative string.
//
// It takes two times, two labels, and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are applied so that the label corresponding to the
// smaller time is used.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
lbl := albl
diff := b.Sub(a)
if a.After(b) {
lbl = blbl
diff = a.Sub(b)
}
n := sort.Search(len(magnitudes), func(i int) bool {
return magnitudes[i].D >= diff
})
if n >= len(magnitudes) {
n = len(magnitudes) - 1
}
mag := magnitudes[n]
args := []interface{}{}
escaped := false
for _, ch := range mag.Format {
if escaped {
switch ch {
case 's':
args = append(args, lbl)
case 'd':
args = append(args, diff/mag.DivBy)
}
escaped = false
} else {
escaped = ch == '%'
}
}
return fmt.Sprintf(mag.Format, args...)
}
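
// To make the magnitude table above concrete, a minimal sketch with a
// hypothetical custom table, under the same assumed go-humanize import.
package main

import (
    "fmt"
    "math"
    "time"

    humanize "github.com/dustin/go-humanize"
)

func main() {
    fiveMinAgo := time.Now().Add(-5 * time.Minute)

    // Default table: a 5-minute difference lands in the "%d minutes %s" bucket.
    fmt.Println(humanize.Time(fiveMinAgo)) // 5 minutes ago

    // Custom table: %d is diff/DivBy, %s is the "ago"/"from now" label.
    mags := []humanize.RelTimeMagnitude{
        {D: time.Minute, Format: "just now", DivBy: time.Second},
        {D: time.Hour, Format: "%d minutes %s", DivBy: time.Minute},
        {D: math.MaxInt64, Format: "ages %s", DivBy: 1},
    }
    fmt.Println(humanize.CustomRelTime(fiveMinAgo, time.Now(), "ago", "from now", mags))
    // 5 minutes ago
}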

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,457 +0,0 @@
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shlex
/*
Package shlex implements a simple lexer which splits input into tokens using
shell-style rules for quoting and commenting.
*/
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
)
/*
A TokenType is a top-level token; a word, space, comment, unknown.
*/
type TokenType int
/*
A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape.
*/
type RuneTokenType int
type lexerState int
type Token struct {
tokenType TokenType
value string
}
/*
Two tokens are equal if both their types and values are equal. A nil token can
never equal another token.
*/
func (a *Token) Equal(b *Token) bool {
if a == nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
const (
RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}"
RUNE_SPACE string = " \t\r\n"
RUNE_ESCAPING_QUOTE string = "\""
RUNE_NONESCAPING_QUOTE string = "'"
RUNE_ESCAPE = "\\"
RUNE_COMMENT = "#"
RUNETOKEN_UNKNOWN RuneTokenType = 0
RUNETOKEN_CHAR RuneTokenType = 1
RUNETOKEN_SPACE RuneTokenType = 2
RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3
RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4
RUNETOKEN_ESCAPE RuneTokenType = 5
RUNETOKEN_COMMENT RuneTokenType = 6
RUNETOKEN_EOF RuneTokenType = 7
TOKEN_UNKNOWN TokenType = 0
TOKEN_WORD TokenType = 1
TOKEN_SPACE TokenType = 2
TOKEN_COMMENT TokenType = 3
STATE_START lexerState = 0
STATE_INWORD lexerState = 1
STATE_ESCAPING lexerState = 2
STATE_ESCAPING_QUOTED lexerState = 3
STATE_QUOTED_ESCAPING lexerState = 4
STATE_QUOTED lexerState = 5
STATE_COMMENT lexerState = 6
INITIAL_TOKEN_CAPACITY int = 100
)
/*
A type for classifying characters. This allows for different sorts of
classifiers - those accepting extended non-ASCII chars, or strict POSIX
compatibility, for example.
*/
type TokenClassifier struct {
typeMap map[int32]RuneTokenType
}
func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) {
for _, rune := range runes {
(*typeMap)[int32(rune)] = tokenType
}
}
/*
Create a new classifier for basic ASCII characters.
*/
func NewDefaultClassifier() *TokenClassifier {
typeMap := map[int32]RuneTokenType{}
addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR)
addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE)
addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE)
addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT)
return &TokenClassifier{
typeMap: typeMap}
}
func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType {
return classifier.typeMap[rune]
}
/*
A type for turning an input stream into a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
tokenizer *Tokenizer
}
/*
Create a new lexer.
*/
func NewLexer(r io.Reader) (*Lexer, error) {
tokenizer, err := NewTokenizer(r)
if err != nil {
return nil, err
}
lexer := &Lexer{tokenizer: tokenizer}
return lexer, nil
}
/*
Return the next word, and an error value. If there are no more words, the error
will be io.EOF.
*/
func (l *Lexer) NextWord() (string, error) {
var token *Token
var err error
for {
token, err = l.tokenizer.NextToken()
if err != nil {
return "", err
}
switch token.tokenType {
case TOKEN_WORD:
{
return token.value, nil
}
case TOKEN_COMMENT:
{
// skip comments
}
default:
{
panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
}
}
}
return "", io.EOF
}
/*
A type for turning an input stream into a sequence of typed tokens.
*/
type Tokenizer struct {
input *bufio.Reader
classifier *TokenClassifier
}
/*
Create a new tokenizer.
*/
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
input := bufio.NewReader(r)
classifier := NewDefaultClassifier()
tokenizer := &Tokenizer{
input: input,
classifier: classifier}
return tokenizer, nil
}
/*
Scan the stream for the next token.
This uses an internal state machine. It will panic if it encounters a character
which it does not know how to handle.
*/
func (t *Tokenizer) scanStream() (*Token, error) {
state := STATE_START
var tokenType TokenType
value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
var (
nextRune int32
nextRuneType RuneTokenType
err error
)
SCAN:
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err != nil {
if err == io.EOF {
nextRuneType = RUNETOKEN_EOF
err = nil
} else {
return nil, err
}
}
switch state {
case STATE_START: // no runes read yet
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
return nil, io.EOF
}
case RUNETOKEN_CHAR:
{
tokenType = TOKEN_WORD
value = append(value, nextRune)
state = STATE_INWORD
}
case RUNETOKEN_SPACE:
{
}
case RUNETOKEN_ESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
tokenType = TOKEN_WORD
state = STATE_ESCAPING
}
case RUNETOKEN_COMMENT:
{
tokenType = TOKEN_COMMENT
state = STATE_COMMENT
}
default:
{
return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune))
}
}
}
case STATE_INWORD: // in a regular word
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
t.input.UnreadRune()
break SCAN
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING: // the next rune after an escape character
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_INWORD
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_QUOTED_ESCAPING
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED_ESCAPING: // in escaping double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_INWORD
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING_QUOTED
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED: // in non-escaping single quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_INWORD
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_COMMENT:
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
if nextRune == '\n' {
state = STATE_START
break SCAN
} else {
value = append(value, nextRune)
}
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
default:
{
panic(fmt.Sprintf("Unexpected state: %v", state))
}
}
}
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
/*
Return the next token in the stream, and an error value. If there are no more
tokens available, the error value will be io.EOF.
*/
func (t *Tokenizer) NextToken() (*Token, error) {
return t.scanStream()
}
/*
Split a string in to a slice of strings, based upon shell-style rules for
quoting, escaping, and spaces.
*/
func Split(s string) ([]string, error) {
l, err := NewLexer(strings.NewReader(s))
if err != nil {
return nil, err
}
subStrings := []string{}
for {
word, err := l.NextWord()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
}
return subStrings, nil
}
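
// A minimal sketch of the lexer above, assuming the github.com/flynn/go-shlex
// module listed in go.mod.
package main

import (
    "fmt"

    shlex "github.com/flynn/go-shlex"
)

func main() {
    // Double quotes preserve the embedded space; '#' starts a comment that is skipped.
    words, err := shlex.Split(`cp "my file.txt" /tmp # do this later`)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q\n", words) // ["cp" "my file.txt" "/tmp"]
}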

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,428 +0,0 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// Call represents an expected call to a mock.
type Call struct {
t TestReporter // for triggering test failures on invalid call setup
receiver interface{} // the receiver of the method call
method string // the name of the method
methodType reflect.Type // the type of the method
args []Matcher // the args
origin string // file and line number of call setup
preReqs []*Call // prerequisite calls
// Expectations
minCalls, maxCalls int
numCalls int // actual number made
// actions are called when this Call is called. Each action gets the args and
// can set the return values by returning a non-nil slice. Actions run in the
// order they are created.
actions []func([]interface{}) []interface{}
}
// newCall creates a *Call. It requires the method type in order to support
// unexported methods.
func newCall(t TestReporter, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
if h, ok := t.(testHelper); ok {
h.Helper()
}
// TODO: check arity, types.
margs := make([]Matcher, len(args))
for i, arg := range args {
if m, ok := arg.(Matcher); ok {
margs[i] = m
} else if arg == nil {
// Handle nil specially so that passing a nil interface value
// will match the typed nils of concrete args.
margs[i] = Nil()
} else {
margs[i] = Eq(arg)
}
}
origin := callerInfo(3)
actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
// Synthesize the zero value for each of the return args' types.
rets := make([]interface{}, methodType.NumOut())
for i := 0; i < methodType.NumOut(); i++ {
rets[i] = reflect.Zero(methodType.Out(i)).Interface()
}
return rets
}}
return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
}
// AnyTimes allows the expectation to be called 0 or more times
func (c *Call) AnyTimes() *Call {
c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
return c
}
// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also
// sets the maximum number of calls to infinity.
func (c *Call) MinTimes(n int) *Call {
c.minCalls = n
if c.maxCalls == 1 {
c.maxCalls = 1e8
}
return c
}
// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also
// sets the minimum number of calls to 0.
func (c *Call) MaxTimes(n int) *Call {
c.maxCalls = n
if c.minCalls == 1 {
c.minCalls = 0
}
return c
}
// DoAndReturn declares the action to run when the call is matched.
// The return values from this function are returned by the mocked function.
// It takes an interface{} argument to support n-arity functions.
func (c *Call) DoAndReturn(f interface{}) *Call {
// TODO: Check arity and types here, rather than dying badly elsewhere.
v := reflect.ValueOf(f)
c.addAction(func(args []interface{}) []interface{} {
vargs := make([]reflect.Value, len(args))
ft := v.Type()
for i := 0; i < len(args); i++ {
if args[i] != nil {
vargs[i] = reflect.ValueOf(args[i])
} else {
// Use the zero value for the arg.
vargs[i] = reflect.Zero(ft.In(i))
}
}
vrets := v.Call(vargs)
rets := make([]interface{}, len(vrets))
for i, ret := range vrets {
rets[i] = ret.Interface()
}
return rets
})
return c
}
// Do declares the action to run when the call is matched. The function's
// return values are ignored to retain backward compatibility. To use the
// return values call DoAndReturn.
// It takes an interface{} argument to support n-arity functions.
func (c *Call) Do(f interface{}) *Call {
// TODO: Check arity and types here, rather than dying badly elsewhere.
v := reflect.ValueOf(f)
c.addAction(func(args []interface{}) []interface{} {
vargs := make([]reflect.Value, len(args))
ft := v.Type()
for i := 0; i < len(args); i++ {
if args[i] != nil {
vargs[i] = reflect.ValueOf(args[i])
} else {
// Use the zero value for the arg.
vargs[i] = reflect.Zero(ft.In(i))
}
}
v.Call(vargs)
return nil
})
return c
}
// Return declares the values to be returned by the mocked function call.
func (c *Call) Return(rets ...interface{}) *Call {
if h, ok := c.t.(testHelper); ok {
h.Helper()
}
mt := c.methodType
if len(rets) != mt.NumOut() {
c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
}
for i, ret := range rets {
if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
// Identical types; nothing to do.
} else if got == nil {
// Nil needs special handling.
switch want.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
// ok
default:
c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
i, c.receiver, c.method, want, c.origin)
}
} else if got.AssignableTo(want) {
// Assignable type relation. Make the assignment now so that the generated code
// can return the values with a type assertion.
v := reflect.New(want).Elem()
v.Set(reflect.ValueOf(ret))
rets[i] = v.Interface()
} else {
c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
i, c.receiver, c.method, got, want, c.origin)
}
}
c.addAction(func([]interface{}) []interface{} {
return rets
})
return c
}
// Times declares the exact number of times a function call is expected to be executed.
func (c *Call) Times(n int) *Call {
c.minCalls, c.maxCalls = n, n
return c
}
// SetArg declares an action that will set the nth argument's value,
// indirected through a pointer. Or, in the case of a slice, SetArg
// will copy value's elements into the nth argument.
func (c *Call) SetArg(n int, value interface{}) *Call {
if h, ok := c.t.(testHelper); ok {
h.Helper()
}
mt := c.methodType
// TODO: This will break on variadic methods.
// We will need to check those at invocation time.
if n < 0 || n >= mt.NumIn() {
c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
n, mt.NumIn(), c.origin)
}
// Permit setting argument through an interface.
// In the interface case, we don't (nay, can't) check the type here.
at := mt.In(n)
switch at.Kind() {
case reflect.Ptr:
dt := at.Elem()
if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
n, vt, dt, c.origin)
}
case reflect.Interface:
// nothing to do
case reflect.Slice:
// nothing to do
default:
c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]",
n, at, c.origin)
}
c.addAction(func(args []interface{}) []interface{} {
v := reflect.ValueOf(value)
switch reflect.TypeOf(args[n]).Kind() {
case reflect.Slice:
setSlice(args[n], v)
default:
reflect.ValueOf(args[n]).Elem().Set(v)
}
return nil
})
return c
}
// isPreReq returns true if other is a direct or indirect prerequisite to c.
func (c *Call) isPreReq(other *Call) bool {
for _, preReq := range c.preReqs {
if other == preReq || preReq.isPreReq(other) {
return true
}
}
return false
}
// After declares that the call may only match after preReq has been exhausted.
func (c *Call) After(preReq *Call) *Call {
if h, ok := c.t.(testHelper); ok {
h.Helper()
}
if c == preReq {
c.t.Fatalf("A call isn't allowed to be its own prerequisite")
}
if preReq.isPreReq(c) {
c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
}
c.preReqs = append(c.preReqs, preReq)
return c
}
// Returns true if the minimum number of calls has been made.
func (c *Call) satisfied() bool {
return c.numCalls >= c.minCalls
}
// Returns true iff the maximum number of calls has been made.
func (c *Call) exhausted() bool {
return c.numCalls >= c.maxCalls
}
func (c *Call) String() string {
args := make([]string, len(c.args))
for i, arg := range c.args {
args[i] = arg.String()
}
arguments := strings.Join(args, ", ")
return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
}
// Tests if the given call matches the expected call.
// If yes, returns nil. If no, returns an error with a message explaining why it does not match.
func (c *Call) matches(args []interface{}) error {
if !c.methodType.IsVariadic() {
if len(args) != len(c.args) {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
c.origin, len(args), len(c.args))
}
for i, m := range c.args {
if !m.Matches(args[i]) {
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i], m)
}
}
} else {
if len(c.args) < c.methodType.NumIn()-1 {
return fmt.Errorf("Expected call at %s has the wrong number of matchers. Got: %d, want: %d",
c.origin, len(c.args), c.methodType.NumIn()-1)
}
if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
c.origin, len(args), len(c.args))
}
if len(args) < len(c.args)-1 {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
c.origin, len(args), len(c.args)-1)
}
for i, m := range c.args {
if i < c.methodType.NumIn()-1 {
// Non-variadic args
if !m.Matches(args[i]) {
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i], m)
}
continue
}
// The last arg has a possibility of a variadic argument, so let it branch
// sample: Foo(a int, b int, c ...int)
if i < len(c.args) && i < len(args) {
if m.Matches(args[i]) {
// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
// Got Foo(a, b) want Foo(matcherA, matcherB)
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
continue
}
}
// The number of actual args doesn't match the number of matchers,
// or the last matcher is a slice and the last arg is not.
// If this function still matches, it is because the last matcher
// matches all the remaining arguments or the lack of any.
// Convert the remaining arguments, if any, into a slice of the
// expected type.
vargsType := c.methodType.In(c.methodType.NumIn() - 1)
vargs := reflect.MakeSlice(vargsType, 0, len(args)-i)
for _, arg := range args[i:] {
vargs = reflect.Append(vargs, reflect.ValueOf(arg))
}
if m.Matches(vargs.Interface()) {
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
break
}
// Wrong number of matchers or not match. Fail.
// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i:], c.args[i])
}
}
// Check that all prerequisite calls have been satisfied.
for _, preReqCall := range c.preReqs {
if !preReqCall.satisfied() {
return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
c.origin, preReqCall, c)
}
}
// Check that the call is not exhausted.
if c.exhausted() {
return fmt.Errorf("Expected call at %s has already been called the max number of times.", c.origin)
}
return nil
}
// dropPrereqs tells the expected Call to stop re-checking prerequisite
// calls and returns its current set.
func (c *Call) dropPrereqs() (preReqs []*Call) {
preReqs = c.preReqs
c.preReqs = nil
return
}
func (c *Call) call(args []interface{}) []func([]interface{}) []interface{} {
c.numCalls++
return c.actions
}
// InOrder declares that the given calls should occur in order.
func InOrder(calls ...*Call) {
for i := 1; i < len(calls); i++ {
calls[i].After(calls[i-1])
}
}
func setSlice(arg interface{}, v reflect.Value) {
va := reflect.ValueOf(arg)
for i := 0; i < v.Len(); i++ {
va.Index(i).Set(v.Index(i))
}
}
func (c *Call) addAction(action func([]interface{}) []interface{}) {
c.actions = append(c.actions, action)
}

View file

@ -1,108 +0,0 @@
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"bytes"
"fmt"
)
// callSet represents a set of expected calls, indexed by receiver and method
// name.
type callSet struct {
// Calls that are still expected.
expected map[callSetKey][]*Call
// Calls that have been exhausted.
exhausted map[callSetKey][]*Call
}
// callSetKey is the key in the maps in callSet
type callSetKey struct {
receiver interface{}
fname string
}
func newCallSet() *callSet {
return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)}
}
// Add adds a new expected call.
func (cs callSet) Add(call *Call) {
key := callSetKey{call.receiver, call.method}
m := cs.expected
if call.exhausted() {
m = cs.exhausted
}
m[key] = append(m[key], call)
}
// Remove removes an expected call.
func (cs callSet) Remove(call *Call) {
key := callSetKey{call.receiver, call.method}
calls := cs.expected[key]
for i, c := range calls {
if c == call {
// maintain order for remaining calls
cs.expected[key] = append(calls[:i], calls[i+1:]...)
cs.exhausted[key] = append(cs.exhausted[key], call)
break
}
}
}
// FindMatch searches for a matching call. It returns an error with an explanatory message if no call matched.
func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) {
key := callSetKey{receiver, method}
// Search through the expected calls.
expected := cs.expected[key]
var callsErrors bytes.Buffer
for _, call := range expected {
err := call.matches(args)
if err != nil {
fmt.Fprintf(&callsErrors, "\n%v", err)
} else {
return call, nil
}
}
// If we haven't found a match then search through the exhausted calls so we
// get useful error messages.
exhausted := cs.exhausted[key]
for _, call := range exhausted {
if err := call.matches(args); err != nil {
fmt.Fprintf(&callsErrors, "\n%v", err)
}
}
if len(expected)+len(exhausted) == 0 {
fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
}
return nil, errors.New(callsErrors.String())
}
// Failures returns the calls that are not satisfied.
func (cs callSet) Failures() []*Call {
failures := make([]*Call, 0, len(cs.expected))
for _, calls := range cs.expected {
for _, call := range calls {
if !call.satisfied() {
failures = append(failures, call)
}
}
}
return failures
}

View file

@ -1,217 +0,0 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// GoMock - a mock framework for Go.
//
// Standard usage:
// (1) Define an interface that you wish to mock.
// type MyInterface interface {
// SomeMethod(x int64, y string)
// }
// (2) Use mockgen to generate a mock from the interface.
// (3) Use the mock in a test:
// func TestMyThing(t *testing.T) {
// mockCtrl := gomock.NewController(t)
// defer mockCtrl.Finish()
//
// mockObj := something.NewMockMyInterface(mockCtrl)
// mockObj.EXPECT().SomeMethod(4, "blah")
// // pass mockObj to a real object and play with it.
// }
//
// By default, expected calls are not enforced to run in any particular order.
// Call order dependency can be enforced by use of InOrder and/or Call.After.
// Call.After can create more varied call order dependencies, but InOrder is
// often more convenient.
//
// The following examples create equivalent call order dependencies.
//
// Example of using Call.After to chain expected call order:
//
// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
//
// Example of using InOrder to declare expected call order:
//
// gomock.InOrder(
// mockObj.EXPECT().SomeMethod(1, "first"),
// mockObj.EXPECT().SomeMethod(2, "second"),
// mockObj.EXPECT().SomeMethod(3, "third"),
// )
//
// TODO:
// - Handle different argument/return types (e.g. ..., chan, map, interface).
package gomock
import (
"fmt"
"golang.org/x/net/context"
"reflect"
"runtime"
"sync"
)
// A TestReporter is something that can be used to report test failures.
// It is satisfied by the standard library's *testing.T.
type TestReporter interface {
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
}
// A Controller represents the top-level control of a mock ecosystem.
// It defines the scope and lifetime of mock objects, as well as their expectations.
// It is safe to call Controller's methods from multiple goroutines.
type Controller struct {
mu sync.Mutex
t TestReporter
expectedCalls *callSet
finished bool
}
func NewController(t TestReporter) *Controller {
return &Controller{
t: t,
expectedCalls: newCallSet(),
}
}
type cancelReporter struct {
t TestReporter
cancel func()
}
func (r *cancelReporter) Errorf(format string, args ...interface{}) { r.t.Errorf(format, args...) }
func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
defer r.cancel()
r.t.Fatalf(format, args...)
}
// WithContext returns a new Controller and a Context, which is cancelled on any
// fatal failure.
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
ctx, cancel := context.WithCancel(ctx)
return NewController(&cancelReporter{t, cancel}), ctx
}
func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
if h, ok := ctrl.t.(testHelper); ok {
h.Helper()
}
recv := reflect.ValueOf(receiver)
for i := 0; i < recv.Type().NumMethod(); i++ {
if recv.Type().Method(i).Name == method {
return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
}
}
ctrl.t.Fatalf("gomock: failed finding method %s on %T", method, receiver)
panic("unreachable")
}
func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
if h, ok := ctrl.t.(testHelper); ok {
h.Helper()
}
call := newCall(ctrl.t, receiver, method, methodType, args...)
ctrl.mu.Lock()
defer ctrl.mu.Unlock()
ctrl.expectedCalls.Add(call)
return call
}
func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
if h, ok := ctrl.t.(testHelper); ok {
h.Helper()
}
// Nest this code so we can use defer to make sure the lock is released.
actions := func() []func([]interface{}) []interface{} {
ctrl.mu.Lock()
defer ctrl.mu.Unlock()
expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
if err != nil {
origin := callerInfo(2)
ctrl.t.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
}
// Two things happen here:
// * the matching call no longer needs to check prerequisite calls,
// * and the prerequisite calls are no longer expected, so remove them.
preReqCalls := expected.dropPrereqs()
for _, preReqCall := range preReqCalls {
ctrl.expectedCalls.Remove(preReqCall)
}
actions := expected.call(args)
if expected.exhausted() {
ctrl.expectedCalls.Remove(expected)
}
return actions
}()
var rets []interface{}
for _, action := range actions {
if r := action(args); r != nil {
rets = r
}
}
return rets
}
func (ctrl *Controller) Finish() {
if h, ok := ctrl.t.(testHelper); ok {
h.Helper()
}
ctrl.mu.Lock()
defer ctrl.mu.Unlock()
if ctrl.finished {
ctrl.t.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
}
ctrl.finished = true
// If we're currently panicking, probably because this is a deferred call,
// pass through the panic.
if err := recover(); err != nil {
panic(err)
}
// Check that all remaining expected calls are satisfied.
failures := ctrl.expectedCalls.Failures()
for _, call := range failures {
ctrl.t.Errorf("missing call(s) to %v", call)
}
if len(failures) != 0 {
ctrl.t.Fatalf("aborting test due to missing call(s)")
}
}
func callerInfo(skip int) string {
if _, file, line, ok := runtime.Caller(skip + 1); ok {
return fmt.Sprintf("%s:%d", file, line)
}
return "unknown file"
}
type testHelper interface {
TestReporter
Helper()
}
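// Usage sketch: a *testing.T satisfies TestReporter (and the optional
// testHelper above), so a minimal Controller lifecycle looks like this.
package example_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
)

func TestControllerLifecycle(t *testing.T) {
	// NewController ties the mock ecosystem to this test's reporter.
	ctrl := gomock.NewController(t)
	// Finish must be called exactly once, after all expected calls happened.
	defer ctrl.Finish()

	// WithContext is the variant for code under test that takes a context:
	// any Fatalf from a mock also cancels ctx.
	ctrl2, ctx := gomock.WithContext(context.Background(), t)
	defer ctrl2.Finish()
	_ = ctx // pass ctx into the code under test
}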

View file

@@ -1,124 +0,0 @@
//go:generate mockgen -destination mock_matcher/mock_matcher.go github.com/golang/mock/gomock Matcher
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gomock
import (
"fmt"
"reflect"
)
// A Matcher is a representation of a class of values.
// It is used to represent the valid or expected arguments to a mocked method.
type Matcher interface {
// Matches returns whether x is a match.
Matches(x interface{}) bool
// String describes what the matcher matches.
String() string
}
type anyMatcher struct{}
func (anyMatcher) Matches(x interface{}) bool {
return true
}
func (anyMatcher) String() string {
return "is anything"
}
type eqMatcher struct {
x interface{}
}
func (e eqMatcher) Matches(x interface{}) bool {
return reflect.DeepEqual(e.x, x)
}
func (e eqMatcher) String() string {
return fmt.Sprintf("is equal to %v", e.x)
}
type nilMatcher struct{}
func (nilMatcher) Matches(x interface{}) bool {
if x == nil {
return true
}
v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}
func (nilMatcher) String() string {
return "is nil"
}
type notMatcher struct {
m Matcher
}
func (n notMatcher) Matches(x interface{}) bool {
return !n.m.Matches(x)
}
func (n notMatcher) String() string {
// TODO: Improve this if we add a NotString method to the Matcher interface.
return "not(" + n.m.String() + ")"
}
type assignableToTypeOfMatcher struct {
targetType reflect.Type
}
func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
return reflect.TypeOf(x).AssignableTo(m.targetType)
}
func (m assignableToTypeOfMatcher) String() string {
return "is assignable to " + m.targetType.Name()
}
// Constructors
func Any() Matcher { return anyMatcher{} }
func Eq(x interface{}) Matcher { return eqMatcher{x} }
func Nil() Matcher { return nilMatcher{} }
func Not(x interface{}) Matcher {
if m, ok := x.(Matcher); ok {
return notMatcher{m}
}
return notMatcher{Eq(x)}
}
// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
// function is assignable to the type of the parameter to this function.
//
// Example usage:
//
// dbMock.EXPECT().
// Insert(gomock.AssignableToTypeOf(&EmployeeRecord{})).
// Return(errors.New("DB error"))
//
func AssignableToTypeOf(x interface{}) Matcher {
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}
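// Usage sketch: Matches and String are exported, so the constructors above
// can be exercised directly, outside any mock (values here are illustrative).
package example

import (
	"bytes"
	"fmt"

	"github.com/golang/mock/gomock"
)

func matcherDemo() {
	fmt.Println(gomock.Any().Matches(nil))                  // true: anything matches
	fmt.Println(gomock.Eq(5).Matches(5))                    // true: reflect.DeepEqual
	fmt.Println(gomock.Eq(5).Matches(int64(5)))             // false: different types
	fmt.Println(gomock.Nil().Matches((*bytes.Buffer)(nil))) // true: nil pointer
	fmt.Println(gomock.Not("x").Matches("y"))               // true: negated Eq("x")
	// AssignableToTypeOf matches on type, not value.
	fmt.Println(gomock.AssignableToTypeOf(&bytes.Buffer{}).Matches(&bytes.Buffer{})) // true
}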

View file

@@ -1,57 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/golang/mock/gomock (interfaces: Matcher)
// Package mock_gomock is a generated GoMock package.
package mock_gomock
import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockMatcher is a mock of Matcher interface
type MockMatcher struct {
ctrl *gomock.Controller
recorder *MockMatcherMockRecorder
}
// MockMatcherMockRecorder is the mock recorder for MockMatcher
type MockMatcherMockRecorder struct {
mock *MockMatcher
}
// NewMockMatcher creates a new mock instance
func NewMockMatcher(ctrl *gomock.Controller) *MockMatcher {
mock := &MockMatcher{ctrl: ctrl}
mock.recorder = &MockMatcherMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockMatcher) EXPECT() *MockMatcherMockRecorder {
return m.recorder
}
// Matches mocks base method
func (m *MockMatcher) Matches(arg0 interface{}) bool {
ret := m.ctrl.Call(m, "Matches", arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// Matches indicates an expected call of Matches
func (mr *MockMatcherMockRecorder) Matches(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Matches", reflect.TypeOf((*MockMatcher)(nil).Matches), arg0)
}
// String mocks base method
func (m *MockMatcher) String() string {
ret := m.ctrl.Call(m, "String")
ret0, _ := ret[0].(string)
return ret0
}
// String indicates an expected call of String
func (mr *MockMatcherMockRecorder) String() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockMatcher)(nil).String))
}
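// Usage sketch: this generated mock shows how Controller and matchers meet.
// EXPECT() records through RecordCallWithMethodType, and the mocked methods
// dispatch through Controller.Call. The import path below is inferred from
// the go:generate destination above (mock_matcher), package name mock_gomock.
package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	mock_gomock "github.com/golang/mock/gomock/mock_matcher"
)

func TestMockMatcher(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	m := mock_gomock.NewMockMatcher(ctrl)
	// Expect exactly one Matches(7) call and have it report a match.
	m.EXPECT().Matches(gomock.Eq(7)).Return(true)

	if !m.Matches(7) {
		t.Fatal("expected mocked Matches to return true")
	}
}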

View file

@@ -1,31 +0,0 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,229 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
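// Usage sketch of the two exported entry points above, against a generated
// message that ships with this library (the Duration well-known type); it
// just illustrates the deep-copy and merge semantics described in the docs.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 5, Nanos: 100}

	// Clone returns a deep copy; mutating the copy leaves src untouched.
	cp := proto.Clone(src).(*durpb.Duration)
	cp.Seconds = 99
	fmt.Println(src.Seconds, cp.Seconds) // 5 99

	// Merge overwrites dst with the non-zero fields of src (proto3 rules).
	dst := &durpb.Duration{Nanos: 7}
	proto.Merge(dst, src)
	fmt.Println(dst.Seconds, dst.Nanos) // 5 100
}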
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := extendable(in.Addr().Interface()); ok {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}

View file

@@ -1,970 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
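// Worked example of the varint format handled above: the value 150 encodes
// as the two bytes 0x96 0x01 (a tiny sketch using the exported decoder).
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 150 = 0b1001_0110. The low 7 bits (0x16) go in the first byte with the
	// continuation bit set (0x96); the remaining bit (0x01) goes second.
	x, n := proto.DecodeVarint([]byte{0x96, 0x01})
	fmt.Println(x, n) // 150 2
}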
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
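// Round-trip sketch for the fixed-width decoders above, paired with the
// corresponding Buffer encoders from this package's encode side (not shown
// in this diff).
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeFixed64(0xDEADBEEF) // 8 little-endian bytes
	_ = b.EncodeFixed32(42)         // 4 little-endian bytes

	d := proto.NewBuffer(b.Bytes())
	x64, _ := d.DecodeFixed64()
	x32, _ := d.DecodeFixed32()
	fmt.Printf("%#x %d\n", x64, x32) // 0xdeadbeef 42
}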
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
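// Zigzag encoding maps signed values onto unsigned ones so small negatives
// stay short (0, -1, 1, -2, … become 0, 1, 2, 3, …). A round-trip sketch
// using the matching Buffer encoder from the encode side of this package:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag64(uint64(int64(-3))) // -3 zigzags to 5

	d := proto.NewBuffer(b.Bytes())
	u, _ := d.DecodeZigzag64()
	fmt.Println(int64(u)) // -3
}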
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// TODO: check if we can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
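// Round-trip sketch for the length-delimited forms above, using the matching
// Buffer encoders from the encode side of this package:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeStringBytes("hello")         // varint length 5, then the bytes
	_ = b.EncodeRawBytes([]byte{0x01, 0x02}) // varint length 2, then the bytes

	d := proto.NewBuffer(b.Bytes())
	s, _ := d.DecodeStringBytes()
	raw, _ := d.DecodeRawBytes(true) // alloc=true copies out of the buffer
	fmt.Println(s, raw)              // hello [1 2]
}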
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
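// Usage sketch of the exported entry points above: the usual Marshal /
// Unmarshal round trip with a generated message (Duration again).
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	in := &durpb.Duration{Seconds: 3, Nanos: 500}
	buf, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &durpb.Duration{}
	// Unmarshal resets out first; UnmarshalMerge would merge into it instead.
	if err := proto.Unmarshal(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 500
}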
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
extmap[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}

File diff suppressed because it is too large

View file

@@ -1,300 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
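// A small sketch of those rules in practice, using Duration messages
// (proto3, so an unset scalar and its zero value compare equal):
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a := &durpb.Duration{Seconds: 1}
	b := &durpb.Duration{Seconds: 1}
	c := &durpb.Duration{Seconds: 2}

	fmt.Println(proto.Equal(a, b))     // true: same type, same field values
	fmt.Println(proto.Equal(a, c))     // false: Seconds differs
	fmt.Println(proto.Equal(nil, nil)) // true: both nil
}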
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}

View file

@@ -1,587 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) {
if ep, ok := p.(extendableProto); ok {
return ep, ok
}
if ep, ok := p.(extendableProtoV1); ok {
return extensionAdapter{ep}, ok
}
return nil, false
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing Elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
epb, ok := extendable(base)
if !ok {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
epb, ok := extendable(pb)
if !ok {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok = extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, ok := extendable(pb)
if !ok {
return
}
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
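// Illustrative sketch of the Set/Has/Get flow above. Normally the
// *ExtensionDesc comes from generated code (an E_… variable); the descriptor
// below is hand-rolled purely for illustration, against
// descriptor.MessageOptions, which declares an open extension range. Treat
// the field number, name, and tag string as made up for this example.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// Hand-written for illustration only; real code uses the E_… variable that
// protoc-gen-go emits for a declared extension.
var eNickname = &proto.ExtensionDesc{
	ExtendedType:  (*descpb.MessageOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         50001, // inside MessageOptions' extension range (1000 to max)
	Name:          "example.nickname",
	Tag:           "bytes,50001,opt,name=nickname",
}

func main() {
	opts := &descpb.MessageOptions{}

	if err := proto.SetExtension(opts, eNickname, proto.String("caddy")); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(opts, eNickname)) // true

	v, err := proto.GetExtension(opts, eNickname)
	if err != nil {
		panic(err)
	}
	fmt.Println(*v.(*string)) // caddy
}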
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb)
if !ok {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, ok := extendable(pb)
if !ok {
return errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, ok := extendable(pb)
if !ok {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
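// Editor's sketch (not part of the original vendored file): typical client-side use
// of the extension API defined above. The generated package "pb", its extendable
// message pb.MyMessage, and the descriptor pb.E_Nickname (whose ExtensionType is
// *string) are hypothetical stand-ins for real generated code.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/gen" // hypothetical generated package
)

func main() {
	msg := &pb.MyMessage{}
	// SetExtension requires a value of exactly the descriptor's ExtensionType (*string here).
	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("gopher")); err != nil {
		log.Fatal(err)
	}
	if proto.HasExtension(msg, pb.E_Nickname) {
		// GetExtension returns an interface{} holding the *string value (or the default).
		v, err := proto.GetExtension(msg, pb.E_Nickname)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*v.(*string)) // "gopher"
	}
	proto.ClearExtension(msg, pb.E_Nickname) // removes the value again
}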

View file

@@ -1,897 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from snake_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
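// exampleBufferReuse is an editor's illustrative sketch (not part of the original
// file): it reuses a single Buffer across many marshals to reduce allocations. It
// assumes the Buffer.Marshal method defined elsewhere in this package (encode.go);
// msgs and send are hypothetical stand-ins for the caller's data and transport.
func exampleBufferReuse(msgs []Message, send func([]byte)) error {
	var buf Buffer
	for _, m := range msgs {
		buf.Reset() // reuse the underlying byte slice between messages
		if err := buf.Marshal(m); err != nil {
			return err
		}
		send(buf.Bytes()) // Bytes returns the encoded contents of the Buffer
	}
	return nil
}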
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
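// debugPrintExample is an editor's illustrative sketch (not part of the original
// file): DebugPrint can be called on a throwaway Buffer to dump already-encoded
// bytes in a human-readable, tag-by-tag form while debugging.
func debugPrintExample(encoded []byte) {
	new(Buffer).DebugPrint("encoded message", encoded)
}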
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
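// Editor's note (illustrative, not part of the original file): using the Test
// message from the package example above, whose "type" field declares [default=77]:
//
//	t := &pb.Test{Label: proto.String("hello")} // Type left unset (nil)
//	proto.SetDefaults(t)
//	fmt.Println(*t.Type) // prints 77: the proto-declared default has been filled in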
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a defaultMessage describing
// its fields and their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field cannot have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

View file

@@ -1,311 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
if err := encodeExtensions(exts); err != nil {
return nil, err
}
m, _ = exts.extensionsRead()
case map[int32]Extension:
if err := encodeExtensionsMap(exts); err != nil {
return nil, err
}
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m, _ = exts.extensionsRead()
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

View file

@@ -1,484 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(x))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}

View file

@@ -1,270 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *p is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *p to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *p.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
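// Illustrative sketch, not part of the original file: round-tripping an
// optional uint32 field through the helpers above. It assumes it runs inside
// package proto (NewBuffer and the uint32 pool live elsewhere in the package);
// exampleUint32Msg and exampleOptionalUint32 are hypothetical names.
type exampleUint32Msg struct {
	Count *uint32
}

func exampleOptionalUint32() uint32 {
	m := new(exampleUint32Msg)
	o := NewBuffer(nil)
	p := word32(&m.Count) // same **uint32 view that structPointer_Word32 builds
	if word32_IsNil(p) {
		word32_Set(p, o, 42) // takes a slot from o's uint32 pool and points m.Count at it
	}
	return word32_Get(p) // 42
}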
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}


@@ -1,872 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
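// Illustrative sketch, not part of the original file: how the fast/slow split
// above behaves. Tags below tagMapFastLimit are indexed directly in the slice,
// larger tags fall back to the map; exampleTagMap is a hypothetical name.
func exampleTagMap() {
	var tm tagMap
	tm.put(3, 0)    // small tag: stored in fastTags[3]
	tm.put(5000, 1) // >= tagMapFastLimit: stored in slowTags
	if fi, ok := tm.get(3); ok {
		_ = fi // 0
	}
	if _, ok := tm.get(4); ok {
		// never reached: unfilled fastTags entries hold -1 and read as absent
	}
	if fi, ok := tm.get(5000); ok {
		_ = fi // 1
	}
}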
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
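// Illustrative sketch, not part of the original file: what Parse extracts from
// the example tag quoted above. Properties and its fields are exported, so the
// same works for external callers; exampleParse is a hypothetical name.
func exampleParse() {
	p := new(Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
	// p.Optional == true, p.OrigName == "foo",
	// p.HasDefault == true, p.Default == "hello!"
}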
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.dec = (*Buffer).dec_slice_byte
if p.proto3 {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
} else {
p.enc = (*Buffer).enc_slice_byte
p.size = size_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
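// Illustrative sketch, not part of the original file: the "precalculate tag
// code" step above for a concrete field. Tag 49 with wire type bytes packs to
// (49<<3)|2 = 394, which the varint loop emits as the two bytes 0x8A 0x03.
// The same bytes can be produced with the Buffer varint encoder;
// exampleTagcode is a hypothetical name.
func exampleTagcode() []byte {
	var b Buffer
	b.EncodeVarint(uint64(49)<<3 | uint64(WireBytes))
	return b.Bytes() // []byte{0x8a, 0x03}, identical to p.tagcode for that field
}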
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
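// Illustrative sketch, not part of the original file: callers pass the struct
// type itself (not the pointer type), and results are memoized per type, so
// only the first lookup for a message type pays for reflection. The helper
// name and msg parameter are hypothetical.
func examplePropertiesLookup(msg Message) *StructProperties {
	t := reflect.TypeOf(msg).Elem() // *SomeMessage -> SomeMessage struct type
	return GetProperties(t)         // cached after the first call for t
}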
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
reflect.PtrTo(t).Implements(extendableProtoV1Type)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_InternalExtensions" { // special case
p.enc = (*Buffer).enc_exts
p.dec = nil // not needed
p.size = size_exts
} else if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
} else if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
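// Illustrative sketch, not part of the original file: how generated code and
// callers use this registry. The "proto3_proto.Message_Humour" entry is
// registered by the generated file removed further down in this change;
// exampleEnumLookup is a hypothetical name.
func exampleEnumLookup() int32 {
	m := EnumValueMap("proto3_proto.Message_Humour")
	if m == nil {
		return 0 // not linked in
	}
	return m["PUNS"] // 1
}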
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }


@@ -1,347 +0,0 @@
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!
/*
Package proto3_proto is a generated protocol buffer package.
It is generated from these files:
proto3_proto/proto3.proto
It has these top-level messages:
Message
Nested
MessageWithMap
IntMap
IntMaps
*/
package proto3_proto
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
import testdata "github.com/golang/protobuf/proto/testdata"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"`
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"`
ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"`
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"`
ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"`
Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"`
Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Message) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Message) GetHilarity() Message_Humour {
if m != nil {
return m.Hilarity
}
return Message_UNKNOWN
}
func (m *Message) GetHeightInCm() uint32 {
if m != nil {
return m.HeightInCm
}
return 0
}
func (m *Message) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func (m *Message) GetResultCount() int64 {
if m != nil {
return m.ResultCount
}
return 0
}
func (m *Message) GetTrueScotsman() bool {
if m != nil {
return m.TrueScotsman
}
return false
}
func (m *Message) GetScore() float32 {
if m != nil {
return m.Score
}
return 0
}
func (m *Message) GetKey() []uint64 {
if m != nil {
return m.Key
}
return nil
}
func (m *Message) GetShortKey() []int32 {
if m != nil {
return m.ShortKey
}
return nil
}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetRFunny() []Message_Humour {
if m != nil {
return m.RFunny
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *testdata.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
func (m *Message) GetAnything() *google_protobuf.Any {
if m != nil {
return m.Anything
}
return nil
}
func (m *Message) GetManyThings() []*google_protobuf.Any {
if m != nil {
return m.ManyThings
}
return nil
}
func (m *Message) GetSubmessage() *Message {
if m != nil {
return m.Submessage
}
return nil
}
func (m *Message) GetChildren() []*Message {
if m != nil {
return m.Children
}
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Nested) GetBunny() string {
if m != nil {
return m.Bunny
}
return ""
}
func (m *Nested) GetCute() bool {
if m != nil {
return m.Cute
}
return false
}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
type IntMap struct {
Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
}
func (m *IntMap) Reset() { *m = IntMap{} }
func (m *IntMap) String() string { return proto.CompactTextString(m) }
func (*IntMap) ProtoMessage() {}
func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *IntMap) GetRtt() map[int32]int32 {
if m != nil {
return m.Rtt
}
return nil
}
type IntMaps struct {
Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"`
}
func (m *IntMaps) Reset() { *m = IntMaps{} }
func (m *IntMaps) String() string { return proto.CompactTextString(m) }
func (*IntMaps) ProtoMessage() {}
func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *IntMaps) GetMaps() []*IntMap {
if m != nil {
return m.Maps
}
return nil
}
func init() {
proto.RegisterType((*Message)(nil), "proto3_proto.Message")
proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap")
proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps")
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
}
func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 733 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34,
0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a,
0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98,
0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f,
0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3,
0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8,
0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef,
0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7,
0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35,
0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc,
0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5,
0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c,
0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58,
0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61,
0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8,
0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71,
0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a,
0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88,
0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35,
0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63,
0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a,
0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78,
0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2,
0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62,
0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f,
0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11,
0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97,
0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76,
0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8,
0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19,
0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3,
0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0,
0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d,
0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1,
0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9,
0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c,
0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca,
0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51,
0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0,
0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7,
0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05,
0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09,
0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47,
0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91,
0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde,
0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00,
}
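// Illustrative sketch, not part of the generated file: the nil-receiver-safe
// getter pattern above means a getter on a nil *Message returns the zero value
// instead of panicking; exampleGetters is a hypothetical name.
func exampleGetters() {
	var m *Message        // nil
	_ = m.GetName()       // "" rather than a nil-pointer panic
	_ = m.GetHeightInCm() // 0
	m = &Message{Name: "hi", HeightInCm: 170}
	_ = m.GetName() // "hi"
}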


@@ -1,854 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Print("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
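// Illustrative sketch, not part of the original file: which type URLs the Any
// expansion below leaves bare and which it quotes; exampleRequiresQuotes is a
// hypothetical name.
func exampleRequiresQuotes() {
	_ = requiresQuotes("type.googleapis.com/proto3_proto.Message") // false: only letters, digits, '.', '/'
	_ = requiresQuotes("type.googleapis.com:443/x")                // true: ':' forces quoting
}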
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if _, ok := extendable(pv.Interface()); ok {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Bytes())); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := tm.writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
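// Illustrative sketch, not part of the original file: the escaping rules above
// in action. A tab becomes a literal backslash-t and a non-printable byte an
// octal escape; exampleWriteString is a hypothetical name.
func exampleWriteString() string {
	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)
	w := &textWriter{w: bw, complete: true}
	writeString(w, "a\tb\x01")
	bw.Flush()
	return buf.String() // `"a\tb\001"` (backslashes are literal in the output)
}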
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep, _ := extendable(pv.Interface())
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
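// A minimal usage sketch for the helpers above, assuming a hypothetical
// generated message value msg (e.g. a *pb.Person); none of these lines are
// from the upstream file:
//
//	txt := proto.MarshalTextString(msg)      // multi-line text form
//	one := proto.CompactTextString(msg)      // same content on a single line
//	err := proto.MarshalText(os.Stdout, msg) // stream the text form to a writer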

View file

@ -1,895 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(Message)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order. See b/28924776 for a time
// this went wrong.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
switch tok.value {
case "true", "1", "t", "True":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(x)
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}
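// A minimal usage sketch, assuming a hypothetical generated message type
// *pb.Person with name and id fields (not from the upstream file): text
// produced by MarshalTextString can be parsed back with UnmarshalText.
//
//	var p pb.Person
//	if err := proto.UnmarshalText(`name: "Alice" id: 42`, &p); err != nil {
//		// a missing required field surfaces as *proto.RequiredNotSetError
//	}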

View file

@ -1,31 +0,0 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,168 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
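// A minimal construction sketch (not from the upstream file): an Any carries
// another message as its serialized bytes plus a type URL. Assuming a
// hypothetical message m whose fully qualified name is google.profile.Person:
//
//	b, err := proto.Marshal(m)
//	if err != nil {
//		// handle the marshaling error
//	}
//	a := &Any{TypeUrl: "type.googleapis.com/google.profile.Person", Value: b}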
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
}

View file

@ -1,27 +0,0 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

80
vendor/github.com/google/uuid/dce.go generated vendored
View file

@ -1,80 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}
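// exampleDCE is a minimal usage sketch (not from the upstream file): it
// creates a Group-domain DCE Security UUID and reads the embedded domain and
// id back out of it. The id 1000 is an arbitrary example GID.
func exampleDCE() (Domain, uint32, error) {
	u, err := NewDCESecurity(Group, 1000)
	if err != nil {
		return 0, 0, err
	}
	return u.Domain(), u.ID(), nil // Group, 1000
}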

12
vendor/github.com/google/uuid/doc.go generated vendored
View file

@ -1,12 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

View file

@ -1,53 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
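// exampleNameBased is a minimal usage sketch (not from the upstream file):
// name-based UUIDs are deterministic, so hashing the same name in the same
// namespace always yields the same Version 5 value.
func exampleNameBased() UUID {
	return NewSHA1(NameSpaceDNS, []byte("example.com"))
}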

View file

@ -1,39 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
// See comment in ParseBytes why we do this.
// id, err := ParseBytes(data)
id, err := ParseBytes(data)
if err == nil {
*uuid = id
}
return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}
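// exampleBinaryRoundTrip is a minimal usage sketch (not from the upstream
// file): MarshalBinary exposes the raw 16 bytes, so a UUID survives a binary
// round trip unchanged.
func exampleBinaryRoundTrip(u UUID) (UUID, error) {
	b, _ := u.MarshalBinary() // never fails; returns the 16-byte slice
	var out UUID
	err := out.UnmarshalBinary(b)
	return out, err
}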

View file

@ -1,90 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns a copy of the 6 byte node id encoded in uuid. The node ID is
// only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}

View file

@ -1,12 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

View file

@ -1,33 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

59
vendor/github.com/google/uuid/sql.go generated vendored
View file

@ -1,59 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}
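// exampleScan is a minimal usage sketch (not from the upstream file): Scan
// accepts either the canonical 36-byte string form or a raw 16-byte slice,
// which is how most database drivers hand UUIDs back.
func exampleScan() (UUID, error) {
	var u UUID
	err := u.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	return u, err
}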

123
vendor/github.com/google/uuid/time.go generated vendored
View file

@ -1,123 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}

View file

@ -1,43 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

198
vendor/github.com/google/uuid/uuid.go generated vendored
View file

@ -1,198 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error. Both the UUID form of
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
func Parse(s string) (UUID, error) {
var uuid UUID
if len(s) != 36 {
if len(s) != 36+9 {
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
if strings.ToLower(s[:9]) != "urn:uuid:" {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
if len(b) != 36 {
if len(b) != 36+9 {
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
}
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst[:], uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
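// exampleParse is a minimal usage sketch (not from the upstream file): it
// parses the URN string form and reads the version and variant bits back out
// of the result.
func exampleParse() (Version, Variant, error) {
	u, err := Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		return 0, 0, err
	}
	return u.Version(), u.Variant(), nil // VERSION_1, RFC4122
}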

View file

@ -1,44 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID[:])
return uuid, nil
}
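// exampleV1 is a minimal usage sketch (not from the upstream file): a Version
// 1 UUID embeds a 60-bit timestamp and the node ID, both of which can be
// recovered from the generated value.
func exampleV1() (Time, []byte, error) {
	u, err := NewUUID()
	if err != nil {
		return 0, nil, err
	}
	return u.Time(), u.NodeID(), nil
}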

View file

@ -1,38 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
var uuid UUID
_, err := io.ReadFull(rander, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
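// exampleRandom is a minimal usage sketch (not from the upstream file): New
// panics if the random source fails, while NewRandom reports the error
// instead, so the latter suits long-running servers.
func exampleRandom() (string, error) {
	u, err := NewRandom() // Version 4, variant 10
	if err != nil {
		return "", err
	}
	return u.String(), nil
}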

View file

@ -1,22 +0,0 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,392 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
// NewClient creates a new client connection using the given net connection.
// The URL u specifies the host and request URI. Use requestHeader to specify
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
// (Cookie). Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etc.
//
// Deprecated: Use Dialer instead.
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
d := Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
NetDial: func(net, addr string) (net.Conn, error) {
return netConn, nil
},
}
return d.Dial(u.String(), requestHeader)
}
// A Dialer contains options for connecting to WebSocket server.
type Dialer struct {
// NetDial specifies the dial function for creating TCP connections. If
// NetDial is nil, net.Dial is used.
NetDial func(network, addr string) (net.Conn, error)
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*http.Request) (*url.URL, error)
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
// If nil, the default configuration is used.
TLSClientConfig *tls.Config
// HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
// size is zero, then a useful default size is used. The I/O buffer sizes
// do not limit the size of the messages that can be sent or received.
ReadBufferSize, WriteBufferSize int
// Subprotocols specifies the client's requested subprotocols.
Subprotocols []string
// EnableCompression specifies if the client should attempt to negotiate
// per message compression (RFC 7692). Setting this value to true does not
// guarantee that compression will be supported. Currently only "no context
// takeover" modes are supported.
EnableCompression bool
// Jar specifies the cookie jar.
// If Jar is nil, cookies are not sent in requests and ignored
// in responses.
Jar http.CookieJar
}
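// exampleDialer is a hypothetical, illustrative value showing how the fields
// above are typically filled in; the settings are placeholders, not defaults
// of the upstream package.
var exampleDialer = &Dialer{
	Proxy:             http.ProxyFromEnvironment,
	HandshakeTimeout:  10 * time.Second,
	TLSClientConfig:   &tls.Config{MinVersion: tls.VersionTLS12},
	Subprotocols:      []string{"chat"},
	EnableCompression: true,
}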
var errMalformedURL = errors.New("malformed ws or wss URL")
// parseURL parses the URL.
//
// This function is a replacement for the standard library url.Parse function.
// In Go 1.4 and earlier, url.Parse loses information from the path.
func parseURL(s string) (*url.URL, error) {
// From the RFC:
//
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
var u url.URL
switch {
case strings.HasPrefix(s, "ws://"):
u.Scheme = "ws"
s = s[len("ws://"):]
case strings.HasPrefix(s, "wss://"):
u.Scheme = "wss"
s = s[len("wss://"):]
default:
return nil, errMalformedURL
}
if i := strings.Index(s, "?"); i >= 0 {
u.RawQuery = s[i+1:]
s = s[:i]
}
if i := strings.Index(s, "/"); i >= 0 {
u.Opaque = s[i:]
s = s[:i]
} else {
u.Opaque = "/"
}
u.Host = s
if strings.Contains(u.Host, "@") {
// Don't bother parsing user information because user information is
// not allowed in websocket URIs.
return nil, errMalformedURL
}
return &u, nil
}
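// exampleParseURL is a hypothetical, illustrative helper showing what parseURL
// yields for two placeholder inputs, following the ws-URI grammar quoted above.
func exampleParseURL() {
	u, _ := parseURL("wss://example.com/chat?id=1")
	_ = u // Scheme "wss", Host "example.com", Opaque "/chat", RawQuery "id=1"
	_, err := parseURL("ws://user@example.com/")
	_ = err // errMalformedURL: user info is not allowed in websocket URIs
}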
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
hostPort = u.Host
hostNoPort = u.Host
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
hostNoPort = hostNoPort[:i]
} else {
switch u.Scheme {
case "wss":
hostPort += ":443"
case "https":
hostPort += ":443"
default:
hostPort += ":80"
}
}
return hostPort, hostNoPort
}
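// exampleHostPortNoPort is a hypothetical, illustrative helper showing the
// default-port handling above for placeholder hosts.
func exampleHostPortNoPort() {
	hp, hnp := hostPortNoPort(&url.URL{Scheme: "wss", Host: "example.com"})
	_, _ = hp, hnp // "example.com:443", "example.com"
	hp, hnp = hostPortNoPort(&url.URL{Scheme: "ws", Host: "example.com:8080"})
	_, _ = hp, hnp // "example.com:8080", "example.com"
}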
// DefaultDialer is a dialer with all fields set to the default zero values.
var DefaultDialer = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
// Dial creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
if d == nil {
d = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
}
challengeKey, err := generateChallengeKey()
if err != nil {
return nil, nil, err
}
u, err := parseURL(urlStr)
if err != nil {
return nil, nil, err
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
default:
return nil, nil, errMalformedURL
}
if u.User != nil {
// User name and password are not allowed in websocket URIs.
return nil, nil, errMalformedURL
}
req := &http.Request{
Method: "GET",
URL: u,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: u.Host,
}
// Set the cookies present in the cookie jar of the dialer
if d.Jar != nil {
for _, cookie := range d.Jar.Cookies(u) {
req.AddCookie(cookie)
}
}
// Set the request headers using the capitalization for names and values in
// RFC examples. Although the capitalization shouldn't matter, there are
// servers that depend on it. The Header.Set method is not used because the
// method canonicalizes the header names.
req.Header["Upgrade"] = []string{"websocket"}
req.Header["Connection"] = []string{"Upgrade"}
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
req.Header["Sec-WebSocket-Version"] = []string{"13"}
if len(d.Subprotocols) > 0 {
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
}
for k, vs := range requestHeader {
switch {
case k == "Host":
if len(vs) > 0 {
req.Host = vs[0]
}
case k == "Upgrade" ||
k == "Connection" ||
k == "Sec-Websocket-Key" ||
k == "Sec-Websocket-Version" ||
k == "Sec-Websocket-Extensions" ||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
default:
req.Header[k] = vs
}
}
if d.EnableCompression {
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
}
hostPort, hostNoPort := hostPortNoPort(u)
var proxyURL *url.URL
// Check whether the proxy method has been configured
if d.Proxy != nil {
proxyURL, err = d.Proxy(req)
}
if err != nil {
return nil, nil, err
}
var targetHostPort string
if proxyURL != nil {
targetHostPort, _ = hostPortNoPort(proxyURL)
} else {
targetHostPort = hostPort
}
var deadline time.Time
if d.HandshakeTimeout != 0 {
deadline = time.Now().Add(d.HandshakeTimeout)
}
netDial := d.NetDial
if netDial == nil {
netDialer := &net.Dialer{Deadline: deadline}
netDial = netDialer.Dial
}
netConn, err := netDial("tcp", targetHostPort)
if err != nil {
return nil, nil, err
}
defer func() {
if netConn != nil {
netConn.Close()
}
}()
if err := netConn.SetDeadline(deadline); err != nil {
return nil, nil, err
}
if proxyURL != nil {
connectHeader := make(http.Header)
if user := proxyURL.User; user != nil {
proxyUser := user.Username()
if proxyPassword, passwordSet := user.Password(); passwordSet {
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
}
}
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: hostPort},
Host: hostPort,
Header: connectHeader,
}
connectReq.Write(netConn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(netConn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
return nil, nil, err
}
if resp.StatusCode != 200 {
f := strings.SplitN(resp.Status, " ", 2)
return nil, nil, errors.New(f[1])
}
}
if u.Scheme == "https" {
cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" {
cfg.ServerName = hostNoPort
}
tlsConn := tls.Client(netConn, cfg)
netConn = tlsConn
if err := tlsConn.Handshake(); err != nil {
return nil, nil, err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return nil, nil, err
}
}
}
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
if err := req.Write(netConn); err != nil {
return nil, nil, err
}
resp, err := http.ReadResponse(conn.br, req)
if err != nil {
return nil, nil, err
}
if d.Jar != nil {
if rc := resp.Cookies(); len(rc) > 0 {
d.Jar.SetCookies(u, rc)
}
}
if resp.StatusCode != 101 ||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
// Before closing the network connection on return from this
// function, slurp up some of the response to aid application
// debugging.
buf := make([]byte, 1024)
n, _ := io.ReadFull(resp.Body, buf)
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
return nil, resp, ErrBadHandshake
}
for _, ext := range parseExtensions(resp.Header) {
if ext[""] != "permessage-deflate" {
continue
}
_, snct := ext["server_no_context_takeover"]
_, cnct := ext["client_no_context_takeover"]
if !snct || !cnct {
return nil, resp, errInvalidCompression
}
conn.newCompressionWriter = compressNoContextTakeover
conn.newDecompressionReader = decompressNoContextTakeover
break
}
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
netConn.SetDeadline(time.Time{})
netConn = nil // to avoid close in defer.
return conn, resp, nil
}
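// exampleDial is a hypothetical, illustrative helper showing a typical call to
// Dial and how the returned *http.Response can be inspected when the handshake
// fails, as described in the doc comment above. The URL and Origin header are
// placeholders.
func exampleDial() {
	header := http.Header{}
	header.Set("Origin", "https://example.com")
	conn, resp, err := DefaultDialer.Dial("wss://example.com/ws", header)
	if err == ErrBadHandshake && resp != nil {
		// resp.Body carries up to 1024 bytes of the server reply for debugging.
		body, _ := ioutil.ReadAll(resp.Body)
		_ = body
		return
	}
	if err != nil {
		return
	}
	defer conn.Close()
	_ = conn.subprotocol // the negotiated Sec-WebSocket-Protocol, if any
}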


@@ -1,16 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}

Some files were not shown because too many files have changed in this diff.