Migrate to Go111Modules, Add Dockerfile and GoReleaser support

James Mills 2020-03-21 08:20:16 +10:00
parent 13ffd71171
commit d7a1eab5be
No known key found for this signature in database
GPG key ID: AC4C014F1440EBD6
423 changed files with 190 additions and 246252 deletions

9
.gitignore vendored

@@ -1,4 +1,9 @@
*~
*.bak
*.key
/dist
/tube
videos/*
!videos/README.md
onion.key
build/bin

35
.goreleaser.yml Normal file

@@ -0,0 +1,35 @@
---
builds:
  -
    flags: -tags "static_build"
    ldflags: -w -X github.com/prologic/tube/.Version={{.Version}} -X github.com/prologic/tube/.Commit={{.Commit}}
    env:
      - CGO_ENABLED=0
    goos:
      - windows
      - freebsd
      - darwin
      - linux
    goarch:
      - amd64
      - arm
      - arm64
sign:
  artifacts: checksum
archive:
  replacements:
    darwin: Darwin
    linux: Linux
    windows: Windows
    386: i386
    amd64: x86_64
checksum:
  name_template: 'checksums.txt'
snapshot:
  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'
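
Note: `-X` only takes effect if the named package-level string variables actually exist in the built package. A minimal sketch of the counterpart declarations, assuming they live in the root `main` package (the variable names and package layout here are an illustration, not taken from this commit; the Makefile below injects the same two values):

```go
package main

import "fmt"

// Version and Commit are placeholders that the linker overwrites at build
// time via -ldflags "-X <import/path>.Version=... -X <import/path>.Commit=...",
// both from goreleaser and from the Makefile's build target.
var (
	Version = "dev"
	Commit  = "HEAD"
)

func main() {
	// e.g. printed by a hypothetical --version flag
	fmt.Printf("tube %s@%s\n", Version, Commit)
}
```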

29
.yamllint.yml Normal file

@@ -0,0 +1,29 @@
---
yaml-files:
  - '*.yaml'
  - '*.yml'
  - '.yamllint'

rules:
  braces: enable
  brackets: enable
  colons: enable
  commas: enable
  comments: disable
  comments-indentation: disable
  document-end: disable
  document-start:
    level: warning
  empty-lines: enable
  empty-values: disable
  hyphens: enable
  indentation: enable
  key-duplicates: enable
  key-ordering: disable
  line-length: disable
  new-line-at-end-of-file: enable
  new-lines: enable
  octal-values: enable
  quoted-strings: disable
  trailing-spaces: enable
  truthy: disable

10
Dockerfile Normal file

@@ -0,0 +1,10 @@
# Build
FROM prologic/go-builder:latest AS build
# Runtime
FROM alpine
COPY --from=build /src/tube /tube
ENTRYPOINT ["/tube"]
CMD [""]

67
Gopkg.lock generated

@@ -1,67 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "github.com/dhowden/tag"
packages = ["."]
revision = "db0c67e351b1bfbdfc4f99c911e8afd0ca67de98"
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
name = "github.com/gorilla/feeds"
packages = ["."]
revision = "2079b9bbce59c062ad62eecb771e3ed3a772b8e4"
version = "v1.1.1"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "ed099d42384823742bba0bf9a72b53b55c9e2e38"
version = "v1.7.2"
[[projects]]
branch = "master"
name = "github.com/wybiral/torgo"
packages = ["."]
revision = "a19a6c8a50489e2fdf37223edc5f1284bb965308"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"ed25519",
"ed25519/internal/edwards25519",
"sha3"
]
revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"internal/socks",
"proxy"
]
revision = "da137c7871d730100384dbcf36e6f8fa493aef5b"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix"
]
revision = "04f50cda93cbb67f2afa353c52f342100e80e625"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c0b13b7e13cee43839881abf059f0b69ef9385100bcad825291bce099a60c3d8"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -1,38 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
branch = "master"
name = "github.com/dhowden/tag"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.7.2"
[prune]
go-tests = true
unused-packages = true

28
Makefile Normal file

@@ -0,0 +1,28 @@
.PHONY: dev build install test release clean

CGO_ENABLED=0
VERSION=$(shell git describe --abbrev=0 --tags)
COMMIT=$(shell git rev-parse --short HEAD)

all: dev

dev: build
	@./tube

build: clean
	@go build \
		-tags "netgo static_build" -installsuffix netgo \
		-ldflags "-w -X $(shell go list).Version=$(VERSION) -X $(shell go list).Commit=$(COMMIT)" \
		.

install: build
	@go install

test: install
	@go test

release:
	@./tools/release.sh

clean:
	@git clean -f -d -X


@@ -1,54 +0,0 @@
# Build Go project for different platforms and create zip archive of project
# directory structure.

import os
import zipfile


def build(pkg, bin, env):
    src = 'github.com/wybiral/tube'
    x = os.system('{env} go build -o bin/{bin} {src}'.format(
        env=env,
        bin=bin,
        src=src,
    ))
    if x != 0:
        print('Error building ' + pkg)
        return
    z = zipfile.ZipFile('bin/' + pkg, mode='w')
    z.write('bin/' + bin, arcname=bin)
    z.write('../config.json', arcname='config.json')
    z.write('../README.md', 'README.md')
    z.write('../videos/README.md', 'videos/README.md')
    for filename in os.listdir('../static'):
        z.write('../static/' + filename, 'static/' + filename)
    for filename in os.listdir('../templates'):
        z.write('../templates/' + filename, 'templates/' + filename)
    # cleanup executable
    os.remove('bin/' + bin)
    print('Built ' + pkg)


build(
    pkg='tube_linux.zip',
    bin='tube',
    env='GOOS=linux GOARCH=amd64',
)
build(
    pkg='tube_windows.zip',
    bin='tube.exe',
    env='GOOS=windows GOARCH=amd64',
)
build(
    pkg='tube_osx.zip',
    bin='tube',
    env='GOOS=darwin GOARCH=amd64',
)
build(
    pkg='tube_arm6.zip',
    bin='tube',
    env='GOOS=linux GOARCH=arm GOARM=6',
)
print('Done.')


@@ -7,7 +7,7 @@
],
"server": {
"host": "127.0.0.1",
"port": 0
"port": 8000
},
"feed": {
"external_url": "",

15
go.mod Normal file

@@ -0,0 +1,15 @@
module github.com/wybiral/tube

go 1.14

require (
	github.com/dhowden/tag v0.0.0-20190519100835-db0c67e351b1
	github.com/fsnotify/fsnotify v1.4.7
	github.com/gorilla/feeds v1.1.1
	github.com/gorilla/mux v1.7.2
	github.com/wybiral/feeds v1.1.1
	github.com/wybiral/torgo v0.0.0-20190413024533-a19a6c8a5048
	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
	golang.org/x/net v0.0.0-20190628185345-da137c7871d7
	golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
)
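
Application code does not change for this migration; imports keep using the same paths and are now resolved through `go.mod` rather than `Gopkg.lock`/`vendor`. A throwaway sketch using the pinned `gorilla/mux` (the route and port are illustrative, not tube's actual wiring):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux" // resolved via go.mod (v1.7.2) instead of vendor/
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("tube"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8000", r))
}
```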

22
go.sum Normal file

@@ -0,0 +1,22 @@
github.com/dhowden/tag v0.0.0-20190519100835-db0c67e351b1 h1:HR8W6GvuS20j4kNxa/XQeyVA0vHLKVMCAVJj0RGWauY=
github.com/dhowden/tag v0.0.0-20190519100835-db0c67e351b1/go.mod h1:SniNVYuaD1jmdEEvi+7ywb1QFR7agjeTdGKyFb0p7Rw=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gorilla/feeds v1.1.1/go.mod h1:Nk0jZrvPFZX1OBe5NPiddPw7CfwF6Q9eqzaBbaightA=
github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/wybiral/feeds v1.1.1 h1:KWE/JxA2XfP0My+C0wymqXrWK5oIyjRVbH5kpclsea0=
github.com/wybiral/feeds v1.1.1/go.mod h1:MRSqtY+Oy5HMM51cF212xqU39MYbsYq/AH+PY8IfeM0=
github.com/wybiral/torgo v0.0.0-20190413024533-a19a6c8a5048 h1:HQmSLHtGgSARs7VYzAID703qM/5m1il3X7f9AHfg/9c=
github.com/wybiral/torgo v0.0.0-20190413024533-a19a6c8a5048/go.mod h1:LAhGyZRjuXZ/+uO4tqc5QV26hkdIo+yGHPfX1aubR0M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

25
tools/release.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/sh
# Get the highest tag number
VERSION="$(git describe --abbrev=0 --tags)"
VERSION=${VERSION:-'0.0.0'}
# Get number parts
MAJOR="${VERSION%%.*}"; VERSION="${VERSION#*.}"
MINOR="${VERSION%%.*}"; VERSION="${VERSION#*.}"
PATCH="${VERSION%%.*}"; VERSION="${VERSION#*.}"
# Increase version
PATCH=$((PATCH+1))
TAG="${1}"
if [ "${TAG}" = "" ]; then
TAG="${MAJOR}.${MINOR}.${PATCH}"
fi
echo "Releasing ${TAG} ..."
git tag -a -s -m "Release ${TAG}" "${TAG}"
git push --tags
goreleaser release --rm-dist


@@ -1,19 +0,0 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org
root = true
[*.go]
indent_style = tab
indent_size = 3
[*.md]
trim_trailing_whitespace = false
[*]
# We recommend you to keep these unchanged
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true


@@ -1,5 +0,0 @@
language: go
go:
- 1.7
- tip


@@ -1,23 +0,0 @@
Copyright 2015, David Howden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,72 +0,0 @@
# MP3/MP4/OGG/FLAC metadata parsing library
[![Build Status](https://travis-ci.org/dhowden/tag.svg?branch=master)](https://travis-ci.org/dhowden/tag)
[![GoDoc](https://godoc.org/github.com/dhowden/tag?status.svg)](https://godoc.org/github.com/dhowden/tag)
This package provides MP3 (ID3v1,2.{2,3,4}) and MP4 (ACC, M4A, ALAC), OGG and FLAC metadata detection, parsing and artwork extraction.
Detect and parse tag metadata from an `io.ReadSeeker` (i.e. an `*os.File`):
```go
m, err := tag.ReadFrom(f)
if err != nil {
log.Fatal(err)
}
log.Print(m.Format()) // The detected format.
log.Print(m.Title()) // The title of the track (see Metadata interface for more details).
```
Parsed metadata is exported via a single interface (giving a consistent API for all supported metadata formats).
```go
// Metadata is an interface which is used to describe metadata retrieved by this package.
type Metadata interface {
Format() Format
FileType() FileType
Title() string
Album() string
Artist() string
AlbumArtist() string
Composer() string
Genre() string
Year() int
Track() (int, int) // Number, Total
Disc() (int, int) // Number, Total
Picture() *Picture // Artwork
Lyrics() string
Comment() string
Raw() map[string]interface{} // NB: raw tag names are not consistent across formats.
}
```
## Audio Data Checksum (SHA1)
This package also provides a metadata-invariant checksum for audio files: only the audio data is used to
construct the checksum.
[http://godoc.org/github.com/dhowden/tag#Sum](http://godoc.org/github.com/dhowden/tag#Sum)
## Tools
There are simple command-line tools which demonstrate basic tag extraction and summing:
```console
$ go get github.com/dhowden/tag/...
$ cd $GOPATH/bin
$ ./tag 11\ High\ Hopes.m4a
Metadata Format: MP4
Title: High Hopes
Album: The Division Bell
Artist: Pink Floyd
Composer: Abbey Road Recording Studios/David Gilmour/Polly Samson
Year: 1994
Track: 11 of 11
Disc: 1 of 1
Picture: Picture{Ext: jpeg, MIMEType: image/jpeg, Type: , Description: , Data.Size: 606109}
$ ./sum 11\ High\ Hopes.m4a
2ae208c5f00a1f21f5fac9b7f6e0b8e52c06da29
```
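
A short sketch of the checksum mentioned above, combined with `ReadFrom`; it assumes the `Sum(io.ReadSeeker) (string, error)` signature documented at the godoc link, and the file path is illustrative:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/dhowden/tag"
)

func main() {
	f, err := os.Open("11 High Hopes.m4a") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	m, err := tag.ReadFrom(f) // parse the tag metadata
	if err != nil {
		log.Fatal(err)
	}

	// Rewind before hashing; Sum reads the audio data itself.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	sum, err := tag.Sum(f) // metadata-invariant SHA1 of the audio data
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("%s / %s (audio checksum %s)", m.Artist(), m.Title(), sum)
}
```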

110
vendor/github.com/dhowden/tag/dsf.go generated vendored

@@ -1,110 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"errors"
"io"
)
// ReadDSFTags reads DSF metadata from the io.ReadSeeker, returning the resulting
// metadata in a Metadata implementation, or non-nil error if there was a problem.
// samples: http://www.2l.no/hires/index.html
func ReadDSFTags(r io.ReadSeeker) (Metadata, error) {
dsd, err := readString(r, 4)
if err != nil {
return nil, err
}
if dsd != "DSD " {
return nil, errors.New("expected 'DSD '")
}
_, err = r.Seek(int64(16), io.SeekCurrent)
if err != nil {
return nil, err
}
n4, err := readBytes(r, 8)
if err != nil {
return nil, err
}
id3Pointer := getIntLittleEndian(n4)
_, err = r.Seek(int64(id3Pointer), io.SeekStart)
if err != nil {
return nil, err
}
id3, err := ReadID3v2Tags(r)
if err != nil {
return nil, err
}
return metadataDSF{id3}, nil
}
type metadataDSF struct {
id3 Metadata
}
func (m metadataDSF) Format() Format {
return m.id3.Format()
}
func (m metadataDSF) FileType() FileType {
return DSF
}
func (m metadataDSF) Title() string {
return m.id3.Title()
}
func (m metadataDSF) Album() string {
return m.id3.Album()
}
func (m metadataDSF) Artist() string {
return m.id3.Artist()
}
func (m metadataDSF) AlbumArtist() string {
return m.id3.AlbumArtist()
}
func (m metadataDSF) Composer() string {
return m.id3.Composer()
}
func (m metadataDSF) Year() int {
return m.id3.Year()
}
func (m metadataDSF) Genre() string {
return m.id3.Genre()
}
func (m metadataDSF) Track() (int, int) {
return m.id3.Track()
}
func (m metadataDSF) Disc() (int, int) {
return m.id3.Disc()
}
func (m metadataDSF) Picture() *Picture {
return m.id3.Picture()
}
func (m metadataDSF) Lyrics() string {
return m.id3.Lyrics()
}
func (m metadataDSF) Comment() string {
return m.id3.Comment()
}
func (m metadataDSF) Raw() map[string]interface{} {
return m.id3.Raw()
}


@@ -1,89 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"errors"
"io"
)
// blockType is a type which represents an enumeration of valid FLAC blocks
type blockType byte
// FLAC block types.
const (
// Stream Info Block 0
// Padding Block 1
// Application Block 2
// Seektable Block 3
// Cue Sheet Block 5
vorbisCommentBlock blockType = 4
pictureBlock blockType = 6
)
// ReadFLACTags reads FLAC metadata from the io.ReadSeeker, returning the resulting
// metadata in a Metadata implementation, or non-nil error if there was a problem.
func ReadFLACTags(r io.ReadSeeker) (Metadata, error) {
flac, err := readString(r, 4)
if err != nil {
return nil, err
}
if flac != "fLaC" {
return nil, errors.New("expected 'fLaC'")
}
m := &metadataFLAC{
newMetadataVorbis(),
}
for {
last, err := m.readFLACMetadataBlock(r)
if err != nil {
return nil, err
}
if last {
break
}
}
return m, nil
}
type metadataFLAC struct {
*metadataVorbis
}
func (m *metadataFLAC) readFLACMetadataBlock(r io.ReadSeeker) (last bool, err error) {
blockHeader, err := readBytes(r, 1)
if err != nil {
return
}
if getBit(blockHeader[0], 7) {
blockHeader[0] ^= (1 << 7)
last = true
}
blockLen, err := readInt(r, 3)
if err != nil {
return
}
switch blockType(blockHeader[0]) {
case vorbisCommentBlock:
err = m.readVorbisComment(r)
case pictureBlock:
err = m.readPictureBlock(r)
default:
_, err = r.Seek(int64(blockLen), io.SeekCurrent)
}
return
}
func (m *metadataFLAC) FileType() FileType {
return FLAC
}

81
vendor/github.com/dhowden/tag/id.go generated vendored

@@ -1,81 +0,0 @@
package tag
import (
"fmt"
"io"
)
// Identify identifies the format and file type of the data in the ReadSeeker.
func Identify(r io.ReadSeeker) (format Format, fileType FileType, err error) {
b, err := readBytes(r, 11)
if err != nil {
return
}
_, err = r.Seek(-11, io.SeekCurrent)
if err != nil {
err = fmt.Errorf("could not seek back to original position: %v", err)
return
}
switch {
case string(b[0:4]) == "fLaC":
return VORBIS, FLAC, nil
case string(b[0:4]) == "OggS":
return VORBIS, OGG, nil
case string(b[4:8]) == "ftyp":
b = b[8:11]
fileType = UnknownFileType
switch string(b) {
case "M4A":
fileType = M4A
case "M4B":
fileType = M4B
case "M4P":
fileType = M4P
}
return MP4, fileType, nil
case string(b[0:3]) == "ID3":
b := b[3:]
switch uint(b[0]) {
case 2:
format = ID3v2_2
case 3:
format = ID3v2_3
case 4:
format = ID3v2_4
case 0, 1:
fallthrough
default:
err = fmt.Errorf("ID3 version: %v, expected: 2, 3 or 4", uint(b[0]))
return
}
return format, MP3, nil
}
n, err := r.Seek(-128, io.SeekEnd)
if err != nil {
return
}
tag, err := readString(r, 3)
if err != nil {
return
}
_, err = r.Seek(-n, io.SeekCurrent)
if err != nil {
return
}
if tag != "TAG" {
err = ErrNoTagsFound
return
}
return ID3v1, MP3, nil
}
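
`Identify` is useful on its own when the caller only needs to know what kind of file it is looking at before committing to a full parse. A small sketch using only the exported API shown here (the path is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/dhowden/tag"
)

func main() {
	f, err := os.Open("video.mp4") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	format, fileType, err := tag.Identify(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("format=%v fileType=%v\n", format, fileType)

	// Only bother with a full parse for MP4 containers; rewind first
	// so ReadFrom sees the file from the beginning.
	if format == tag.MP4 {
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			log.Fatal(err)
		}
		m, err := tag.ReadFrom(f)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("title:", m.Title())
	}
}
```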


@@ -1,144 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"errors"
"io"
"strconv"
"strings"
)
// id3v1Genres is a list of genres as given in the ID3v1 specification.
var id3v1Genres = [...]string{
"Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge",
"Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B",
"Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska",
"Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient",
"Trip-Hop", "Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical",
"Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel",
"Noise", "AlternRock", "Bass", "Soul", "Punk", "Space", "Meditative",
"Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic",
"Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk",
"Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta",
"Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native American",
"Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes", "Trailer",
"Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro",
"Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock",
"National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival",
"Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock",
"Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band",
"Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson",
"Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus",
"Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba",
"Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle",
"Duet", "Punk Rock", "Drum Solo", "Acapella", "Euro-House", "Dance Hall",
}
// ErrNotID3v1 is an error which is returned when no ID3v1 header is found.
var ErrNotID3v1 = errors.New("invalid ID3v1 header")
// ReadID3v1Tags reads ID3v1 tags from the io.ReadSeeker. Returns ErrNotID3v1
// if there are no ID3v1 tags, otherwise non-nil error if there was a problem.
func ReadID3v1Tags(r io.ReadSeeker) (Metadata, error) {
_, err := r.Seek(-128, io.SeekEnd)
if err != nil {
return nil, err
}
if tag, err := readString(r, 3); err != nil {
return nil, err
} else if tag != "TAG" {
return nil, ErrNotID3v1
}
title, err := readString(r, 30)
if err != nil {
return nil, err
}
artist, err := readString(r, 30)
if err != nil {
return nil, err
}
album, err := readString(r, 30)
if err != nil {
return nil, err
}
year, err := readString(r, 4)
if err != nil {
return nil, err
}
commentBytes, err := readBytes(r, 30)
if err != nil {
return nil, err
}
var comment string
var track int
if commentBytes[28] == 0 {
comment = trimString(string(commentBytes[:28]))
track = int(commentBytes[29])
} else {
comment = trimString(string(commentBytes))
}
var genre string
genreID, err := readBytes(r, 1)
if err != nil {
return nil, err
}
if int(genreID[0]) < len(id3v1Genres) {
genre = id3v1Genres[int(genreID[0])]
}
m := make(map[string]interface{})
m["title"] = trimString(title)
m["artist"] = trimString(artist)
m["album"] = trimString(album)
m["year"] = trimString(year)
m["comment"] = trimString(comment)
m["track"] = track
m["genre"] = genre
return metadataID3v1(m), nil
}
func trimString(x string) string {
return strings.TrimSpace(strings.Trim(x, "\x00"))
}
// metadataID3v1 is the implementation of Metadata used for ID3v1 tags.
type metadataID3v1 map[string]interface{}
func (metadataID3v1) Format() Format { return ID3v1 }
func (metadataID3v1) FileType() FileType { return MP3 }
func (m metadataID3v1) Raw() map[string]interface{} { return m }
func (m metadataID3v1) Title() string { return m["title"].(string) }
func (m metadataID3v1) Album() string { return m["album"].(string) }
func (m metadataID3v1) Artist() string { return m["artist"].(string) }
func (m metadataID3v1) Genre() string { return m["genre"].(string) }
func (m metadataID3v1) Year() int {
y := m["year"].(string)
n, err := strconv.Atoi(y)
if err != nil {
return 0
}
return n
}
func (m metadataID3v1) Track() (int, int) { return m["track"].(int), 0 }
func (m metadataID3v1) AlbumArtist() string { return "" }
func (m metadataID3v1) Composer() string { return "" }
func (metadataID3v1) Disc() (int, int) { return 0, 0 }
func (m metadataID3v1) Picture() *Picture { return nil }
func (m metadataID3v1) Lyrics() string { return "" }
func (m metadataID3v1) Comment() string { return m["comment"].(string) }


@@ -1,434 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"fmt"
"io"
"regexp"
"strconv"
"strings"
)
var id3v2Genres = [...]string{
"Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge",
"Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B",
"Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska",
"Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient",
"Trip-Hop", "Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical",
"Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel",
"Noise", "AlternRock", "Bass", "Soul", "Punk", "Space", "Meditative",
"Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic",
"Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk",
"Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta",
"Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native American",
"Cabaret", "New Wave", "Psychedelic", "Rave", "Showtunes", "Trailer",
"Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro",
"Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock",
"National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival",
"Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock",
"Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band",
"Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson",
"Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus",
"Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba",
"Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle",
"Duet", "Punk Rock", "Drum Solo", "A capella", "Euro-House", "Dance Hall",
"Goa", "Drum & Bass", "Club-House", "Hardcore", "Terror", "Indie",
"Britpop", "Negerpunk", "Polsk Punk", "Beat", "Christian Gangsta Rap",
"Heavy Metal", "Black Metal", "Crossover", "Contemporary Christian",
"Christian Rock ", "Merengue", "Salsa", "Thrash Metal", "Anime", "JPop",
"Synthpop",
}
// id3v2Header is a type which represents an ID3v2 tag header.
type id3v2Header struct {
Version Format
Unsynchronisation bool
ExtendedHeader bool
Experimental bool
Size int
}
// readID3v2Header reads the ID3v2 header from the given io.Reader.
// offset it number of bytes of header that was read
func readID3v2Header(r io.Reader) (h *id3v2Header, offset int, err error) {
offset = 10
b, err := readBytes(r, offset)
if err != nil {
return nil, 0, fmt.Errorf("expected to read 10 bytes (ID3v2Header): %v", err)
}
if string(b[0:3]) != "ID3" {
return nil, 0, fmt.Errorf("expected to read \"ID3\"")
}
b = b[3:]
var vers Format
switch uint(b[0]) {
case 2:
vers = ID3v2_2
case 3:
vers = ID3v2_3
case 4:
vers = ID3v2_4
case 0, 1:
fallthrough
default:
return nil, 0, fmt.Errorf("ID3 version: %v, expected: 2, 3 or 4", uint(b[0]))
}
// NB: We ignore b[1] (the revision) as we don't currently rely on it.
h = &id3v2Header{
Version: vers,
Unsynchronisation: getBit(b[2], 7),
ExtendedHeader: getBit(b[2], 6),
Experimental: getBit(b[2], 5),
Size: get7BitChunkedInt(b[3:7]),
}
if h.ExtendedHeader {
switch vers {
case ID3v2_3:
b, err := readBytes(r, 4)
if err != nil {
return nil, 0, fmt.Errorf("expected to read 4 bytes (ID3v23 extended header len): %v", err)
}
// skip header, size is excluding len bytes
extendedHeaderSize := getInt(b)
_, err = readBytes(r, extendedHeaderSize)
if err != nil {
return nil, 0, fmt.Errorf("expected to read %d bytes (ID3v23 skip extended header): %v", extendedHeaderSize, err)
}
offset += extendedHeaderSize
case ID3v2_4:
b, err := readBytes(r, 4)
if err != nil {
return nil, 0, fmt.Errorf("expected to read 4 bytes (ID3v24 extended header len): %v", err)
}
// skip header, size is synchsafe int including len bytes
extendedHeaderSize := get7BitChunkedInt(b) - 4
_, err = readBytes(r, extendedHeaderSize)
if err != nil {
return nil, 0, fmt.Errorf("expected to read %d bytes (ID3v24 skip extended header): %v", extendedHeaderSize, err)
}
offset += extendedHeaderSize
default:
// nop, only 2.3 and 2.4 should have extended header
}
}
return h, offset, nil
}
// id3v2FrameFlags is a type which represents the flags which can be set on an ID3v2 frame.
type id3v2FrameFlags struct {
// Message (ID3 2.3.0 and 2.4.0)
TagAlterPreservation bool
FileAlterPreservation bool
ReadOnly bool
// Format (ID3 2.3.0 and 2.4.0)
Compression bool
Encryption bool
GroupIdentity bool
// ID3 2.4.0 only (see http://id3.org/id3v2.4.0-structure sec 4.1)
Unsynchronisation bool
DataLengthIndicator bool
}
func readID3v23FrameFlags(r io.Reader) (*id3v2FrameFlags, error) {
b, err := readBytes(r, 2)
if err != nil {
return nil, err
}
msg := b[0]
fmt := b[1]
return &id3v2FrameFlags{
TagAlterPreservation: getBit(msg, 7),
FileAlterPreservation: getBit(msg, 6),
ReadOnly: getBit(msg, 5),
Compression: getBit(fmt, 7),
Encryption: getBit(fmt, 6),
GroupIdentity: getBit(fmt, 5),
}, nil
}
func readID3v24FrameFlags(r io.Reader) (*id3v2FrameFlags, error) {
b, err := readBytes(r, 2)
if err != nil {
return nil, err
}
msg := b[0]
fmt := b[1]
return &id3v2FrameFlags{
TagAlterPreservation: getBit(msg, 6),
FileAlterPreservation: getBit(msg, 5),
ReadOnly: getBit(msg, 4),
GroupIdentity: getBit(fmt, 6),
Compression: getBit(fmt, 3),
Encryption: getBit(fmt, 2),
Unsynchronisation: getBit(fmt, 1),
DataLengthIndicator: getBit(fmt, 0),
}, nil
}
func readID3v2_2FrameHeader(r io.Reader) (name string, size int, headerSize int, err error) {
name, err = readString(r, 3)
if err != nil {
return
}
size, err = readInt(r, 3)
if err != nil {
return
}
headerSize = 6
return
}
func readID3v2_3FrameHeader(r io.Reader) (name string, size int, headerSize int, err error) {
name, err = readString(r, 4)
if err != nil {
return
}
size, err = readInt(r, 4)
if err != nil {
return
}
headerSize = 8
return
}
func readID3v2_4FrameHeader(r io.Reader) (name string, size int, headerSize int, err error) {
name, err = readString(r, 4)
if err != nil {
return
}
size, err = read7BitChunkedInt(r, 4)
if err != nil {
return
}
headerSize = 8
return
}
// readID3v2Frames reads ID3v2 frames from the given reader using the ID3v2Header.
func readID3v2Frames(r io.Reader, offset int, h *id3v2Header) (map[string]interface{}, error) {
result := make(map[string]interface{})
for offset < h.Size {
var err error
var name string
var size, headerSize int
var flags *id3v2FrameFlags
switch h.Version {
case ID3v2_2:
name, size, headerSize, err = readID3v2_2FrameHeader(r)
case ID3v2_3:
name, size, headerSize, err = readID3v2_3FrameHeader(r)
if err != nil {
return nil, err
}
flags, err = readID3v23FrameFlags(r)
headerSize += 2
case ID3v2_4:
name, size, headerSize, err = readID3v2_4FrameHeader(r)
if err != nil {
return nil, err
}
flags, err = readID3v24FrameFlags(r)
headerSize += 2
}
if err != nil {
return nil, err
}
// FIXME: Do we still need this?
// if size=0, we certainly are in a padding zone. ignore the rest of
// the tags
if size == 0 {
break
}
offset += headerSize + size
// Avoid corrupted padding (see http://id3.org/Compliance%20Issues).
if !validID3Frame(h.Version, name) && offset > h.Size {
break
}
if flags != nil {
if flags.Compression {
_, err = read7BitChunkedInt(r, 4) // read 4
if err != nil {
return nil, err
}
size -= 4
}
if flags.Encryption {
_, err = readBytes(r, 1) // read 1 byte of encryption method
if err != nil {
return nil, err
}
size -= 1
}
}
b, err := readBytes(r, size)
if err != nil {
return nil, err
}
// There can be multiple tag with the same name. Append a number to the
// name if there is more than one.
rawName := name
if _, ok := result[rawName]; ok {
for i := 0; ok; i++ {
rawName = name + "_" + strconv.Itoa(i)
_, ok = result[rawName]
}
}
switch {
case name == "TXXX" || name == "TXX":
t, err := readTextWithDescrFrame(b, false, true) // no lang, but enc
if err != nil {
return nil, err
}
result[rawName] = t
case name[0] == 'T':
txt, err := readTFrame(b)
if err != nil {
return nil, err
}
result[rawName] = txt
case name == "UFID" || name == "UFI":
t, err := readUFID(b)
if err != nil {
return nil, err
}
result[rawName] = t
case name == "WXXX" || name == "WXX":
t, err := readTextWithDescrFrame(b, false, false) // no lang, no enc
if err != nil {
return nil, err
}
result[rawName] = t
case name[0] == 'W':
txt, err := readWFrame(b)
if err != nil {
return nil, err
}
result[rawName] = txt
case name == "COMM" || name == "COM" || name == "USLT" || name == "ULT":
t, err := readTextWithDescrFrame(b, true, true) // both lang and enc
if err != nil {
return nil, err
}
result[rawName] = t
case name == "APIC":
p, err := readAPICFrame(b)
if err != nil {
return nil, err
}
result[rawName] = p
case name == "PIC":
p, err := readPICFrame(b)
if err != nil {
return nil, err
}
result[rawName] = p
default:
result[rawName] = b
}
}
return result, nil
}
type unsynchroniser struct {
io.Reader
ff bool
}
// filter io.Reader which skip the Unsynchronisation bytes
func (r *unsynchroniser) Read(p []byte) (int, error) {
b := make([]byte, 1)
i := 0
for i < len(p) {
if n, err := r.Reader.Read(b); err != nil || n == 0 {
return i, err
}
if r.ff && b[0] == 0x00 {
r.ff = false
continue
}
p[i] = b[0]
i++
r.ff = (b[0] == 0xFF)
}
return i, nil
}
// ReadID3v2Tags parses ID3v2.{2,3,4} tags from the io.ReadSeeker into a Metadata, returning
// non-nil error on failure.
func ReadID3v2Tags(r io.ReadSeeker) (Metadata, error) {
h, offset, err := readID3v2Header(r)
if err != nil {
return nil, err
}
var ur io.Reader = r
if h.Unsynchronisation {
ur = &unsynchroniser{Reader: r}
}
f, err := readID3v2Frames(ur, offset, h)
if err != nil {
return nil, err
}
return metadataID3v2{header: h, frames: f}, nil
}
var id3v2genreRe = regexp.MustCompile(`(.*[^(]|.* |^)\(([0-9]+)\) *(.*)$`)
// id3v2genre parse a id3v2 genre tag and expand the numeric genres
func id3v2genre(genre string) string {
c := true
for c {
orig := genre
if match := id3v2genreRe.FindStringSubmatch(genre); len(match) > 0 {
if genreID, err := strconv.Atoi(match[2]); err == nil {
if genreID < len(id3v2Genres) {
genre = id3v2Genres[genreID]
if match[1] != "" {
genre = strings.TrimSpace(match[1]) + " " + genre
}
if match[3] != "" {
genre = genre + " " + match[3]
}
}
}
}
c = (orig != genre)
}
return strings.Replace(genre, "((", "(", -1)
}


@@ -1,638 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"strings"
"unicode/utf16"
)
// DefaultUTF16WithBOMByteOrder is the byte order used when the "UTF16 with BOM" encoding
// is specified without a corresponding BOM in the data.
var DefaultUTF16WithBOMByteOrder binary.ByteOrder = binary.LittleEndian
// ID3v2.2.0 frames (see http://id3.org/id3v2-00, sec 4).
var id3v22Frames = map[string]string{
"BUF": "Recommended buffer size",
"CNT": "Play counter",
"COM": "Comments",
"CRA": "Audio encryption",
"CRM": "Encrypted meta frame",
"ETC": "Event timing codes",
"EQU": "Equalization",
"GEO": "General encapsulated object",
"IPL": "Involved people list",
"LNK": "Linked information",
"MCI": "Music CD Identifier",
"MLL": "MPEG location lookup table",
"PIC": "Attached picture",
"POP": "Popularimeter",
"REV": "Reverb",
"RVA": "Relative volume adjustment",
"SLT": "Synchronized lyric/text",
"STC": "Synced tempo codes",
"TAL": "Album/Movie/Show title",
"TBP": "BPM (Beats Per Minute)",
"TCM": "Composer",
"TCO": "Content type",
"TCR": "Copyright message",
"TDA": "Date",
"TDY": "Playlist delay",
"TEN": "Encoded by",
"TFT": "File type",
"TIM": "Time",
"TKE": "Initial key",
"TLA": "Language(s)",
"TLE": "Length",
"TMT": "Media type",
"TOA": "Original artist(s)/performer(s)",
"TOF": "Original filename",
"TOL": "Original Lyricist(s)/text writer(s)",
"TOR": "Original release year",
"TOT": "Original album/Movie/Show title",
"TP1": "Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group",
"TP2": "Band/Orchestra/Accompaniment",
"TP3": "Conductor/Performer refinement",
"TP4": "Interpreted, remixed, or otherwise modified by",
"TPA": "Part of a set",
"TPB": "Publisher",
"TRC": "ISRC (International Standard Recording Code)",
"TRD": "Recording dates",
"TRK": "Track number/Position in set",
"TSI": "Size",
"TSS": "Software/hardware and settings used for encoding",
"TT1": "Content group description",
"TT2": "Title/Songname/Content description",
"TT3": "Subtitle/Description refinement",
"TXT": "Lyricist/text writer",
"TXX": "User defined text information frame",
"TYE": "Year",
"UFI": "Unique file identifier",
"ULT": "Unsychronized lyric/text transcription",
"WAF": "Official audio file webpage",
"WAR": "Official artist/performer webpage",
"WAS": "Official audio source webpage",
"WCM": "Commercial information",
"WCP": "Copyright/Legal information",
"WPB": "Publishers official webpage",
"WXX": "User defined URL link frame",
}
// ID3v2.3.0 frames (see http://id3.org/id3v2.3.0#Declared_ID3v2_frames).
var id3v23Frames = map[string]string{
"AENC": "Audio encryption]",
"APIC": "Attached picture",
"COMM": "Comments",
"COMR": "Commercial frame",
"ENCR": "Encryption method registration",
"EQUA": "Equalization",
"ETCO": "Event timing codes",
"GEOB": "General encapsulated object",
"GRID": "Group identification registration",
"IPLS": "Involved people list",
"LINK": "Linked information",
"MCDI": "Music CD identifier",
"MLLT": "MPEG location lookup table",
"OWNE": "Ownership frame",
"PRIV": "Private frame",
"PCNT": "Play counter",
"POPM": "Popularimeter",
"POSS": "Position synchronisation frame",
"RBUF": "Recommended buffer size",
"RVAD": "Relative volume adjustment",
"RVRB": "Reverb",
"SYLT": "Synchronized lyric/text",
"SYTC": "Synchronized tempo codes",
"TALB": "Album/Movie/Show title",
"TBPM": "BPM (beats per minute)",
"TCMP": "iTunes Compilation Flag",
"TCOM": "Composer",
"TCON": "Content type",
"TCOP": "Copyright message",
"TDAT": "Date",
"TDLY": "Playlist delay",
"TENC": "Encoded by",
"TEXT": "Lyricist/Text writer",
"TFLT": "File type",
"TIME": "Time",
"TIT1": "Content group description",
"TIT2": "Title/songname/content description",
"TIT3": "Subtitle/Description refinement",
"TKEY": "Initial key",
"TLAN": "Language(s)",
"TLEN": "Length",
"TMED": "Media type",
"TOAL": "Original album/movie/show title",
"TOFN": "Original filename",
"TOLY": "Original lyricist(s)/text writer(s)",
"TOPE": "Original artist(s)/performer(s)",
"TORY": "Original release year",
"TOWN": "File owner/licensee",
"TPE1": "Lead performer(s)/Soloist(s)",
"TPE2": "Band/orchestra/accompaniment",
"TPE3": "Conductor/performer refinement",
"TPE4": "Interpreted, remixed, or otherwise modified by",
"TPOS": "Part of a set",
"TPUB": "Publisher",
"TRCK": "Track number/Position in set",
"TRDA": "Recording dates",
"TRSN": "Internet radio station name",
"TRSO": "Internet radio station owner",
"TSIZ": "Size",
"TSO2": "iTunes uses this for Album Artist sort order",
"TSOC": "iTunes uses this for Composer sort order",
"TSRC": "ISRC (international standard recording code)",
"TSSE": "Software/Hardware and settings used for encoding",
"TYER": "Year",
"TXXX": "User defined text information frame",
"UFID": "Unique file identifier",
"USER": "Terms of use",
"USLT": "Unsychronized lyric/text transcription",
"WCOM": "Commercial information",
"WCOP": "Copyright/Legal information",
"WOAF": "Official audio file webpage",
"WOAR": "Official artist/performer webpage",
"WOAS": "Official audio source webpage",
"WORS": "Official internet radio station homepage",
"WPAY": "Payment",
"WPUB": "Publishers official webpage",
"WXXX": "User defined URL link frame",
}
// ID3v2.4.0 frames (see http://id3.org/id3v2.4.0-frames, sec 4).
var id3v24Frames = map[string]string{
"AENC": "Audio encryption",
"APIC": "Attached picture",
"ASPI": "Audio seek point index",
"COMM": "Comments",
"COMR": "Commercial frame",
"ENCR": "Encryption method registration",
"EQU2": "Equalisation (2)",
"ETCO": "Event timing codes",
"GEOB": "General encapsulated object",
"GRID": "Group identification registration",
"LINK": "Linked information",
"MCDI": "Music CD identifier",
"MLLT": "MPEG location lookup table",
"OWNE": "Ownership frame",
"PRIV": "Private frame",
"PCNT": "Play counter",
"POPM": "Popularimeter",
"POSS": "Position synchronisation frame",
"RBUF": "Recommended buffer size",
"RVA2": "Relative volume adjustment (2)",
"RVRB": "Reverb",
"SEEK": "Seek frame",
"SIGN": "Signature frame",
"SYLT": "Synchronised lyric/text",
"SYTC": "Synchronised tempo codes",
"TALB": "Album/Movie/Show title",
"TBPM": "BPM (beats per minute)",
"TCMP": "iTunes Compilation Flag",
"TCOM": "Composer",
"TCON": "Content type",
"TCOP": "Copyright message",
"TDEN": "Encoding time",
"TDLY": "Playlist delay",
"TDOR": "Original release time",
"TDRC": "Recording time",
"TDRL": "Release time",
"TDTG": "Tagging time",
"TENC": "Encoded by",
"TEXT": "Lyricist/Text writer",
"TFLT": "File type",
"TIPL": "Involved people list",
"TIT1": "Content group description",
"TIT2": "Title/songname/content description",
"TIT3": "Subtitle/Description refinement",
"TKEY": "Initial key",
"TLAN": "Language(s)",
"TLEN": "Length",
"TMCL": "Musician credits list",
"TMED": "Media type",
"TMOO": "Mood",
"TOAL": "Original album/movie/show title",
"TOFN": "Original filename",
"TOLY": "Original lyricist(s)/text writer(s)",
"TOPE": "Original artist(s)/performer(s)",
"TOWN": "File owner/licensee",
"TPE1": "Lead performer(s)/Soloist(s)",
"TPE2": "Band/orchestra/accompaniment",
"TPE3": "Conductor/performer refinement",
"TPE4": "Interpreted, remixed, or otherwise modified by",
"TPOS": "Part of a set",
"TPRO": "Produced notice",
"TPUB": "Publisher",
"TRCK": "Track number/Position in set",
"TRSN": "Internet radio station name",
"TRSO": "Internet radio station owner",
"TSO2": "iTunes uses this for Album Artist sort order",
"TSOA": "Album sort order",
"TSOC": "iTunes uses this for Composer sort order",
"TSOP": "Performer sort order",
"TSOT": "Title sort order",
"TSRC": "ISRC (international standard recording code)",
"TSSE": "Software/Hardware and settings used for encoding",
"TSST": "Set subtitle",
"TXXX": "User defined text information frame",
"UFID": "Unique file identifier",
"USER": "Terms of use",
"USLT": "Unsynchronised lyric/text transcription",
"WCOM": "Commercial information",
"WCOP": "Copyright/Legal information",
"WOAF": "Official audio file webpage",
"WOAR": "Official artist/performer webpage",
"WOAS": "Official audio source webpage",
"WORS": "Official Internet radio station homepage",
"WPAY": "Payment",
"WPUB": "Publishers official webpage",
"WXXX": "User defined URL link frame",
}
// ID3 frames that are defined in the specs.
var id3Frames = map[Format]map[string]string{
ID3v2_2: id3v22Frames,
ID3v2_3: id3v23Frames,
ID3v2_4: id3v24Frames,
}
func validID3Frame(version Format, name string) bool {
names, ok := id3Frames[version]
if !ok {
return false
}
_, ok = names[name]
return ok
}
func readWFrame(b []byte) (string, error) {
// Frame text is always encoded in ISO-8859-1
b = append([]byte{0}, b...)
return readTFrame(b)
}
func readTFrame(b []byte) (string, error) {
if len(b) == 0 {
return "", nil
}
txt, err := decodeText(b[0], b[1:])
if err != nil {
return "", err
}
return strings.Join(strings.Split(txt, string(singleZero)), ""), nil
}
const (
encodingISO8859 byte = 0
encodingUTF16WithBOM byte = 1
encodingUTF16 byte = 2
encodingUTF8 byte = 3
)
func decodeText(enc byte, b []byte) (string, error) {
if len(b) == 0 {
return "", nil
}
switch enc {
case encodingISO8859: // ISO-8859-1
return decodeISO8859(b), nil
case encodingUTF16WithBOM: // UTF-16 with byte order marker
if len(b) == 1 {
return "", nil
}
return decodeUTF16WithBOM(b)
case encodingUTF16: // UTF-16 without byte order (assuming BigEndian)
if len(b) == 1 {
return "", nil
}
return decodeUTF16(b, binary.BigEndian)
case encodingUTF8: // UTF-8
return string(b), nil
default: // Fallback to ISO-8859-1
return decodeISO8859(b), nil
}
}
var (
singleZero = []byte{0}
doubleZero = []byte{0, 0}
)
func dataSplit(b []byte, enc byte) [][]byte {
delim := singleZero
if enc == encodingUTF16 || enc == encodingUTF16WithBOM {
delim = doubleZero
}
result := bytes.SplitN(b, delim, 2)
if len(result) != 2 {
return result
}
if len(result[1]) == 0 {
return result
}
if result[1][0] == 0 {
// there was a double (or triple) 0 and we cut too early
result[0] = append(result[0], result[1][0])
result[1] = result[1][1:]
}
return result
}
func decodeISO8859(b []byte) string {
r := make([]rune, len(b))
for i, x := range b {
r[i] = rune(x)
}
return string(r)
}
func decodeUTF16WithBOM(b []byte) (string, error) {
if len(b) < 2 {
return "", errors.New("invalid encoding: expected at least 2 bytes for UTF-16 byte order mark")
}
var bo binary.ByteOrder
switch {
case b[0] == 0xFE && b[1] == 0xFF:
bo = binary.BigEndian
b = b[2:]
case b[0] == 0xFF && b[1] == 0xFE:
bo = binary.LittleEndian
b = b[2:]
default:
bo = DefaultUTF16WithBOMByteOrder
}
return decodeUTF16(b, bo)
}
func decodeUTF16(b []byte, bo binary.ByteOrder) (string, error) {
if len(b)%2 != 0 {
return "", errors.New("invalid encoding: expected even number of bytes for UTF-16 encoded text")
}
s := make([]uint16, 0, len(b)/2)
for i := 0; i < len(b); i += 2 {
s = append(s, bo.Uint16(b[i:i+2]))
}
return string(utf16.Decode(s)), nil
}
// Comm is a type used in COMM, UFID, TXXX, WXXX and USLT tag.
// It's a text with a description and a specified language
// For WXXX, TXXX and UFID, we don't set a Language
type Comm struct {
Language string
Description string
Text string
}
// String returns a string representation of the underlying Comm instance.
func (t Comm) String() string {
if t.Language != "" {
return fmt.Sprintf("Text{Lang: '%v', Description: '%v', %v lines}",
t.Language, t.Description, strings.Count(t.Text, "\n"))
}
return fmt.Sprintf("Text{Description: '%v', %v}", t.Description, t.Text)
}
// IDv2.{3,4}
// -- Header
// <Header for 'Unsynchronised lyrics/text transcription', ID: "USLT">
// <Header for 'Comment', ID: "COMM">
// -- readTextWithDescrFrame(data, true, true)
// Text encoding $xx
// Language $xx xx xx
// Content descriptor <text string according to encoding> $00 (00)
// Lyrics/text <full text string according to encoding>
// -- Header
// <Header for 'User defined text information frame', ID: "TXXX">
// <Header for 'User defined URL link frame', ID: "WXXX">
// -- readTextWithDescrFrame(data, false, <isDataEncoded>)
// Text encoding $xx
// Description <text string according to encoding> $00 (00)
// Value <text string according to encoding>
func readTextWithDescrFrame(b []byte, hasLang bool, encoded bool) (*Comm, error) {
enc := b[0]
b = b[1:]
c := &Comm{}
if hasLang {
c.Language = string(b[:3])
b = b[3:]
}
descTextSplit := dataSplit(b, enc)
if len(descTextSplit) < 1 {
return nil, fmt.Errorf("error decoding tag description text: invalid encoding")
}
desc, err := decodeText(enc, descTextSplit[0])
if err != nil {
return nil, fmt.Errorf("error decoding tag description text: %v", err)
}
c.Description = desc
if len(descTextSplit) == 1 {
return c, nil
}
if !encoded {
enc = byte(0)
}
text, err := decodeText(enc, descTextSplit[1])
if err != nil {
return nil, fmt.Errorf("error decoding tag text: %v", err)
}
c.Text = text
return c, nil
}
// UFID is composed of a provider (frequently a URL and a binary identifier)
// The identifier can be a text (Musicbrainz use texts, but not necessary)
type UFID struct {
Provider string
Identifier []byte
}
func (u UFID) String() string {
return fmt.Sprintf("%v (%v)", u.Provider, string(u.Identifier))
}
func readUFID(b []byte) (*UFID, error) {
result := bytes.SplitN(b, singleZero, 2)
if len(result) != 2 {
return nil, errors.New("expected to split UFID data into 2 pieces")
}
return &UFID{
Provider: string(result[0]),
Identifier: result[1],
}, nil
}
var pictureTypes = map[byte]string{
0x00: "Other",
0x01: "32x32 pixels 'file icon' (PNG only)",
0x02: "Other file icon",
0x03: "Cover (front)",
0x04: "Cover (back)",
0x05: "Leaflet page",
0x06: "Media (e.g. lable side of CD)",
0x07: "Lead artist/lead performer/soloist",
0x08: "Artist/performer",
0x09: "Conductor",
0x0A: "Band/Orchestra",
0x0B: "Composer",
0x0C: "Lyricist/text writer",
0x0D: "Recording Location",
0x0E: "During recording",
0x0F: "During performance",
0x10: "Movie/video screen capture",
0x11: "A bright coloured fish",
0x12: "Illustration",
0x13: "Band/artist logotype",
0x14: "Publisher/Studio logotype",
}
// Picture is a type which represents an attached picture extracted from metadata.
type Picture struct {
Ext string // Extension of the picture file.
MIMEType string // MIMEType of the picture.
Type string // Type of the picture (see pictureTypes).
Description string // Description.
Data []byte // Raw picture data.
}
// String returns a string representation of the underlying Picture instance.
func (p Picture) String() string {
return fmt.Sprintf("Picture{Ext: %v, MIMEType: %v, Type: %v, Description: %v, Data.Size: %v}",
p.Ext, p.MIMEType, p.Type, p.Description, len(p.Data))
}
// IDv2.2
// -- Header
// Attached picture "PIC"
// Frame size $xx xx xx
// -- readPICFrame
// Text encoding $xx
// Image format $xx xx xx
// Picture type $xx
// Description <textstring> $00 (00)
// Picture data <binary data>
func readPICFrame(b []byte) (*Picture, error) {
enc := b[0]
ext := string(b[1:4])
picType := b[4]
descDataSplit := dataSplit(b[5:], enc)
if len(descDataSplit) != 2 {
return nil, errors.New("error decoding PIC description text: invalid encoding")
}
desc, err := decodeText(enc, descDataSplit[0])
if err != nil {
return nil, fmt.Errorf("error decoding PIC description text: %v", err)
}
var mimeType string
switch ext {
case "jpeg", "jpg":
mimeType = "image/jpeg"
case "png":
mimeType = "image/png"
}
return &Picture{
Ext: ext,
MIMEType: mimeType,
Type: pictureTypes[picType],
Description: desc,
Data: descDataSplit[1],
}, nil
}
// IDv2.{3,4}
// -- Header
// <Header for 'Attached picture', ID: "APIC">
// -- readAPICFrame
// Text encoding $xx
// MIME type <text string> $00
// Picture type $xx
// Description <text string according to encoding> $00 (00)
// Picture data <binary data>
func readAPICFrame(b []byte) (*Picture, error) {
enc := b[0]
mimeDataSplit := bytes.SplitN(b[1:], singleZero, 2)
mimeType := string(mimeDataSplit[0])
b = mimeDataSplit[1]
if len(b) < 1 {
return nil, fmt.Errorf("error decoding APIC mimetype")
}
picType := b[0]
descDataSplit := dataSplit(b[1:], enc)
if len(descDataSplit) != 2 {
return nil, errors.New("error decoding APIC description text: invalid encoding")
}
desc, err := decodeText(enc, descDataSplit[0])
if err != nil {
return nil, fmt.Errorf("error decoding APIC description text: %v", err)
}
var ext string
switch mimeType {
case "image/jpeg":
ext = "jpg"
case "image/png":
ext = "png"
}
return &Picture{
Ext: ext,
MIMEType: mimeType,
Type: pictureTypes[picType],
Description: desc,
Data: descDataSplit[1],
}, nil
}


@@ -1,141 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"strconv"
"strings"
)
type frameNames map[string][2]string
func (f frameNames) Name(s string, fm Format) string {
l, ok := f[s]
if !ok {
return ""
}
switch fm {
case ID3v2_2:
return l[0]
case ID3v2_3:
return l[1]
case ID3v2_4:
if s == "year" {
return "TDRC"
}
return l[1]
}
return ""
}
var frames = frameNames(map[string][2]string{
"title": [2]string{"TT2", "TIT2"},
"artist": [2]string{"TP1", "TPE1"},
"album": [2]string{"TAL", "TALB"},
"album_artist": [2]string{"TP2", "TPE2"},
"composer": [2]string{"TCM", "TCOM"},
"year": [2]string{"TYE", "TYER"},
"track": [2]string{"TRK", "TRCK"},
"disc": [2]string{"TPA", "TPOS"},
"genre": [2]string{"TCO", "TCON"},
"picture": [2]string{"PIC", "APIC"},
"lyrics": [2]string{"", "USLT"},
"comment": [2]string{"COM", "COMM"},
})
// metadataID3v2 is the implementation of Metadata used for ID3v2 tags.
type metadataID3v2 struct {
header *id3v2Header
frames map[string]interface{}
}
func (m metadataID3v2) getString(k string) string {
v, ok := m.frames[k]
if !ok {
return ""
}
return v.(string)
}
func (m metadataID3v2) Format() Format { return m.header.Version }
func (m metadataID3v2) FileType() FileType { return MP3 }
func (m metadataID3v2) Raw() map[string]interface{} { return m.frames }
func (m metadataID3v2) Title() string {
return m.getString(frames.Name("title", m.Format()))
}
func (m metadataID3v2) Artist() string {
return m.getString(frames.Name("artist", m.Format()))
}
func (m metadataID3v2) Album() string {
return m.getString(frames.Name("album", m.Format()))
}
func (m metadataID3v2) AlbumArtist() string {
return m.getString(frames.Name("album_artist", m.Format()))
}
func (m metadataID3v2) Composer() string {
return m.getString(frames.Name("composer", m.Format()))
}
func (m metadataID3v2) Genre() string {
return id3v2genre(m.getString(frames.Name("genre", m.Format())))
}
func (m metadataID3v2) Year() int {
year, _ := strconv.Atoi(m.getString(frames.Name("year", m.Format())))
return year
}
func parseXofN(s string) (x, n int) {
xn := strings.Split(s, "/")
if len(xn) != 2 {
x, _ = strconv.Atoi(s)
return x, 0
}
x, _ = strconv.Atoi(strings.TrimSpace(xn[0]))
n, _ = strconv.Atoi(strings.TrimSpace(xn[1]))
return x, n
}
func (m metadataID3v2) Track() (int, int) {
return parseXofN(m.getString(frames.Name("track", m.Format())))
}
func (m metadataID3v2) Disc() (int, int) {
return parseXofN(m.getString(frames.Name("disc", m.Format())))
}
func (m metadataID3v2) Lyrics() string {
t, ok := m.frames[frames.Name("lyrics", m.Format())]
if !ok {
return ""
}
return t.(*Comm).Text
}
func (m metadataID3v2) Comment() string {
t, ok := m.frames[frames.Name("comment", m.Format())]
if !ok {
return ""
}
// id3v23 has Text, id3v24 has Description
if t.(*Comm).Description == "" {
return trimString(t.(*Comm).Text)
}
return trimString(t.(*Comm).Description)
}
func (m metadataID3v2) Picture() *Picture {
v, ok := m.frames[frames.Name("picture", m.Format())]
if !ok {
return nil
}
return v.(*Picture)
}


@ -1,3 +0,0 @@
All files in this directory are subject to the CC0 1.0 Universal (CC0 1.0)
Public Domain Dedication license. Its contents can be found at:
http://creativecommons.org/publicdomain/zero/1.0

372
vendor/github.com/dhowden/tag/mp4.go generated vendored

@@ -1,372 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"strconv"
"strings"
)
var atomTypes = map[int]string{
0: "implicit", // automatic based on atom name
1: "text",
13: "jpeg",
14: "png",
21: "uint8",
}
// NB: atoms does not include "----", this is handled separately
var atoms = atomNames(map[string]string{
"\xa9alb": "album",
"\xa9art": "artist",
"\xa9ART": "artist",
"aART": "album_artist",
"\xa9day": "year",
"\xa9nam": "title",
"\xa9gen": "genre",
"trkn": "track",
"\xa9wrt": "composer",
"\xa9too": "encoder",
"cprt": "copyright",
"covr": "picture",
"\xa9grp": "grouping",
"keyw": "keyword",
"\xa9lyr": "lyrics",
"\xa9cmt": "comment",
"tmpo": "tempo",
"cpil": "compilation",
"disk": "disc",
})
// Detect PNG image if "implicit" class is used
var pngHeader = []byte{137, 80, 78, 71, 13, 10, 26, 10}
type atomNames map[string]string
func (f atomNames) Name(n string) []string {
res := make([]string, 1)
for k, v := range f {
if v == n {
res = append(res, k)
}
}
return res
}
// metadataMP4 is the implementation of Metadata for MP4 tag (atom) data.
type metadataMP4 struct {
fileType FileType
data map[string]interface{}
}
// ReadAtoms reads MP4 metadata atoms from the io.ReadSeeker into a Metadata, returning
// non-nil error if there was a problem.
func ReadAtoms(r io.ReadSeeker) (Metadata, error) {
m := metadataMP4{
data: make(map[string]interface{}),
fileType: UnknownFileType,
}
err := m.readAtoms(r)
return m, err
}
func (m metadataMP4) readAtoms(r io.ReadSeeker) error {
for {
name, size, err := readAtomHeader(r)
if err != nil {
if err == io.EOF {
return nil
}
return err
}
switch name {
case "meta":
// next_item_id (int32)
_, err := readBytes(r, 4)
if err != nil {
return err
}
fallthrough
case "moov", "udta", "ilst":
return m.readAtoms(r)
}
_, ok := atoms[name]
var data []string
if name == "----" {
name, data, err = readCustomAtom(r, size)
if err != nil {
return err
}
if name != "----" {
ok = true
size = 0 // already read data
}
}
if !ok {
_, err := r.Seek(int64(size-8), io.SeekCurrent)
if err != nil {
return err
}
continue
}
err = m.readAtomData(r, name, size-8, data)
if err != nil {
return err
}
}
}
func (m metadataMP4) readAtomData(r io.ReadSeeker, name string, size uint32, processedData []string) error {
var b []byte
var err error
var contentType string
if len(processedData) > 0 {
b = []byte(strings.Join(processedData, ";")) // add delimiter if multiple data fields
contentType = "text"
} else {
// read the data
b, err = readBytes(r, int(size))
if err != nil {
return err
}
if len(b) < 8 {
return fmt.Errorf("invalid encoding: expected at least %d bytes, got %d", 8, len(b))
}
// "data" + size (4 bytes each)
b = b[8:]
if len(b) < 3 {
return fmt.Errorf("invalid encoding: expected at least %d bytes, for class, got %d", 3, len(b))
}
class := getInt(b[1:4])
var ok bool
contentType, ok = atomTypes[class]
if !ok {
return fmt.Errorf("invalid content type: %v (%x) (%x)", class, b[1:4], b)
}
// 4: atom version (1 byte) + atom flags (3 bytes)
// 4: NULL (usually locale indicator)
if len(b) < 8 {
return fmt.Errorf("invalid encoding: expected at least %d bytes, for atom version and flags, got %d", 8, len(b))
}
b = b[8:]
}
if name == "trkn" || name == "disk" {
if len(b) < 6 {
return fmt.Errorf("invalid encoding: expected at least %d bytes, for track and disk numbers, got %d", 6, len(b))
}
m.data[name] = int(b[3])
m.data[name+"_count"] = int(b[5])
return nil
}
if contentType == "implicit" {
if name == "covr" {
if bytes.HasPrefix(b, pngHeader) {
contentType = "png"
}
// TODO(dhowden): Detect JPEG formats too (harder).
}
}
var data interface{}
switch contentType {
case "implicit":
if _, ok := atoms[name]; ok {
return fmt.Errorf("unhandled implicit content type for required atom: %q", name)
}
return nil
case "text":
data = string(b)
case "uint8":
if len(b) < 1 {
return fmt.Errorf("invalid encoding: expected at least %d bytes, for integer tag data, got %d", 1, len(b))
}
data = getInt(b[:1])
case "jpeg", "png":
data = &Picture{
Ext: contentType,
MIMEType: "image/" + contentType,
Data: b,
}
}
m.data[name] = data
return nil
}
func readAtomHeader(r io.ReadSeeker) (name string, size uint32, err error) {
err = binary.Read(r, binary.BigEndian, &size)
if err != nil {
return
}
name, err = readString(r, 4)
return
}
// Generic atom.
// Should have 3 sub-atoms: mean, name and data.
// We check that mean is "com.apple.iTunes" and we use the subname as
// the name, and move to the data atom.
// Data atom could have multiple data values, each with a header.
// If anything goes wrong, we jump to the end of the "----" atom.
func readCustomAtom(r io.ReadSeeker, size uint32) (_ string, data []string, _ error) {
subNames := make(map[string]string)
for size > 8 {
subName, subSize, err := readAtomHeader(r)
if err != nil {
return "", nil, err
}
// Remove the size of the atom from the size counter
if size >= subSize {
size -= subSize
} else {
return "", nil, errors.New("--- invalid size")
}
b, err := readBytes(r, int(subSize-8))
if err != nil {
return "", nil, err
}
if len(b) < 4 {
return "", nil, fmt.Errorf("invalid encoding: expected at least %d bytes, got %d", 4, len(b))
}
switch subName {
case "mean", "name":
subNames[subName] = string(b[4:])
case "data":
data = append(data, string(b[4:]))
}
}
// there should remain only the header size
if size != 8 {
err := errors.New("---- atom out of bounds")
return "", nil, err
}
if subNames["mean"] != "com.apple.iTunes" || subNames["name"] == "" || len(data) == 0 {
return "----", nil, nil
}
return subNames["name"], data, nil
}
func (metadataMP4) Format() Format { return MP4 }
func (m metadataMP4) FileType() FileType { return m.fileType }
func (m metadataMP4) Raw() map[string]interface{} { return m.data }
func (m metadataMP4) getString(n []string) string {
for _, k := range n {
if x, ok := m.data[k]; ok {
return x.(string)
}
}
return ""
}
func (m metadataMP4) getInt(n []string) int {
for _, k := range n {
if x, ok := m.data[k]; ok {
return x.(int)
}
}
return 0
}
func (m metadataMP4) Title() string {
return m.getString(atoms.Name("title"))
}
func (m metadataMP4) Artist() string {
return m.getString(atoms.Name("artist"))
}
func (m metadataMP4) Album() string {
return m.getString(atoms.Name("album"))
}
func (m metadataMP4) AlbumArtist() string {
return m.getString(atoms.Name("album_artist"))
}
func (m metadataMP4) Composer() string {
return m.getString(atoms.Name("composer"))
}
func (m metadataMP4) Genre() string {
return m.getString(atoms.Name("genre"))
}
func (m metadataMP4) Year() int {
date := m.getString(atoms.Name("year"))
if len(date) >= 4 {
year, _ := strconv.Atoi(date[:4])
return year
}
return 0
}
func (m metadataMP4) Track() (int, int) {
x := m.getInt([]string{"trkn"})
if n, ok := m.data["trkn_count"]; ok {
return x, n.(int)
}
return x, 0
}
func (m metadataMP4) Disc() (int, int) {
x := m.getInt([]string{"disk"})
if n, ok := m.data["disk_count"]; ok {
return x, n.(int)
}
return x, 0
}
func (m metadataMP4) Lyrics() string {
t, ok := m.data["\xa9lyr"]
if !ok {
return ""
}
return t.(string)
}
func (m metadataMP4) Comment() string {
t, ok := m.data["\xa9cmt"]
if !ok {
return ""
}
return t.(string)
}
func (m metadataMP4) Picture() *Picture {
v, ok := m.data["covr"]
if !ok {
return nil
}
p, _ := v.(*Picture)
return p
}
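
For orientation, the MP4 reader above surfaces the parsed atoms through the Metadata accessors (Track and Disc come from the "trkn" and "disk" atoms handled in readAtomData). The following is a minimal sketch of calling it directly, assuming only the vendored dhowden/tag API shown above; "example.m4a" is a placeholder path, not a file from this commit.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/dhowden/tag"
)

func main() {
	// "example.m4a" is a placeholder path, not part of this repository.
	f, err := os.Open("example.m4a")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ReadAtoms walks the moov/udta/ilst atoms as shown above.
	m, err := tag.ReadAtoms(f)
	if err != nil {
		log.Fatal(err)
	}

	track, totalTracks := m.Track() // from the "trkn" atom
	disc, totalDiscs := m.Disc()    // from the "disk" atom
	fmt.Printf("%s - %s (track %d/%d, disc %d/%d)\n",
		m.Artist(), m.Title(), track, totalTracks, disc, totalDiscs)
}
```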

119
vendor/github.com/dhowden/tag/ogg.go generated vendored
View file

@ -1,119 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"errors"
"io"
)
const (
idType int = 1
commentType int = 3
)
// ReadOGGTags reads OGG metadata from the io.ReadSeeker, returning the resulting
// metadata in a Metadata implementation, or non-nil error if there was a problem.
// See http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
// and http://www.xiph.org/ogg/doc/framing.html for details.
func ReadOGGTags(r io.ReadSeeker) (Metadata, error) {
oggs, err := readString(r, 4)
if err != nil {
return nil, err
}
if oggs != "OggS" {
return nil, errors.New("expected 'OggS'")
}
// Skip 22 bytes of Page header to read page_segments length byte at position 26
// See http://www.xiph.org/ogg/doc/framing.html
_, err = r.Seek(22, io.SeekCurrent)
if err != nil {
return nil, err
}
nS, err := readInt(r, 1)
if err != nil {
return nil, err
}
// Seek and discard the segments
_, err = r.Seek(int64(nS), io.SeekCurrent)
if err != nil {
return nil, err
}
// First packet type is identification, type 1
t, err := readInt(r, 1)
if err != nil {
return nil, err
}
if t != idType {
return nil, errors.New("expected 'vorbis' identification type 1")
}
// Seek and discard 29 bytes from common and identification header
// See http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html#x1-610004.2
_, err = r.Seek(29, io.SeekCurrent)
if err != nil {
return nil, err
}
// Beginning of a new page. Comment packet is on a separate page
// See http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html#x1-132000A.2
oggs, err = readString(r, 4)
if err != nil {
return nil, err
}
if oggs != "OggS" {
return nil, errors.New("expected 'OggS'")
}
// Skip page 2 header, same as line 30
_, err = r.Seek(22, io.SeekCurrent)
if err != nil {
return nil, err
}
nS, err = readInt(r, 1)
if err != nil {
return nil, err
}
_, err = r.Seek(int64(nS), io.SeekCurrent)
if err != nil {
return nil, err
}
// Packet type is comment, type 3
t, err = readInt(r, 1)
if err != nil {
return nil, err
}
if t != commentType {
return nil, errors.New("expected 'vorbis' comment type 3")
}
// Seek and discard 6 bytes from common header
_, err = r.Seek(6, io.SeekCurrent)
if err != nil {
return nil, err
}
m := &metadataOGG{
newMetadataVorbis(),
}
err = m.readVorbisComment(r)
return m, err
}
type metadataOGG struct {
*metadataVorbis
}
func (m *metadataOGG) FileType() FileType {
return OGG
}

219
vendor/github.com/dhowden/tag/sum.go generated vendored
View file

@ -1,219 +0,0 @@
package tag
import (
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
)
// Sum creates a checksum of the audio file data provided by the io.ReadSeeker which is metadata
// (ID3, MP4) invariant.
func Sum(r io.ReadSeeker) (string, error) {
b, err := readBytes(r, 11)
if err != nil {
return "", err
}
_, err = r.Seek(-11, io.SeekCurrent)
if err != nil {
return "", fmt.Errorf("could not seek back to original position: %v", err)
}
switch {
case string(b[0:4]) == "fLaC":
return SumFLAC(r)
case string(b[4:11]) == "ftypM4A":
return SumAtoms(r)
case string(b[0:3]) == "ID3":
return SumID3v2(r)
}
h, err := SumID3v1(r)
if err != nil {
if err == ErrNotID3v1 {
return SumAll(r)
}
return "", err
}
return h, nil
}
// SumAll returns a checksum of the content from the reader (until EOF).
func SumAll(r io.ReadSeeker) (string, error) {
h := sha1.New()
_, err := io.Copy(h, r)
if err != nil {
return "", nil
}
return hashSum(h), nil
}
// SumAtoms constructs a checksum of MP4 audio file data provided by the io.ReadSeeker which is
// metadata invariant.
func SumAtoms(r io.ReadSeeker) (string, error) {
for {
var size uint32
err := binary.Read(r, binary.BigEndian, &size)
if err != nil {
if err == io.EOF {
return "", fmt.Errorf("reached EOF before audio data")
}
return "", err
}
name, err := readString(r, 4)
if err != nil {
return "", err
}
switch name {
case "meta":
// next_item_id (int32)
_, err := r.Seek(4, io.SeekCurrent)
if err != nil {
return "", err
}
fallthrough
case "moov", "udta", "ilst":
continue
case "mdat": // stop when we get to the data
h := sha1.New()
_, err := io.CopyN(h, r, int64(size-8))
if err != nil {
return "", fmt.Errorf("error reading audio data: %v", err)
}
return hashSum(h), nil
}
_, err = r.Seek(int64(size-8), io.SeekCurrent)
if err != nil {
return "", fmt.Errorf("error reading '%v' tag: %v", name, err)
}
}
}
func sizeToEndOffset(r io.ReadSeeker, offset int64) (int64, error) {
n, err := r.Seek(-offset, io.SeekEnd)
if err != nil {
return 0, fmt.Errorf("error seeking end offset (%d bytes): %v", offset, err)
}
_, err = r.Seek(-n, io.SeekCurrent)
if err != nil {
return 0, fmt.Errorf("error seeking back to original position: %v", err)
}
return n, nil
}
// SumID3v1 constructs a checksum of MP3 audio file data (assumed to have ID3v1 tags) provided
// by the io.ReadSeeker which is metadata invariant.
func SumID3v1(r io.ReadSeeker) (string, error) {
n, err := sizeToEndOffset(r, 128)
if err != nil {
return "", fmt.Errorf("error determining read size to ID3v1 header: %v", err)
}
// TODO: improve this check???
if n <= 0 {
return "", fmt.Errorf("file size must be greater than 128 bytes (ID3v1 header size) for MP3")
}
h := sha1.New()
_, err = io.CopyN(h, r, n)
if err != nil {
return "", fmt.Errorf("error reading %v bytes: %v", n, err)
}
return hashSum(h), nil
}
// SumID3v2 constructs a checksum of MP3 audio file data (assumed to have ID3v2 tags) provided by the
// io.ReadSeeker which is metadata invariant.
func SumID3v2(r io.ReadSeeker) (string, error) {
header, _, err := readID3v2Header(r)
if err != nil {
return "", fmt.Errorf("error reading ID3v2 header: %v", err)
}
_, err = r.Seek(int64(header.Size), io.SeekCurrent)
if err != nil {
return "", fmt.Errorf("error seeking to end of ID3V2 header: %v", err)
}
n, err := sizeToEndOffset(r, 128)
if err != nil {
return "", fmt.Errorf("error determining read size to ID3v1 header: %v", err)
}
// TODO: remove this check?????
if n < 0 {
return "", fmt.Errorf("file size must be greater than 128 bytes for MP3: %v bytes", n)
}
h := sha1.New()
_, err = io.CopyN(h, r, n)
if err != nil {
return "", fmt.Errorf("error reading %v bytes: %v", n, err)
}
return hashSum(h), nil
}
// SumFLAC constructs a checksum of the FLAC audio file data provided by the io.ReadSeeker (ignores
// metadata fields).
func SumFLAC(r io.ReadSeeker) (string, error) {
flac, err := readString(r, 4)
if err != nil {
return "", err
}
if flac != "fLaC" {
return "", errors.New("expected 'fLaC'")
}
for {
last, err := skipFLACMetadataBlock(r)
if err != nil {
return "", err
}
if last {
break
}
}
h := sha1.New()
_, err = io.Copy(h, r)
if err != nil {
return "", fmt.Errorf("error reading data bytes from FLAC: %v", err)
}
return hashSum(h), nil
}
func skipFLACMetadataBlock(r io.ReadSeeker) (last bool, err error) {
blockHeader, err := readBytes(r, 1)
if err != nil {
return
}
if getBit(blockHeader[0], 7) {
blockHeader[0] ^= (1 << 7)
last = true
}
blockLen, err := readInt(r, 3)
if err != nil {
return
}
_, err = r.Seek(int64(blockLen), io.SeekCurrent)
return
}
func hashSum(h hash.Hash) string {
return fmt.Sprintf("%x", h.Sum([]byte{}))
}
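
Sum dispatches to the format-specific helpers above so that two copies of the same audio with different tags hash identically. A hedged usage sketch follows, assuming the vendored tag.Sum signature shown above; "song.mp3" is a placeholder path.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/dhowden/tag"
)

func main() {
	// "song.mp3" is a placeholder path.
	f, err := os.Open("song.mp3")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Sum skips ID3/MP4 metadata, so retagging the file
	// does not change the resulting checksum.
	sum, err := tag.Sum(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("audio checksum:", sum)
}
```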

147
vendor/github.com/dhowden/tag/tag.go generated vendored
View file

@ -1,147 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tag provides MP3 (ID3: v1, 2.2, 2.3 and 2.4), MP4, FLAC and OGG metadata detection,
// parsing and artwork extraction.
//
// Detect and parse tag metadata from an io.ReadSeeker (i.e. an *os.File):
// m, err := tag.ReadFrom(f)
// if err != nil {
// log.Fatal(err)
// }
// log.Print(m.Format()) // The detected format.
// log.Print(m.Title()) // The title of the track (see Metadata interface for more details).
package tag
import (
"errors"
"fmt"
"io"
)
// ErrNoTagsFound is the error returned by ReadFrom when the metadata format
// cannot be identified.
var ErrNoTagsFound = errors.New("no tags found")
// ReadFrom detects and parses audio file metadata tags (currently supports ID3v1,2.{2,3,4}, MP4, FLAC/OGG).
// Returns non-nil error if the format of the given data could not be determined, or if there was a problem
// parsing the data.
func ReadFrom(r io.ReadSeeker) (Metadata, error) {
b, err := readBytes(r, 11)
if err != nil {
return nil, err
}
_, err = r.Seek(-11, io.SeekCurrent)
if err != nil {
return nil, fmt.Errorf("could not seek back to original position: %v", err)
}
switch {
case string(b[0:4]) == "fLaC":
return ReadFLACTags(r)
case string(b[0:4]) == "OggS":
return ReadOGGTags(r)
case string(b[4:8]) == "ftyp":
return ReadAtoms(r)
case string(b[0:3]) == "ID3":
return ReadID3v2Tags(r)
case string(b[0:4]) == "DSD ":
return ReadDSFTags(r)
}
m, err := ReadID3v1Tags(r)
if err != nil {
if err == ErrNotID3v1 {
err = ErrNoTagsFound
}
return nil, err
}
return m, nil
}
// Format is an enumeration of metadata types supported by this package.
type Format string
// Supported tag formats.
const (
UnknownFormat Format = "" // Unknown Format.
ID3v1 Format = "ID3v1" // ID3v1 tag format.
ID3v2_2 Format = "ID3v2.2" // ID3v2.2 tag format.
ID3v2_3 Format = "ID3v2.3" // ID3v2.3 tag format (most common).
ID3v2_4 Format = "ID3v2.4" // ID3v2.4 tag format.
MP4 Format = "MP4" // MP4 tag (atom) format (see http://www.ftyps.com/ for a full file type list)
VORBIS Format = "VORBIS" // Vorbis Comment tag format.
)
// FileType is an enumeration of the audio file types supported by this package, in particular
// there are audio file types which share metadata formats, and this type is used to distinguish
// between them.
type FileType string
// Supported file types.
const (
UnknownFileType FileType = "" // Unknown FileType.
MP3 FileType = "MP3" // MP3 file
M4A FileType = "M4A" // M4A file Apple iTunes (AAC) Audio
M4B FileType = "M4B" // M4B file Apple iTunes (AAC) Audio Book
M4P FileType = "M4P" // M4P file Apple iTunes (AAC) AES Protected Audio
ALAC FileType = "ALAC" // Apple Lossless file FIXME: actually detect this
FLAC FileType = "FLAC" // FLAC file
OGG FileType = "OGG" // OGG file
DSF FileType = "DSF" // DSF file DSD Sony format see https://dsd-guide.com/sites/default/files/white-papers/DSFFileFormatSpec_E.pdf
)
// Metadata is an interface which is used to describe metadata retrieved by this package.
type Metadata interface {
// Format returns the metadata Format used to encode the data.
Format() Format
// FileType returns the file type of the audio file.
FileType() FileType
// Title returns the title of the track.
Title() string
// Album returns the album name of the track.
Album() string
// Artist returns the artist name of the track.
Artist() string
// AlbumArtist returns the album artist name of the track.
AlbumArtist() string
// Composer returns the composer of the track.
Composer() string
// Year returns the year of the track.
Year() int
// Genre returns the genre of the track.
Genre() string
// Track returns the track number and total tracks, or zero values if unavailable.
Track() (int, int)
// Disc returns the disc number and total discs, or zero values if unavailable.
Disc() (int, int)
// Picture returns a picture, or nil if not available.
Picture() *Picture
// Lyrics returns the lyrics, or an empty string if unavailable.
Lyrics() string
// Comment returns the comment, or an empty string if unavailable.
Comment() string
// Raw returns the raw mapping of retrieved tag names and associated values.
// NB: tag/atom names are not standardised between formats.
Raw() map[string]interface{}
}
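
The Metadata interface is the package's public surface; ReadFrom sniffs the leading bytes and hands off to the matching reader. The sketch below shows typical use, assuming only the API visible above; the input path and output filename are placeholders.

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/dhowden/tag"
)

func main() {
	f, err := os.Open("track.flac") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	m, err := tag.ReadFrom(f) // format is auto-detected from the leading bytes
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s / %s (%s, %s)", m.Artist(), m.Title(), m.Format(), m.FileType())

	// Picture returns nil when no embedded artwork is present.
	if p := m.Picture(); p != nil {
		_ = ioutil.WriteFile("cover."+p.Ext, p.Data, 0644)
	}
}
```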

View file

@ -1,81 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"encoding/binary"
"io"
)
func getBit(b byte, n uint) bool {
x := byte(1 << n)
return (b & x) == x
}
func get7BitChunkedInt(b []byte) int {
var n int
for _, x := range b {
n = n << 7
n |= int(x)
}
return n
}
func getInt(b []byte) int {
var n int
for _, x := range b {
n = n << 8
n |= int(x)
}
return n
}
func getIntLittleEndian(b []byte) int {
var n int
for i := len(b) - 1; i >= 0; i-- {
n = n << 8
n |= int(b[i])
}
return n
}
func readBytes(r io.Reader, n int) ([]byte, error) {
b := make([]byte, n)
_, err := io.ReadFull(r, b)
if err != nil {
return nil, err
}
return b, nil
}
func readString(r io.Reader, n int) (string, error) {
b, err := readBytes(r, n)
if err != nil {
return "", err
}
return string(b), nil
}
func readInt(r io.Reader, n int) (int, error) {
b, err := readBytes(r, n)
if err != nil {
return 0, err
}
return getInt(b), nil
}
func read7BitChunkedInt(r io.Reader, n int) (int, error) {
b, err := readBytes(r, n)
if err != nil {
return 0, err
}
return get7BitChunkedInt(b), nil
}
func readInt32LittleEndian(r io.Reader) (int, error) {
var n int32
err := binary.Read(r, binary.LittleEndian, &n)
return int(n), err
}

View file

@ -1,255 +0,0 @@
// Copyright 2015, David Howden
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tag
import (
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
)
func newMetadataVorbis() *metadataVorbis {
return &metadataVorbis{
c: make(map[string]string),
}
}
type metadataVorbis struct {
c map[string]string // the vorbis comments
p *Picture
}
func (m *metadataVorbis) readVorbisComment(r io.Reader) error {
vendorLen, err := readInt32LittleEndian(r)
if err != nil {
return err
}
if vendorLen < 0 {
return fmt.Errorf("invalid encoding: expected positive length, got %d", vendorLen)
}
vendor, err := readString(r, vendorLen)
if err != nil {
return err
}
m.c["vendor"] = vendor
commentsLen, err := readInt32LittleEndian(r)
if err != nil {
return err
}
for i := 0; i < commentsLen; i++ {
l, err := readInt32LittleEndian(r)
if err != nil {
return err
}
s, err := readString(r, l)
if err != nil {
return err
}
k, v, err := parseComment(s)
if err != nil {
return err
}
m.c[strings.ToLower(k)] = v
}
return nil
}
func (m *metadataVorbis) readPictureBlock(r io.Reader) error {
b, err := readInt(r, 4)
if err != nil {
return err
}
pictureType, ok := pictureTypes[byte(b)]
if !ok {
return fmt.Errorf("invalid picture type: %v", b)
}
mimeLen, err := readInt(r, 4)
if err != nil {
return err
}
mime, err := readString(r, mimeLen)
if err != nil {
return err
}
ext := ""
switch mime {
case "image/jpeg":
ext = "jpg"
case "image/png":
ext = "png"
case "image/gif":
ext = "gif"
}
descLen, err := readInt(r, 4)
if err != nil {
return err
}
desc, err := readString(r, descLen)
if err != nil {
return err
}
// We skip width <32>, height <32>, colorDepth <32>, colorsUsed <32>
_, err = readInt(r, 4) // width
if err != nil {
return err
}
_, err = readInt(r, 4) // height
if err != nil {
return err
}
_, err = readInt(r, 4) // color depth
if err != nil {
return err
}
_, err = readInt(r, 4) // colors used
if err != nil {
return err
}
dataLen, err := readInt(r, 4)
if err != nil {
return err
}
data := make([]byte, dataLen)
_, err = io.ReadFull(r, data)
if err != nil {
return err
}
m.p = &Picture{
Ext: ext,
MIMEType: mime,
Type: pictureType,
Description: desc,
Data: data,
}
return nil
}
func parseComment(c string) (k, v string, err error) {
kv := strings.SplitN(c, "=", 2)
if len(kv) != 2 {
err = errors.New("vorbis comment must contain '='")
return
}
k = kv[0]
v = kv[1]
return
}
func (m *metadataVorbis) Format() Format {
return VORBIS
}
func (m *metadataVorbis) Raw() map[string]interface{} {
raw := make(map[string]interface{}, len(m.c))
for k, v := range m.c {
raw[k] = v
}
return raw
}
func (m *metadataVorbis) Title() string {
return m.c["title"]
}
func (m *metadataVorbis) Artist() string {
// PERFORMER
// The artist(s) who performed the work. In classical music this would be the
// conductor, orchestra, soloists. In an audio book it would be the actor who
// did the reading. In popular music this is typically the same as the ARTIST
// and is omitted.
if m.c["performer"] != "" {
return m.c["performer"]
}
return m.c["artist"]
}
func (m *metadataVorbis) Album() string {
return m.c["album"]
}
func (m *metadataVorbis) AlbumArtist() string {
// This field isn't actually included in the standard, though
// it is commonly assigned to albumartist.
return m.c["albumartist"]
}
func (m *metadataVorbis) Composer() string {
// ARTIST
// The artist generally considered responsible for the work. In popular music
// this is usually the performing band or singer. For classical music it would
// be the composer. For an audio book it would be the author of the original text.
if m.c["composer"] != "" {
return m.c["composer"]
}
if m.c["performer"] == "" {
return ""
}
return m.c["artist"]
}
func (m *metadataVorbis) Genre() string {
return m.c["genre"]
}
func (m *metadataVorbis) Year() int {
var dateFormat string
// The date needs to follow the international standard https://en.wikipedia.org/wiki/ISO_8601
// and obviously the VorbisComment standard https://wiki.xiph.org/VorbisComment#Date_and_time
switch len(m.c["date"]) {
case 0:
return 0
case 4:
dateFormat = "2006"
case 7:
dateFormat = "2006-01"
case 10:
dateFormat = "2006-01-02"
}
t, _ := time.Parse(dateFormat, m.c["date"])
return t.Year()
}
func (m *metadataVorbis) Track() (int, int) {
x, _ := strconv.Atoi(m.c["tracknumber"])
// https://wiki.xiph.org/Field_names
n, _ := strconv.Atoi(m.c["tracktotal"])
return x, n
}
func (m *metadataVorbis) Disc() (int, int) {
// https://wiki.xiph.org/Field_names
x, _ := strconv.Atoi(m.c["discnumber"])
n, _ := strconv.Atoi(m.c["disctotal"])
return x, n
}
func (m *metadataVorbis) Lyrics() string {
return m.c["lyrics"]
}
func (m *metadataVorbis) Comment() string {
if m.c["comment"] != "" {
return m.c["comment"]
}
return m.c["description"]
}
func (m *metadataVorbis) Picture() *Picture {
return m.p
}

View file

@ -1,5 +0,0 @@
root = true
[*]
indent_style = tab
indent_size = 4

View file

@ -1,6 +0,0 @@
# Setup a Global .gitignore for OS and editor generated files:
# https://help.github.com/articles/ignoring-files
# git config --global core.excludesfile ~/.gitignore_global
.vagrant
*.sublime-project

View file

@ -1,30 +0,0 @@
sudo: false
language: go
go:
- 1.8.x
- 1.9.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
before_script:
- go get -u github.com/golang/lint/golint
script:
- go test -v --race ./...
after_script:
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- test -z "$(golint ./... | tee /dev/stderr)"
- go vet ./...
os:
- linux
- osx
notifications:
email: false

View file

@ -1,52 +0,0 @@
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
# Please keep the list sorted.
Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
Rodrigo Chiossi <rodrigochiossi@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Vahe Khachikyan <vahe@live.ca>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>
铁哥 <guotie.9@gmail.com>

View file

@ -1,317 +0,0 @@
# Changelog
## v1.4.7 / 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
* Linux: Fix deadlock in Remove (thanks @aarondl)
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
* Docs: Moved FAQ into the README (thanks @vahe)
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## v1.4.2 / 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## v1.4.1 / 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## v1.4.0 / 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## v1.3.1 / 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## v1.3.0 / 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## v1.2.10 / 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## v1.2.9 / 2016-01-13
* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## v1.2.8 / 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## v1.2.5 / 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## v1.2.1 / 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## v1.2.0 / 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## v1.1.1 / 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## v1.1.0 / 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
* only need to store flags on directories
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
* done can be an unbuffered channel
* remove calls to os.NewSyscallError
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v1.0.4 / 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## v1.0.3 / 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## v1.0.2 / 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## v1.0.0 / 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
* Minor updates based on feedback from golint.
## dev / 2014-07-09
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
## dev / 2014-07-04
* kqueue: fix incorrect mutex used in Close()
* Update example to demonstrate usage of Op.
## dev / 2014-06-28
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
* Fix for String() method on Event (thanks Alex Brainman)
* Don't build on Plan 9 or Solaris (thanks @4ad)
## dev / 2014-06-21
* Events channel of type Event rather than *Event.
* [internal] use syscall constants directly for inotify and kqueue.
* [internal] kqueue: rename events to kevents and fileEvent to event.
## dev / 2014-06-19
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
* [internal] remove cookie from Event struct (unused).
* [internal] Event struct has the same definition across every OS.
* [internal] remove internal watch and removeWatch methods.
## dev / 2014-06-12
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
* [API] Pluralized channel names: Events and Errors.
* [API] Renamed FileEvent struct to Event.
* [API] Op constants replace methods like IsCreate().
## dev / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## dev / 2014-05-23
* [API] Remove current implementation of WatchFlags.
* current implementation doesn't take advantage of OS for efficiency
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## v0.9.3 / 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v0.9.2 / 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## v0.9.1 / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## v0.9.0 / 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## v0.8.12 / 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## v0.8.11 / 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## v0.8.9 / 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## v0.8.8 / 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## v0.8.7 / 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## v0.8.6 / 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## v0.8.5 / 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## v0.8.4 / 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## v0.8.3 / 2013-03-13
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## v0.8.2 / 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## v0.8.1 / 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## v0.8.0 / 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## v0.7.4 / 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## v0.7.3 / 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## v0.7.2 / 2012-09-01
* kqueue: events for created directories
## v0.7.1 / 2012-07-14
* [Fix] for renaming files
## v0.7.0 / 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## v0.6.0 / 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## v0.5.1 / 2012-05-22
* [Fix] inotify: remove all watches before Close()
## v0.5.0 / 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## v0.4.0 / 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## v0.3.0 / 2012-02-19
* kqueue: add files when watch directory
## v0.2.0 / 2011-12-30
* update to latest Go weekly code
## v0.1.0 / 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21

View file

@ -1,77 +0,0 @@
# Contributing
## Issues
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
* Please indicate the platform you are using fsnotify on.
* A code example to reproduce the problem is appreciated.
## Pull Requests
### Contributor License Agreement
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
Please indicate that you have signed the CLA in your pull request.
### How fsnotify is Developed
* Development is done on feature branches.
* Tests are run on BSD, Linux, macOS and Windows.
* Pull requests are reviewed and [applied to master][am] using [hub][].
* Maintainers may modify or squash commits rather than asking contributors to.
* To issue a new release, the maintainers will:
* Update the CHANGELOG
* Tag a version, which will become available through gopkg.in.
### How to Fork
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Ensure everything works and the tests pass (see below)
4. Commit your changes (`git commit -am 'Add some feature'`)
Contribute upstream:
1. Fork fsnotify on GitHub
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
3. Push to the branch (`git push fork my-new-feature`)
4. Create a new Pull Request on GitHub
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
### Testing
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/).
[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

View file

@ -1,28 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,79 +0,0 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)
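
As a quick orientation for readers who do not open example_test.go, here is a minimal watcher loop. It is a sketch based only on the API documented above; "./videos" is a placeholder directory, not a path taken from this commit.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a single directory (non-recursive); "./videos" is a placeholder.
	if err := watcher.Add("./videos"); err != nil {
		log.Fatal(err)
	}

	// Events and Errors must both be drained, typically in their own goroutine.
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println("event:", event)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```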

View file

@ -1,37 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

View file

@ -1,66 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"errors"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
Name string // Relative path to the file or directory.
Op Op // File operation that triggered the event.
}
// Op describes a set of file operations.
type Op uint32
// These are the generalized file operations that can trigger a notification.
const (
Create Op = 1 << iota
Write
Remove
Rename
Chmod
)
func (op Op) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if op&Create == Create {
buffer.WriteString("|CREATE")
}
if op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if op&Write == Write {
buffer.WriteString("|WRITE")
}
if op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
if buffer.Len() == 0 {
return ""
}
return buffer.String()[1:] // Strip leading pipe
}
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")
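
Since Op is a bitmask, consumers normally test individual bits rather than comparing the whole value for equality. The short sketch below is illustrative only; handleEvent and the sample event are not part of this commit.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// handleEvent tests individual Op bits; several operations can be set on one event.
func handleEvent(ev fsnotify.Event) {
	if ev.Op&fsnotify.Create == fsnotify.Create {
		log.Println("created:", ev.Name)
	}
	if ev.Op&fsnotify.Write == fsnotify.Write {
		log.Println("modified:", ev.Name)
	}
	if ev.Op&(fsnotify.Remove|fsnotify.Rename) != 0 {
		log.Println("removed or renamed:", ev.Name)
	}
}

func main() {
	// A fabricated event purely to exercise handleEvent.
	handleEvent(fsnotify.Event{Name: "example.mp4", Op: fsnotify.Create | fsnotify.Write})
}
```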

View file

@ -1,337 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error, we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// If EOF is received. This should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}

View file

@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(0)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// I don't know whether epoll_wait returns the number of events returned,
// or the total number of events ready.
// I decided to catch both by making the buffer one larger than the maximum.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// Write pipe descriptor was closed, by us. This means we're closing down the
// watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// An error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// wake wakes up the poller by writing a byte to the write end of the pipe.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}

View file

@ -1,521 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of whether we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
// copy paths to remove while locked
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// send a "quit" message to the reader goroutine
close(w.done)
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
break loop
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
select {
case w.Errors <- err:
case <-w.done:
break loop
}
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do an rm -fr on recursively watched folders and receive a
// modification event first, but the folder has been deleted and we later
// receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel.
select {
case w.Events <- event:
case <-w.done:
break loop
}
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
// cleanup
err := unix.Close(w.kq)
if err != nil {
// The only way the previous loop breaks is if w.done was closed, so we need a non-blocking send to w.Errors.
select {
case w.Errors <- err:
default:
}
}
close(w.Events)
close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
select {
case w.Errors <- err:
case <-w.done:
return
}
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
select {
case w.Events <- newCreateEvent(filePath):
case <-w.done:
return
}
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}

View file

@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY

View file

@ -1,12 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY

View file

@ -1,561 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}

View file

@ -1,16 +0,0 @@
language: go
sudo: false
matrix:
include:
- go: 1.8
- go: 1.9
- go: "1.10"
- go: 1.x
- go: tip
allow_failures:
- go: tip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go vet .
- go test -v -race ./...

View file

@ -1,29 +0,0 @@
# This is the official list of gorilla/feeds authors for copyright purposes.
# Please keep the list sorted.
Dmitry Chestnykh <dmitry@codingrobots.com>
Eddie Scholtz <eascholtz@gmail.com>
Gabriel Simmer <bladesimmer@gmail.com>
Google LLC (https://opensource.google.com/)
honky <honky@defendtheplanet.net>
James Gregory <james@jagregory.com>
Jason Hall <imjasonh@gmail.com>
Jason Moiron <jmoiron@jmoiron.net>
Kamil Kisiel <kamil@kamilkisiel.net>
Kevin Stock <kevinstock@tantalic.com>
Markus Zimmermann <markus.zimmermann@nethead.at>
Matt Silverlock <matt@eatsleeprepeat.net>
Matthew Dawson <matthew@mjdsystems.ca>
Milan Aleksic <milanaleksic@gmail.com>
Milan Aleksić <milanaleksic@gmail.com>
nlimpid <jshuangzl@gmail.com>
Paul Petring <paul@defendtheplanet.net>
Sean Enck <enckse@users.noreply.github.com>
Sue Spence <virtuallysue@gmail.com>
Supermighty <ukiah@faction.com>
Toru Fukui <fukuimone@gmail.com>
Vabd <vabd@anon.acme>
Volker <lists.volker@gmail.com>
ZhiFeng Hu <hufeng1987@gmail.com>
weberc2 <weberc2@gmail.com>

View file

@ -1,22 +0,0 @@
Copyright (c) 2013-2018 The Gorilla Feeds Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,185 +0,0 @@
## gorilla/feeds
[![GoDoc](https://godoc.org/github.com/gorilla/feeds?status.svg)](https://godoc.org/github.com/gorilla/feeds)
[![Build Status](https://travis-ci.org/gorilla/feeds.svg?branch=master)](https://travis-ci.org/gorilla/feeds)
feeds is a web feed generator library for generating RSS, Atom and JSON feeds from Go
applications.
### Goals
* Provide a simple interface to create both Atom & RSS 2.0 feeds
* Full support for [Atom][atom], [RSS 2.0][rss], and [JSON Feed Version 1][jsonfeed] spec elements
* Ability to modify particulars for each spec
[atom]: https://tools.ietf.org/html/rfc4287
[rss]: http://www.rssboard.org/rss-specification
[jsonfeed]: https://jsonfeed.org/version/1
### Usage
```go
package main
import (
"fmt"
"log"
"time"
"github.com/gorilla/feeds"
)
func main() {
now := time.Now()
feed := &feeds.Feed{
Title: "jmoiron.net blog",
Link: &feeds.Link{Href: "http://jmoiron.net/blog"},
Description: "discussion about tech, footie, photos",
Author: &feeds.Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
}
feed.Items = []*feeds.Item{
&feeds.Item{
Title: "Limiting Concurrency in Go",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/limiting-concurrency-in-go/"},
Description: "A discussion on controlled parallelism in golang",
Author: &feeds.Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
},
&feeds.Item{
Title: "Logic-less Template Redux",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/logicless-template-redux/"},
Description: "More thoughts on logicless templates",
Created: now,
},
&feeds.Item{
Title: "Idiomatic Code Reuse in Go",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"},
Description: "How to use interfaces <em>effectively</em>",
Created: now,
},
}
atom, err := feed.ToAtom()
if err != nil {
log.Fatal(err)
}
rss, err := feed.ToRss()
if err != nil {
log.Fatal(err)
}
json, err := feed.ToJSON()
if err != nil {
log.Fatal(err)
}
fmt.Println(atom, "\n", rss, "\n", json)
}
```
Outputs:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>jmoiron.net blog</title>
<link href="http://jmoiron.net/blog"></link>
<id>http://jmoiron.net/blog</id>
<updated>2013-01-16T03:26:01-05:00</updated>
<summary>discussion about tech, footie, photos</summary>
<entry>
<title>Limiting Concurrency in Go</title>
<link href="http://jmoiron.net/blog/limiting-concurrency-in-go/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/limiting-concurrency-in-go/</id>
<summary type="html">A discussion on controlled parallelism in golang</summary>
<author>
<name>Jason Moiron</name>
<email>jmoiron@jmoiron.net</email>
</author>
</entry>
<entry>
<title>Logic-less Template Redux</title>
<link href="http://jmoiron.net/blog/logicless-template-redux/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/logicless-template-redux/</id>
<summary type="html">More thoughts on logicless templates</summary>
<author></author>
</entry>
<entry>
<title>Idiomatic Code Reuse in Go</title>
<link href="http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/idiomatic-code-reuse-in-go/</id>
<summary type="html">How to use interfaces &lt;em&gt;effectively&lt;/em&gt;</summary>
<author></author>
</entry>
</feed>
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
<channel>
<title>jmoiron.net blog</title>
<link>http://jmoiron.net/blog</link>
<description>discussion about tech, footie, photos</description>
<managingEditor>jmoiron@jmoiron.net (Jason Moiron)</managingEditor>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
<item>
<title>Limiting Concurrency in Go</title>
<link>http://jmoiron.net/blog/limiting-concurrency-in-go/</link>
<description>A discussion on controlled parallelism in golang</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
<item>
<title>Logic-less Template Redux</title>
<link>http://jmoiron.net/blog/logicless-template-redux/</link>
<description>More thoughts on logicless templates</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
<item>
<title>Idiomatic Code Reuse in Go</title>
<link>http://jmoiron.net/blog/idiomatic-code-reuse-in-go/</link>
<description>How to use interfaces &lt;em&gt;effectively&lt;/em&gt;</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
</channel>
</rss>
{
"version": "https://jsonfeed.org/version/1",
"title": "jmoiron.net blog",
"home_page_url": "http://jmoiron.net/blog",
"description": "discussion about tech, footie, photos",
"author": {
"name": "Jason Moiron"
},
"items": [
{
"id": "",
"url": "http://jmoiron.net/blog/limiting-concurrency-in-go/",
"title": "Limiting Concurrency in Go",
"summary": "A discussion on controlled parallelism in golang",
"date_published": "2013-01-16T03:22:24.530817846-05:00",
"author": {
"name": "Jason Moiron"
}
},
{
"id": "",
"url": "http://jmoiron.net/blog/logicless-template-redux/",
"title": "Logic-less Template Redux",
"summary": "More thoughts on logicless templates",
"date_published": "2013-01-16T03:22:24.530817846-05:00"
},
{
"id": "",
"url": "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/",
"title": "Idiomatic Code Reuse in Go",
"summary": "How to use interfaces \u003cem\u003eeffectively\u003c/em\u003e",
"date_published": "2013-01-16T03:22:24.530817846-05:00"
}
]
}
```

View file

@ -1,169 +0,0 @@
package feeds
import (
"encoding/xml"
"fmt"
"net/url"
"time"
)
// Generates Atom feed as XML
const ns = "http://www.w3.org/2005/Atom"
type AtomPerson struct {
Name string `xml:"name,omitempty"`
Uri string `xml:"uri,omitempty"`
Email string `xml:"email,omitempty"`
}
type AtomSummary struct {
XMLName xml.Name `xml:"summary"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomContent struct {
XMLName xml.Name `xml:"content"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomAuthor struct {
XMLName xml.Name `xml:"author"`
AtomPerson
}
type AtomContributor struct {
XMLName xml.Name `xml:"contributor"`
AtomPerson
}
type AtomEntry struct {
XMLName xml.Name `xml:"entry"`
Xmlns string `xml:"xmlns,attr,omitempty"`
Title string `xml:"title"` // required
Updated string `xml:"updated"` // required
Id string `xml:"id"` // required
Category string `xml:"category,omitempty"`
Content *AtomContent
Rights string `xml:"rights,omitempty"`
Source string `xml:"source,omitempty"`
Published string `xml:"published,omitempty"`
Contributor *AtomContributor
Links []AtomLink // required if no child 'content' elements
Summary *AtomSummary // required if content has src or content is base64
Author *AtomAuthor // required if feed lacks an author
}
// Multiple links with different rel can coexist
type AtomLink struct {
//Atom 1.0 <link rel="enclosure" type="audio/mpeg" title="MP3" href="http://www.example.org/myaudiofile.mp3" length="1234" />
XMLName xml.Name `xml:"link"`
Href string `xml:"href,attr"`
Rel string `xml:"rel,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
Length string `xml:"length,attr,omitempty"`
}
type AtomFeed struct {
XMLName xml.Name `xml:"feed"`
Xmlns string `xml:"xmlns,attr"`
Title string `xml:"title"` // required
Id string `xml:"id"` // required
Updated string `xml:"updated"` // required
Category string `xml:"category,omitempty"`
Icon string `xml:"icon,omitempty"`
Logo string `xml:"logo,omitempty"`
Rights string `xml:"rights,omitempty"` // copyright used
Subtitle string `xml:"subtitle,omitempty"`
Link *AtomLink
Author *AtomAuthor `xml:"author,omitempty"`
Contributor *AtomContributor
Entries []*AtomEntry `xml:"entry"`
}
type Atom struct {
*Feed
}
func newAtomEntry(i *Item) *AtomEntry {
id := i.Id
// assume the description is html
s := &AtomSummary{Content: i.Description, Type: "html"}
if len(id) == 0 {
// if there's no id set, try to create one, either from data or just a uuid
if len(i.Link.Href) > 0 && (!i.Created.IsZero() || !i.Updated.IsZero()) {
dateStr := anyTimeFormat("2006-01-02", i.Updated, i.Created)
host, path := i.Link.Href, "/invalid.html"
if url, err := url.Parse(i.Link.Href); err == nil {
host, path = url.Host, url.Path
}
id = fmt.Sprintf("tag:%s,%s:%s", host, dateStr, path)
} else {
id = "urn:uuid:" + NewUUID().String()
}
}
var name, email string
if i.Author != nil {
name, email = i.Author.Name, i.Author.Email
}
link_rel := i.Link.Rel
if link_rel == "" {
link_rel = "alternate"
}
x := &AtomEntry{
Title: i.Title,
Links: []AtomLink{{Href: i.Link.Href, Rel: link_rel, Type: i.Link.Type}},
Id: id,
Updated: anyTimeFormat(time.RFC3339, i.Updated, i.Created),
Summary: s,
}
// if there's content, assume it's html
if len(i.Content) > 0 {
x.Content = &AtomContent{Content: i.Content, Type: "html"}
}
if i.Enclosure != nil && link_rel != "enclosure" {
x.Links = append(x.Links, AtomLink{Href: i.Enclosure.Url, Rel: "enclosure", Type: i.Enclosure.Type, Length: i.Enclosure.Length})
}
if len(name) > 0 || len(email) > 0 {
x.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: name, Email: email}}
}
return x
}
// create a new AtomFeed with a generic Feed struct's data
func (a *Atom) AtomFeed() *AtomFeed {
updated := anyTimeFormat(time.RFC3339, a.Updated, a.Created)
feed := &AtomFeed{
Xmlns: ns,
Title: a.Title,
Link: &AtomLink{Href: a.Link.Href, Rel: a.Link.Rel},
Subtitle: a.Description,
Id: a.Link.Href,
Updated: updated,
Rights: a.Copyright,
}
if a.Author != nil {
feed.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: a.Author.Name, Email: a.Author.Email}}
}
for _, e := range a.Items {
feed.Entries = append(feed.Entries, newAtomEntry(e))
}
return feed
}
// FeedXml returns an XML-Ready object for an Atom object
func (a *Atom) FeedXml() interface{} {
return a.AtomFeed()
}
// FeedXml returns an XML-ready object for an AtomFeed object
func (a *AtomFeed) FeedXml() interface{} {
return a
}

View file

@ -1,73 +0,0 @@
/*
Syndication (feed) generator library for golang.
Installing
go get github.com/gorilla/feeds
Feeds provides a simple, generic Feed interface with a generic Item object as well as RSS, Atom and JSON Feed specific RssFeed, AtomFeed and JSONFeed objects which allow access to all of each spec's defined elements.
Examples
Create a Feed and some Items in that feed using the generic interfaces:
import (
"time"
. "github.com/gorilla/feeds"
)
now = time.Now()
feed := &Feed{
Title: "jmoiron.net blog",
Link: &Link{Href: "http://jmoiron.net/blog"},
Description: "discussion about tech, footie, photos",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
Copyright: "This work is copyright © Benjamin Button",
}
feed.Items = []*Item{
&Item{
Title: "Limiting Concurrency in Go",
Link: &Link{Href: "http://jmoiron.net/blog/limiting-concurrency-in-go/"},
Description: "A discussion on controlled parallelism in golang",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
},
&Item{
Title: "Logic-less Template Redux",
Link: &Link{Href: "http://jmoiron.net/blog/logicless-template-redux/"},
Description: "More thoughts on logicless templates",
Created: now,
},
&Item{
Title: "Idiomatic Code Reuse in Go",
Link: &Link{Href: "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"},
Description: "How to use interfaces <em>effectively</em>",
Created: now,
},
}
From here, you can output Atom, RSS, or JSON Feed versions of this feed easily
atom, err := feed.ToAtom()
rss, err := feed.ToRss()
json, err := feed.ToJSON()
You can also get access to the underlying objects that feeds uses to export its XML
atomFeed := (&Atom{Feed: feed}).AtomFeed()
rssFeed := (&Rss{Feed: feed}).RssFeed()
jsonFeed := (&JSON{Feed: feed}).JSONFeed()
From here, you can modify or add each syndication's specific fields before outputting
atomFeed.Subtitle = "plays the blues"
atom, err := ToXML(atomFeed)
rssFeed.Generator = "gorilla/feeds v1.0 (github.com/gorilla/feeds)"
rss, err := ToXML(rssFeed)
jsonFeed.NextUrl = "https://www.example.com/feed.json?page=2"
json, err := jsonFeed.ToJSON()
*/
package feeds

View file

@ -1,145 +0,0 @@
package feeds
import (
"encoding/json"
"encoding/xml"
"io"
"sort"
"time"
)
type Link struct {
Href, Rel, Type, Length string
}
type Author struct {
Name, Email string
}
type Image struct {
Url, Title, Link string
Width, Height int
}
type Enclosure struct {
Url, Length, Type string
}
type Item struct {
Title string
Link *Link
Source *Link
Author *Author
Description string // used as description in rss, summary in atom
Id string // used as guid in rss, id in atom
Updated time.Time
Created time.Time
Enclosure *Enclosure
Content string
}
type Feed struct {
Title string
Link *Link
Description string
Author *Author
Updated time.Time
Created time.Time
Id string
Subtitle string
Items []*Item
Copyright string
Image *Image
}
// add a new Item to a Feed
func (f *Feed) Add(item *Item) {
f.Items = append(f.Items, item)
}
// returns the first non-zero time formatted as a string or ""
func anyTimeFormat(format string, times ...time.Time) string {
for _, t := range times {
if !t.IsZero() {
return t.Format(format)
}
}
return ""
}
// interface used by ToXML to get an object suitable for exporting XML.
type XmlFeed interface {
FeedXml() interface{}
}
// turn a feed object (either a Feed, AtomFeed, or RssFeed) into xml
// returns an error if xml marshaling fails
func ToXML(feed XmlFeed) (string, error) {
x := feed.FeedXml()
data, err := xml.MarshalIndent(x, "", " ")
if err != nil {
return "", err
}
// strip empty line from default xml header
s := xml.Header[:len(xml.Header)-1] + string(data)
return s, nil
}
// WriteXML writes a feed object (either a Feed, AtomFeed, or RssFeed) as XML into
// the writer. Returns an error if XML marshaling fails.
func WriteXML(feed XmlFeed, w io.Writer) error {
x := feed.FeedXml()
// write default xml header, without the newline
if _, err := w.Write([]byte(xml.Header[:len(xml.Header)-1])); err != nil {
return err
}
e := xml.NewEncoder(w)
e.Indent("", " ")
return e.Encode(x)
}
// creates an Atom representation of this feed
func (f *Feed) ToAtom() (string, error) {
a := &Atom{f}
return ToXML(a)
}
// WriteAtom writes an Atom representation of this feed to the writer.
func (f *Feed) WriteAtom(w io.Writer) error {
return WriteXML(&Atom{f}, w)
}
// creates an Rss representation of this feed
func (f *Feed) ToRss() (string, error) {
r := &Rss{f}
return ToXML(r)
}
// WriteRss writes an RSS representation of this feed to the writer.
func (f *Feed) WriteRss(w io.Writer) error {
return WriteXML(&Rss{f}, w)
}
// ToJSON creates a JSON Feed representation of this feed
func (f *Feed) ToJSON() (string, error) {
j := &JSON{f}
return j.ToJSON()
}
// WriteJSON writes a JSON representation of this feed to the writer.
func (f *Feed) WriteJSON(w io.Writer) error {
j := &JSON{f}
feed := j.JSONFeed()
e := json.NewEncoder(w)
e.SetIndent("", " ")
return e.Encode(feed)
}
// Sort sorts the Items in the feed with the given less function.
func (f *Feed) Sort(less func(a, b *Item) bool) {
lessFunc := func(i, j int) bool {
return less(f.Items[i], f.Items[j])
}
sort.SliceStable(f.Items, lessFunc)
}
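
The Sort and Write* helpers above are not exercised by the README example; the following is a rough sketch (not part of this commit) of sorting a feed newest-first and streaming it as RSS — the titles and example.com URLs are made up.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	feed := &feeds.Feed{
		Title:   "example feed",
		Link:    &feeds.Link{Href: "http://example.com/"},
		Created: time.Now(),
	}
	feed.Add(&feeds.Item{
		Title:   "older post",
		Link:    &feeds.Link{Href: "http://example.com/older"},
		Created: time.Now().Add(-time.Hour),
	})
	feed.Add(&feeds.Item{
		Title:   "newer post",
		Link:    &feeds.Link{Href: "http://example.com/newer"},
		Created: time.Now(),
	})

	// Sort newest-first using the less function accepted by Feed.Sort above.
	feed.Sort(func(a, b *feeds.Item) bool {
		return a.Created.After(b.Created)
	})

	// WriteRss streams the RSS document to any io.Writer.
	if err := feed.WriteRss(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```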

View file

@ -1,183 +0,0 @@
package feeds
import (
"encoding/json"
"strings"
"time"
)
const jsonFeedVersion = "https://jsonfeed.org/version/1"
// JSONAuthor represents the author of the feed or of an individual item
// in the feed
type JSONAuthor struct {
Name string `json:"name,omitempty"`
Url string `json:"url,omitempty"`
Avatar string `json:"avatar,omitempty"`
}
// JSONAttachment represents a related resource. Podcasts, for instance, would
// include an attachment that's an audio or video file.
type JSONAttachment struct {
Url string `json:"url,omitempty"`
MIMEType string `json:"mime_type,omitempty"`
Title string `json:"title,omitempty"`
Size int32 `json:"size,omitempty"`
Duration time.Duration `json:"duration_in_seconds,omitempty"`
}
// MarshalJSON implements the json.Marshaler interface.
// The Duration field is marshaled in seconds, all other fields are marshaled
// based upon the definitions in struct tags.
func (a *JSONAttachment) MarshalJSON() ([]byte, error) {
type EmbeddedJSONAttachment JSONAttachment
return json.Marshal(&struct {
Duration float64 `json:"duration_in_seconds,omitempty"`
*EmbeddedJSONAttachment
}{
EmbeddedJSONAttachment: (*EmbeddedJSONAttachment)(a),
Duration: a.Duration.Seconds(),
})
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// The Duration field is expected to be in seconds, all other field types
// match the struct definition.
func (a *JSONAttachment) UnmarshalJSON(data []byte) error {
type EmbeddedJSONAttachment JSONAttachment
var raw struct {
Duration float64 `json:"duration_in_seconds,omitempty"`
*EmbeddedJSONAttachment
}
raw.EmbeddedJSONAttachment = (*EmbeddedJSONAttachment)(a)
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}
if raw.Duration > 0 {
nsec := int64(raw.Duration * float64(time.Second))
raw.EmbeddedJSONAttachment.Duration = time.Duration(nsec)
}
return nil
}
// JSONItem represents a single entry/post for the feed.
type JSONItem struct {
Id string `json:"id"`
Url string `json:"url,omitempty"`
ExternalUrl string `json:"external_url,omitempty"`
Title string `json:"title,omitempty"`
ContentHTML string `json:"content_html,omitempty"`
ContentText string `json:"content_text,omitempty"`
Summary string `json:"summary,omitempty"`
Image string `json:"image,omitempty"`
BannerImage string `json:"banner_image,omitempty"`
PublishedDate *time.Time `json:"date_published,omitempty"`
ModifiedDate *time.Time `json:"date_modified,omitempty"`
Author *JSONAuthor `json:"author,omitempty"`
Tags []string `json:"tags,omitempty"`
Attachments []JSONAttachment `json:"attachments,omitempty"`
}
// JSONHub describes an endpoint that can be used to subscribe to real-time
// notifications from the publisher of this feed.
type JSONHub struct {
Type string `json:"type"`
Url string `json:"url"`
}
// JSONFeed represents a syndication feed in the JSON Feed Version 1 format.
// Matching the specification found here: https://jsonfeed.org/version/1.
type JSONFeed struct {
Version string `json:"version"`
Title string `json:"title"`
HomePageUrl string `json:"home_page_url,omitempty"`
FeedUrl string `json:"feed_url,omitempty"`
Description string `json:"description,omitempty"`
UserComment string `json:"user_comment,omitempty"`
NextUrl string `json:"next_url,omitempty"`
Icon string `json:"icon,omitempty"`
Favicon string `json:"favicon,omitempty"`
Author *JSONAuthor `json:"author,omitempty"`
Expired *bool `json:"expired,omitempty"`
Hubs []*JSONHub `json:"hubs,omitempty"`
Items []*JSONItem `json:"items,omitempty"`
}
// JSON is used to convert a generic Feed to a JSONFeed.
type JSON struct {
*Feed
}
// ToJSON encodes f into a JSON string. Returns an error if marshalling fails.
func (f *JSON) ToJSON() (string, error) {
return f.JSONFeed().ToJSON()
}
// ToJSON encodes f into a JSON string. Returns an error if marshalling fails.
func (f *JSONFeed) ToJSON() (string, error) {
data, err := json.MarshalIndent(f, "", " ")
if err != nil {
return "", err
}
return string(data), nil
}
// JSONFeed creates a new JSONFeed with a generic Feed struct's data.
func (f *JSON) JSONFeed() *JSONFeed {
feed := &JSONFeed{
Version: jsonFeedVersion,
Title: f.Title,
Description: f.Description,
}
if f.Link != nil {
feed.HomePageUrl = f.Link.Href
}
if f.Author != nil {
feed.Author = &JSONAuthor{
Name: f.Author.Name,
}
}
for _, e := range f.Items {
feed.Items = append(feed.Items, newJSONItem(e))
}
return feed
}
func newJSONItem(i *Item) *JSONItem {
item := &JSONItem{
Id: i.Id,
Title: i.Title,
Summary: i.Description,
ContentHTML: i.Content,
}
if i.Link != nil {
item.Url = i.Link.Href
}
if i.Source != nil {
item.ExternalUrl = i.Source.Href
}
if i.Author != nil {
item.Author = &JSONAuthor{
Name: i.Author.Name,
}
}
if !i.Created.IsZero() {
item.PublishedDate = &i.Created
}
if !i.Updated.IsZero() {
item.ModifiedDate = &i.Updated
}
if i.Enclosure != nil && strings.HasPrefix(i.Enclosure.Type, "image/") {
item.Image = i.Enclosure.Url
}
return item
}

View file

@ -1,168 +0,0 @@
package feeds
// rss support
// validation done according to spec here:
// http://cyber.law.harvard.edu/rss/rss.html
import (
"encoding/xml"
"fmt"
"time"
)
// private wrapper around the RssFeed which gives us the <rss>..</rss> xml
type RssFeedXml struct {
XMLName xml.Name `xml:"rss"`
Version string `xml:"version,attr"`
ContentNamespace string `xml:"xmlns:content,attr"`
Channel *RssFeed
}
type RssContent struct {
XMLName xml.Name `xml:"content:encoded"`
Content string `xml:",cdata"`
}
type RssImage struct {
XMLName xml.Name `xml:"image"`
Url string `xml:"url"`
Title string `xml:"title"`
Link string `xml:"link"`
Width int `xml:"width,omitempty"`
Height int `xml:"height,omitempty"`
}
type RssTextInput struct {
XMLName xml.Name `xml:"textInput"`
Title string `xml:"title"`
Description string `xml:"description"`
Name string `xml:"name"`
Link string `xml:"link"`
}
type RssFeed struct {
XMLName xml.Name `xml:"channel"`
Title string `xml:"title"` // required
Link string `xml:"link"` // required
Description string `xml:"description"` // required
Language string `xml:"language,omitempty"`
Copyright string `xml:"copyright,omitempty"`
ManagingEditor string `xml:"managingEditor,omitempty"` // Author used
WebMaster string `xml:"webMaster,omitempty"`
PubDate string `xml:"pubDate,omitempty"` // created or updated
LastBuildDate string `xml:"lastBuildDate,omitempty"` // updated used
Category string `xml:"category,omitempty"`
Generator string `xml:"generator,omitempty"`
Docs string `xml:"docs,omitempty"`
Cloud string `xml:"cloud,omitempty"`
Ttl int `xml:"ttl,omitempty"`
Rating string `xml:"rating,omitempty"`
SkipHours string `xml:"skipHours,omitempty"`
SkipDays string `xml:"skipDays,omitempty"`
Image *RssImage
TextInput *RssTextInput
Items []*RssItem `xml:"item"`
}
type RssItem struct {
XMLName xml.Name `xml:"item"`
Title string `xml:"title"` // required
Link string `xml:"link"` // required
Description string `xml:"description"` // required
Content *RssContent
Author string `xml:"author,omitempty"`
Category string `xml:"category,omitempty"`
Comments string `xml:"comments,omitempty"`
Enclosure *RssEnclosure
Guid string `xml:"guid,omitempty"` // Id used
PubDate string `xml:"pubDate,omitempty"` // created or updated
Source string `xml:"source,omitempty"`
}
type RssEnclosure struct {
//RSS 2.0 <enclosure url="http://example.com/file.mp3" length="123456789" type="audio/mpeg" />
XMLName xml.Name `xml:"enclosure"`
Url string `xml:"url,attr"`
Length string `xml:"length,attr"`
Type string `xml:"type,attr"`
}
type Rss struct {
*Feed
}
// create a new RssItem with a generic Item struct's data
func newRssItem(i *Item) *RssItem {
item := &RssItem{
Title: i.Title,
Link: i.Link.Href,
Description: i.Description,
Guid: i.Id,
PubDate: anyTimeFormat(time.RFC1123Z, i.Created, i.Updated),
}
if len(i.Content) > 0 {
item.Content = &RssContent{Content: i.Content}
}
if i.Source != nil {
item.Source = i.Source.Href
}
// Add an enclosure when the item has one with both type and length set
if i.Enclosure != nil && i.Enclosure.Type != "" && i.Enclosure.Length != "" {
item.Enclosure = &RssEnclosure{Url: i.Enclosure.Url, Type: i.Enclosure.Type, Length: i.Enclosure.Length}
}
if i.Author != nil {
item.Author = i.Author.Name
}
return item
}
// create a new RssFeed with a generic Feed struct's data
func (r *Rss) RssFeed() *RssFeed {
pub := anyTimeFormat(time.RFC1123Z, r.Created, r.Updated)
build := anyTimeFormat(time.RFC1123Z, r.Updated)
author := ""
if r.Author != nil {
author = r.Author.Email
if len(r.Author.Name) > 0 {
author = fmt.Sprintf("%s (%s)", r.Author.Email, r.Author.Name)
}
}
var image *RssImage
if r.Image != nil {
image = &RssImage{Url: r.Image.Url, Title: r.Image.Title, Link: r.Image.Link, Width: r.Image.Width, Height: r.Image.Height}
}
channel := &RssFeed{
Title: r.Title,
Link: r.Link.Href,
Description: r.Description,
ManagingEditor: author,
PubDate: pub,
LastBuildDate: build,
Copyright: r.Copyright,
Image: image,
}
for _, i := range r.Items {
channel.Items = append(channel.Items, newRssItem(i))
}
return channel
}
// FeedXml returns an XML-Ready object for an Rss object
func (r *Rss) FeedXml() interface{} {
// only generate version 2.0 feeds for now
return r.RssFeed().FeedXml()
}
// FeedXml returns an XML-ready object for an RssFeed object
func (r *RssFeed) FeedXml() interface{} {
return &RssFeedXml{
Version: "2.0",
Channel: r,
ContentNamespace: "http://purl.org/rss/1.0/modules/content/",
}
}

View file

@ -1,92 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns:atom="http://www.w3.org/2005/Atom">
<title><![CDATA[Lorem ipsum feed for an interval of 1 minutes]]></title>
<description><![CDATA[This is a constantly updating lorem ipsum feed]]></description>
<link>http://example.com/</link>
<generator>RSS for Node</generator>
<lastBuildDate>Tue, 30 Oct 2018 23:22:37 GMT</lastBuildDate>
<author><![CDATA[John Smith]]></author>
<pubDate>Tue, 30 Oct 2018 23:22:00 GMT</pubDate>
<copyright><![CDATA[Michael Bertolacci, licensed under a Creative Commons Attribution 3.0 Unported License.]]></copyright>
<ttl>60</ttl>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:22:00+00:00]]></title>
<description><![CDATA[Exercitation ut Lorem sint proident.]]></description>
<link>http://example.com/test/1540941720</link>
<guid isPermaLink="true">http://example.com/test/1540941720</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:22:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:21:00+00:00]]></title>
<description><![CDATA[Ea est do quis fugiat exercitation.]]></description>
<link>http://example.com/test/1540941660</link>
<guid isPermaLink="true">http://example.com/test/1540941660</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:21:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:20:00+00:00]]></title>
<description><![CDATA[Ipsum velit cillum ad laborum sit nulla exercitation consequat sint veniam culpa veniam voluptate incididunt.]]></description>
<link>http://example.com/test/1540941600</link>
<guid isPermaLink="true">http://example.com/test/1540941600</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:20:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:19:00+00:00]]></title>
<description><![CDATA[Ullamco pariatur aliqua consequat ea veniam id qui incididunt laborum.]]></description>
<link>http://example.com/test/1540941540</link>
<guid isPermaLink="true">http://example.com/test/1540941540</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:19:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:18:00+00:00]]></title>
<description><![CDATA[Velit proident aliquip aliquip anim mollit voluptate laboris voluptate et occaecat occaecat laboris ea nulla.]]></description>
<link>http://example.com/test/1540941480</link>
<guid isPermaLink="true">http://example.com/test/1540941480</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:18:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:17:00+00:00]]></title>
<description><![CDATA[Do in quis mollit consequat id in minim laborum sint exercitation laborum elit officia.]]></description>
<link>http://example.com/test/1540941420</link>
<guid isPermaLink="true">http://example.com/test/1540941420</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:17:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:16:00+00:00]]></title>
<description><![CDATA[Irure id sint ullamco Lorem magna consectetur officia adipisicing duis incididunt.]]></description>
<link>http://example.com/test/1540941360</link>
<guid isPermaLink="true">http://example.com/test/1540941360</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:16:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:15:00+00:00]]></title>
<description><![CDATA[Sunt anim excepteur esse nisi commodo culpa laborum exercitation ad anim ex elit.]]></description>
<link>http://example.com/test/1540941300</link>
<guid isPermaLink="true">http://example.com/test/1540941300</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:15:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:14:00+00:00]]></title>
<description><![CDATA[Excepteur aliquip fugiat ex labore nisi.]]></description>
<link>http://example.com/test/1540941240</link>
<guid isPermaLink="true">http://example.com/test/1540941240</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:14:00 GMT</pubDate>
</entry>
<entry>
<title><![CDATA[Lorem ipsum 2018-10-30T23:13:00+00:00]]></title>
<description><![CDATA[Id proident adipisicing proident pariatur aute pariatur pariatur dolor dolor in voluptate dolor.]]></description>
<link>http://example.com/test/1540941180</link>
<guid isPermaLink="true">http://example.com/test/1540941180</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:13:00 GMT</pubDate>
</entry>
</feed>

View file

@@ -1,96 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:content="http://purl.org/rss/1.0/modules/content/"
xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">
<channel>
<title><![CDATA[Lorem ipsum feed for an interval of 1 minutes]]></title>
<description><![CDATA[This is a constantly updating lorem ipsum feed]]></description>
<link>http://example.com/</link>
<generator>RSS for Node</generator>
<lastBuildDate>Tue, 30 Oct 2018 23:22:37 GMT</lastBuildDate>
<author><![CDATA[John Smith]]></author>
<pubDate>Tue, 30 Oct 2018 23:22:00 GMT</pubDate>
<copyright><![CDATA[Michael Bertolacci, licensed under a Creative Commons Attribution 3.0 Unported License.]]></copyright>
<ttl>60</ttl>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:22:00+00:00]]></title>
<description><![CDATA[Exercitation ut Lorem sint proident.]]></description>
<link>http://example.com/test/1540941720</link>
<guid isPermaLink="true">http://example.com/test/1540941720</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:22:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:21:00+00:00]]></title>
<description><![CDATA[Ea est do quis fugiat exercitation.]]></description>
<link>http://example.com/test/1540941660</link>
<guid isPermaLink="true">http://example.com/test/1540941660</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:21:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:20:00+00:00]]></title>
<description><![CDATA[Ipsum velit cillum ad laborum sit nulla exercitation consequat sint veniam culpa veniam voluptate incididunt.]]></description>
<link>http://example.com/test/1540941600</link>
<guid isPermaLink="true">http://example.com/test/1540941600</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:20:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:19:00+00:00]]></title>
<description><![CDATA[Ullamco pariatur aliqua consequat ea veniam id qui incididunt laborum.]]></description>
<link>http://example.com/test/1540941540</link>
<guid isPermaLink="true">http://example.com/test/1540941540</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:19:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:18:00+00:00]]></title>
<description><![CDATA[Velit proident aliquip aliquip anim mollit voluptate laboris voluptate et occaecat occaecat laboris ea nulla.]]></description>
<link>http://example.com/test/1540941480</link>
<guid isPermaLink="true">http://example.com/test/1540941480</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:18:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:17:00+00:00]]></title>
<description><![CDATA[Do in quis mollit consequat id in minim laborum sint exercitation laborum elit officia.]]></description>
<link>http://example.com/test/1540941420</link>
<guid isPermaLink="true">http://example.com/test/1540941420</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:17:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:16:00+00:00]]></title>
<description><![CDATA[Irure id sint ullamco Lorem magna consectetur officia adipisicing duis incididunt.]]></description>
<link>http://example.com/test/1540941360</link>
<guid isPermaLink="true">http://example.com/test/1540941360</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:16:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:15:00+00:00]]></title>
<description><![CDATA[Sunt anim excepteur esse nisi commodo culpa laborum exercitation ad anim ex elit.]]></description>
<link>http://example.com/test/1540941300</link>
<guid isPermaLink="true">http://example.com/test/1540941300</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:15:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:14:00+00:00]]></title>
<description><![CDATA[Excepteur aliquip fugiat ex labore nisi.]]></description>
<link>http://example.com/test/1540941240</link>
<guid isPermaLink="true">http://example.com/test/1540941240</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:14:00 GMT</pubDate>
</item>
<item>
<title><![CDATA[Lorem ipsum 2018-10-30T23:13:00+00:00]]></title>
<description><![CDATA[Id proident adipisicing proident pariatur aute pariatur pariatur dolor dolor in voluptate dolor.]]></description>
<link>http://example.com/test/1540941180</link>
<guid isPermaLink="true">http://example.com/test/1540941180</guid>
<dc:creator><![CDATA[John Smith]]></dc:creator>
<pubDate>Tue, 30 Oct 2018 23:13:00 GMT</pubDate>
</item>
</channel>
</rss>

View file

@@ -1,20 +0,0 @@
[Full iTunes list](https://help.apple.com/itc/podcasts_connect/#/itcb54353390)
[Example of ideal iTunes RSS feed](https://help.apple.com/itc/podcasts_connect/#/itcbaf351599)
```
<itunes:author>
<itunes:block>
<itunes:category>
<itunes:image>
<itunes:duration>
<itunes:explicit>
<itunes:isClosedCaptioned>
<itunes:order>
<itunes:complete>
<itunes:new-feed-url>
<itunes:owner>
<itunes:subtitle>
<itunes:summary>
<language>
```
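As a rough illustration (not part of the feeds package), a couple of these tags can be modelled with `encoding/xml` struct tags in the same colon-in-name style the package already uses for the content module; the type below is hypothetical:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// ItunesOwner is a hypothetical example type, not part of the feeds package.
type ItunesOwner struct {
	XMLName xml.Name `xml:"itunes:owner"`
	Name    string   `xml:"itunes:name"`
	Email   string   `xml:"itunes:email"`
}

func main() {
	out, err := xml.MarshalIndent(ItunesOwner{Name: "Jane Doe", Email: "jane@example.com"}, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints:
	// <itunes:owner>
	//   <itunes:name>Jane Doe</itunes:name>
	//   <itunes:email>jane@example.com</itunes:email>
	// </itunes:owner>
	fmt.Println(string(out))
}
```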

View file

@@ -1,27 +0,0 @@
package feeds
// relevant bits from https://github.com/abneptis/GoUUID/blob/master/uuid.go
import (
"crypto/rand"
"fmt"
)
type UUID [16]byte
// create a new uuid v4
func NewUUID() *UUID {
u := &UUID{}
_, err := rand.Read(u[:16])
if err != nil {
panic(err)
}
u[8] = (u[8] | 0x80) & 0xBf // variant: force the two high bits to 10 (RFC 4122)
u[6] = (u[6] | 0x40) & 0x4f // version: force the high nibble to 4 (random UUID)
return u
}
func (u *UUID) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
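A quick illustrative sketch of using this type from application code (assuming the vendored `github.com/gorilla/feeds` import path):

```go
package main

import (
	"fmt"

	"github.com/gorilla/feeds"
)

func main() {
	id := feeds.NewUUID()
	// Prints a random version-4 UUID in the usual 8-4-4-4-12 hex form.
	fmt.Println(id.String())
}
```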

View file

@@ -1,24 +0,0 @@
language: go
matrix:
include:
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: 1.11.x
- go: 1.x
env: LATEST=true
- go: tip
allow_failures:
- go: tip
install:
- # Skip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- if [[ "$LATEST" = true ]]; then go vet .; fi
- go test -v -race ./...

View file

@@ -1,8 +0,0 @@
# This is the official list of gorilla/mux authors for copyright purposes.
#
# Please keep the list sorted.
Google LLC (https://opensource.google.com/)
Kamil Kisielk <kamil@kamilkisiel.net>
Matt Silverlock <matt@eatsleeprepeat.net>
Rodrigo Moraes (https://github.com/moraes)

View file

@@ -1,11 +0,0 @@
**What version of Go are you running?** (Paste the output of `go version`)
**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
**Describe your problem** (and what you have tried so far)
**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)

View file

@@ -1,27 +0,0 @@
Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,649 +0,0 @@
# gorilla/mux
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
https://www.gorillatoolkit.org/pkg/mux
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
---
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
* [Walking Routes](#walking-routes)
* [Graceful Shutdown](#graceful-shutdown)
* [Middleware](#middleware)
* [Testing Handlers](#testing-handlers)
* [Full Example](#full-example)
---
## Install
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
```sh
go get -u github.com/gorilla/mux
```
## Examples
Let's start registering a couple of URL paths and handlers:
```go
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
```
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
And this is all you need to know about the basic usage. More advanced options are explained below.
### Matching Routes
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
```go
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.example.com")
```
There are several other matchers that can be added. To match path prefixes:
```go
r.PathPrefix("/products/")
```
...or HTTP methods:
```go
r.Methods("GET", "POST")
```
...or URL schemes:
```go
r.Schemes("https")
```
...or header values:
```go
r.Headers("X-Requested-With", "XMLHttpRequest")
```
...or query values:
```go
r.Queries("key", "value")
```
...or to use a custom matcher function:
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
```
...and finally, it is possible to combine several matchers in a single route:
```go
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
```
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
```go
r := mux.NewRouter()
r.HandleFunc("/specific", specificHandler)
r.PathPrefix("/").Handler(catchAllHandler)
```
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```
Then register routes in the subrouter:
```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
request that matches "/static/\*". This makes it easy to serve static files with mux:
```go
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Registered URLs
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
```
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
```go
url, err := r.Get("article").URL("category", "technology", "id", "42")
```
...and the result will be a `url.URL` with the following path:
```
"/articles/technology/42"
```
This also works for host and query value variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.example.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
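If you want a named route purely for URL building, it can be marked as build-only so it never matches a request; a brief sketch (the `BuildOnly()` call is not shown elsewhere in this excerpt):

```go
r := mux.NewRouter()
// Usable with r.Get("thumbnail").URL("id", "42"), but never matches incoming requests.
r.Path("/thumbnails/{id:[0-9]+}").Name("thumbnail").BuildOnly()
```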
Regex support also exists for matching Headers within a route. For example, we could do:
```go
r.HeadersRegexp("Content-Type", "application/(text|json)")
```
...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`.
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go
// "http://news.example.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
```
And if you use subrouters, host and path defined separately can be built as well:
```go
r := mux.NewRouter()
s := r.Host("{subdomain}.example.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.example.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
```
### Walking Routes
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
the following prints all of the registered routes:
```go
package main
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
)
func handler(w http.ResponseWriter, r *http.Request) {
return
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.HandleFunc("/products", handler).Methods("POST")
r.HandleFunc("/articles", handler).Methods("GET")
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
pathTemplate, err := route.GetPathTemplate()
if err == nil {
fmt.Println("ROUTE:", pathTemplate)
}
pathRegexp, err := route.GetPathRegexp()
if err == nil {
fmt.Println("Path regexp:", pathRegexp)
}
queriesTemplates, err := route.GetQueriesTemplates()
if err == nil {
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
}
queriesRegexps, err := route.GetQueriesRegexp()
if err == nil {
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
}
methods, err := route.GetMethods()
if err == nil {
fmt.Println("Methods:", strings.Join(methods, ","))
}
fmt.Println()
return nil
})
if err != nil {
fmt.Println(err)
}
http.Handle("/", r)
}
```
### Graceful Shutdown
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
```go
package main
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
)
func main() {
var wait time.Duration
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
flag.Parse()
r := mux.NewRouter()
// Add your routes as needed
srv := &http.Server{
Addr: "0.0.0.0:8080",
// Good practice to set timeouts to avoid Slowloris attacks.
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: r, // Pass our instance of gorilla/mux in.
}
// Run our server in a goroutine so that it doesn't block.
go func() {
if err := srv.ListenAndServe(); err != nil {
log.Println(err)
}
}()
c := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(c, os.Interrupt)
// Block until we receive our signal.
<-c
// Create a deadline to wait for.
ctx, cancel := context.WithTimeout(context.Background(), wait)
defer cancel()
// Doesn't block if no connections, but will otherwise wait
// until the timeout deadline.
srv.Shutdown(ctx)
// Optionally, you could run srv.Shutdown in a goroutine and block on
// <-ctx.Done() if your application should wait for other services
// to finalize based on context cancellation.
log.Println("shutting down")
os.Exit(0)
}
```
### Middleware
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
Mux middlewares are defined using the de facto standard type:
```go
type MiddlewareFunc func(http.Handler) http.Handler
```
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
A very basic middleware which logs the URI of the request being handled could be written as:
```go
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
```
Middlewares can be added to a router using `Router.Use()`:
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(loggingMiddleware)
```
A more complex authentication middleware, which maps session token to users, could be written as:
```go
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
// Pass down the request to the next middleware (or final handler)
next.ServeHTTP(w, r)
} else {
// Write an error and stop the handler chain
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
```
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{}
amw.Populate()
r.Use(amw.Middleware)
```
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
### Testing Handlers
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
First, our simple HTTP handler:
```go
// endpoints.go
package main
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
// A very simple health check.
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// In the future we could report back on the status of our DB, or our cache
// (e.g. Redis) by performing a simple PING, and include them in the response.
io.WriteString(w, `{"alive": true}`)
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/health", HealthCheckHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test code:
```go
// endpoints_test.go
package main
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestHealthCheckHandler(t *testing.T) {
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
// pass 'nil' as the third parameter.
req, err := http.NewRequest("GET", "/health", nil)
if err != nil {
t.Fatal(err)
}
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
rr := httptest.NewRecorder()
handler := http.HandlerFunc(HealthCheckHandler)
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
// directly and pass in our Request and ResponseRecorder.
handler.ServeHTTP(rr, req)
// Check the status code is what we expect.
if status := rr.Code; status != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v",
status, http.StatusOK)
}
// Check the response body is what we expect.
expected := `{"alive": true}`
if rr.Body.String() != expected {
t.Errorf("handler returned unexpected body: got %v want %v",
rr.Body.String(), expected)
}
}
```
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
possible route variables as needed.
```go
// endpoints.go
func main() {
r := mux.NewRouter()
// A route with a route variable:
r.HandleFunc("/metrics/{type}", MetricsHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test file, with a table-driven test of `routeVariables`:
```go
// endpoints_test.go
func TestMetricsHandler(t *testing.T) {
tt := []struct{
routeVariable string
shouldPass bool
}{
{"goroutines", true},
{"heap", true},
{"counters", true},
{"queries", true},
{"adhadaeqm3k", false},
}
for _, tc := range tt {
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
req, err := http.NewRequest("GET", path, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
// Need to create a router that we can pass the request through so that the vars will be added to the context
router := mux.NewRouter()
router.HandleFunc("/metrics/{type}", MetricsHandler)
router.ServeHTTP(rr, req)
// In this case, our MetricsHandler returns a non-200 response
// for a route variable it doesn't know about.
if rr.Code == http.StatusOK && !tc.shouldPass {
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
tc.routeVariable, rr.Code, http.StatusOK)
}
}
}
```
## Full Example
Here's a complete, runnable example of a small `mux` based server:
```go
package main
import (
"net/http"
"log"
"github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Gorilla!\n"))
}
func main() {
r := mux.NewRouter()
// Routes consist of a path and a handler function.
r.HandleFunc("/", YourHandler)
// Bind to a port and pass our router in
log.Fatal(http.ListenAndServe(":8000", r))
}
```
## License
BSD licensed. See the LICENSE file for details.

View file

@@ -1,18 +0,0 @@
package mux
import (
"context"
"net/http"
)
func contextGet(r *http.Request, key interface{}) interface{} {
return r.Context().Value(key)
}
func contextSet(r *http.Request, key, val interface{}) *http.Request {
if val == nil {
return r
}
return r.WithContext(context.WithValue(r.Context(), key, val))
}

306
vendor/github.com/gorilla/mux/doc.go generated vendored
View file

@@ -1,306 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package mux implements a request router and dispatcher.
The name mux stands for "HTTP request multiplexer". Like the standard
http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional
regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
* It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming request URL matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
The names are used to create a map of route variables which can be retrieved
calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
Note that if any capturing groups are present, mux will panic() during parsing. To prevent
this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
when capturing groups were present.
And this is all you need to know about the basic usage. More advanced options
are explained below.
Routes can also be restricted to a domain or subdomain. Just define a host
pattern to be matched. They can also have variables:
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
There are several other matchers that can be added. To match path prefixes:
r.PathPrefix("/products/")
...or HTTP methods:
r.Methods("GET", "POST")
...or URL schemes:
r.Schemes("https")
...or header values:
r.Headers("X-Requested-With", "XMLHttpRequest")
...or query values:
r.Queries("key", "value")
...or to use a custom matcher function:
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
...and finally, it is possible to combine several matchers in a single route:
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".
For example, let's say we have several URLs that should only match when the
host is "www.example.com". Create a route for that host and get a "subrouter"
from it:
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
Then register routes in the subrouter:
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
The three URL paths we registered above will only be tested if the domain is
"www.example.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then parts of the app can register its
paths relatively to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as base for their paths:
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
Note that the path provided to PathPrefix() represents a "wildcard": calling
PathPrefix("/static/").Handler(...) means that the handler will be passed any
request that matches "/static/*". This makes it easy to serve static files with mux:
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name calling Name() on a route. For example:
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:
url, err := r.Get("article").URL("category", "technology", "id", "42")
...and the result will be a url.URL with the following path:
"/articles/technology/42"
This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.
Regex support also exists for matching Headers within a route. For example, we could do:
r.HeadersRegexp("Content-Type", "application/(text|json)")
...and the route will match both requests with a Content-Type of `application/json` as well as
`application/text`
There's also a way to build only the URL host or path for a route:
use the methods URLHost() or URLPath() instead. For the previous route,
we would do:
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
And if you use subrouters, host and path defined separately can be built
as well:
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
type MiddlewareFunc func(http.Handler) http.Handler
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
A very basic middleware which logs the URI of the request being handled could be written as:
func simpleMw(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
Middlewares can be added to a router using `Router.Use()`:
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(simpleMw)
A more complex authentication middleware, which maps session token to users, could be written as:
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
next.ServeHTTP(w, r)
} else {
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{}
amw.Populate()
r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
*/
package mux

View file

@@ -1 +0,0 @@
module github.com/gorilla/mux

View file

@@ -1,72 +0,0 @@
package mux
import (
"net/http"
"strings"
)
// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
type MiddlewareFunc func(http.Handler) http.Handler
// middleware interface is anything which implements a MiddlewareFunc named Middleware.
type middleware interface {
Middleware(handler http.Handler) http.Handler
}
// Middleware allows MiddlewareFunc to implement the middleware interface.
func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
return mw(handler)
}
// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
func (r *Router) Use(mwf ...MiddlewareFunc) {
for _, fn := range mwf {
r.middlewares = append(r.middlewares, fn)
}
}
// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
func (r *Router) useInterface(mw middleware) {
r.middlewares = append(r.middlewares, mw)
}
// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
// on a request, by matching routes based only on paths. It also handles
// OPTIONS requests, by setting Access-Control-Allow-Methods, and then
// returning without calling the next http handler.
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
var allMethods []string
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
for _, m := range route.matchers {
if _, ok := m.(*routeRegexp); ok {
if m.Match(req, &RouteMatch{}) {
methods, err := route.GetMethods()
if err != nil {
return err
}
allMethods = append(allMethods, methods...)
}
break
}
}
return nil
})
if err == nil {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
if req.Method == "OPTIONS" {
return
}
}
next.ServeHTTP(w, req)
})
}
}
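A brief usage sketch for CORSMethodMiddleware (illustrative, not part of this file). Note that OPTIONS has to be registered on the route so preflight requests match it and the middleware gets a chance to write the header:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func fooHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method == http.MethodOptions {
		return // Access-Control-Allow-Methods was already set by the middleware
	}
	w.Write([]byte("foo"))
}

func main() {
	r := mux.NewRouter()
	// Register OPTIONS as well so preflight requests match this route.
	r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodOptions)
	r.Use(mux.CORSMethodMiddleware(r))
	log.Fatal(http.ListenAndServe(":8080", r))
}
```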

607
vendor/github.com/gorilla/mux/mux.go generated vendored
View file

@@ -1,607 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"path"
"regexp"
)
var (
// ErrMethodMismatch is returned when the method in the request does not match
// the method defined against the route.
ErrMethodMismatch = errors.New("method is not allowed")
// ErrNotFound is returned when no route match is found.
ErrNotFound = errors.New("no matching route was found")
)
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route)}
}
// Router registers routes to be matched and dispatches a handler.
//
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
//
// func main() {
// http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in a init() function:
//
// func init() {
// http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
// Configurable Handler to be used when the request method does not match the route.
MethodNotAllowedHandler http.Handler
// Routes to be matched, in order.
routes []*Route
// Routes by name for URL building.
namedRoutes map[string]*Route
// If true, do not clear the request context after handling the request.
//
// Deprecated: No effect when go1.7+ is used, since the context is stored
// on the request itself.
KeepContext bool
// Slice of middlewares to be called after a match is found
middlewares []middleware
// configuration shared with `Route`
routeConf
}
// common route configuration shared between `Router` and `Route`
type routeConf struct {
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, when the path pattern is "/path//to", accessing "/path//to"
// will not redirect
skipClean bool
// Manager for the variables from host and path.
regexp routeRegexpGroup
// List of matchers.
matchers []matcher
// The scheme used when building URLs.
buildScheme string
buildVarsFunc BuildVarsFunc
}
// returns an effective deep copy of `routeConf`
func copyRouteConf(r routeConf) routeConf {
c := r
if r.regexp.path != nil {
c.regexp.path = copyRouteRegexp(r.regexp.path)
}
if r.regexp.host != nil {
c.regexp.host = copyRouteRegexp(r.regexp.host)
}
c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
for _, q := range r.regexp.queries {
c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
}
c.matchers = make([]matcher, 0, len(r.matchers))
for _, m := range r.matchers {
c.matchers = append(c.matchers, m)
}
return c
}
func copyRouteRegexp(r *routeRegexp) *routeRegexp {
c := *r
return &c
}
// Match attempts to match the given request against the router's registered routes.
//
// If the request matches a route of this router or one of its subrouters the Route,
// Handler, and Vars fields of the match argument are filled and this function
// returns true.
//
// If the request does not match any of this router's or its subrouters' routes
// then this function returns false. If available, a reason for the match failure
// will be filled in the match argument's MatchErr field. If the match failure type
// (eg: not found) has a registered handler, the handler is assigned to the Handler
// field of the match argument.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
// Build middleware chain if no error was found
if match.MatchErr == nil {
for i := len(r.middlewares) - 1; i >= 0; i-- {
match.Handler = r.middlewares[i].Middleware(match.Handler)
}
}
return true
}
}
if match.MatchErr == ErrMethodMismatch {
if r.MethodNotAllowedHandler != nil {
match.Handler = r.MethodNotAllowedHandler
return true
}
return false
}
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
match.MatchErr = ErrNotFound
return true
}
match.MatchErr = ErrNotFound
return false
}
// ServeHTTP dispatches the handler registered in the matched route.
//
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.skipClean {
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Clean path to canonical form and redirect.
if p := cleanPath(path); p != path {
// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
// http://code.google.com/p/go/issues/detail?id=5252
url := *req.URL
url.Path = p
p = url.String()
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
}
var match RouteMatch
var handler http.Handler
if r.Match(req, &match) {
handler = match.Handler
req = setVars(req, match.Vars)
req = setCurrentRoute(req, match.Route)
}
if handler == nil && match.MatchErr == ErrMethodMismatch {
handler = methodNotAllowedHandler()
}
if handler == nil {
handler = http.NotFoundHandler()
}
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
return r.namedRoutes[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
return r.namedRoutes[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for
// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed
// request will be made as a GET by most clients. Use middleware or client settings
// to modify this behaviour as needed.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
// route inherit the original StrictSlash setting.
func (r *Router) StrictSlash(value bool) *Router {
r.strictSlash = value
return r
}
// SkipClean defines the path cleaning behaviour for new routes. The initial
// value is false. Users should be careful about which routes are not cleaned
//
// When true, if the route path is "/path//to", it will remain with the double
// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
//
// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
// become /fetch/http/xkcd.com/534
func (r *Router) SkipClean(value bool) *Router {
r.skipClean = value
return r
}
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
func (r *Router) UseEncodedPath() *Router {
r.useEncodedPath = true
return r
}
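// Illustrative sketch (assumes only the exported methods defined above): these
// options are set on the router and are inherited by routes created afterwards,
// for example:
//
//	r := NewRouter().StrictSlash(true)
//	r.SkipClean(false)
//	r.UseEncodedPath()
//	r.HandleFunc("/fetch/{url}", fetchHandler) // fetchHandler is illustrative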
// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
// initialize a route with a copy of the parent router's configuration
route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.routes = append(r.routes, route)
return route
}
// Name registers a new route with a name.
// See Route.Name().
func (r *Router) Name(name string) *Route {
return r.NewRoute().Name(name)
}
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
return r.NewRoute().Path(path).Handler(handler)
}
// HandleFunc registers a new route with a matcher for the URL path.
// See Route.Path() and Route.HandlerFunc().
func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
*http.Request)) *Route {
return r.NewRoute().Path(path).HandlerFunc(f)
}
// Headers registers a new route with a matcher for request header values.
// See Route.Headers().
func (r *Router) Headers(pairs ...string) *Route {
return r.NewRoute().Headers(pairs...)
}
// Host registers a new route with a matcher for the URL host.
// See Route.Host().
func (r *Router) Host(tpl string) *Route {
return r.NewRoute().Host(tpl)
}
// MatcherFunc registers a new route with a custom matcher function.
// See Route.MatcherFunc().
func (r *Router) MatcherFunc(f MatcherFunc) *Route {
return r.NewRoute().MatcherFunc(f)
}
// Methods registers a new route with a matcher for HTTP methods.
// See Route.Methods().
func (r *Router) Methods(methods ...string) *Route {
return r.NewRoute().Methods(methods...)
}
// Path registers a new route with a matcher for the URL path.
// See Route.Path().
func (r *Router) Path(tpl string) *Route {
return r.NewRoute().Path(tpl)
}
// PathPrefix registers a new route with a matcher for the URL path prefix.
// See Route.PathPrefix().
func (r *Router) PathPrefix(tpl string) *Route {
return r.NewRoute().PathPrefix(tpl)
}
// Queries registers a new route with a matcher for URL query values.
// See Route.Queries().
func (r *Router) Queries(pairs ...string) *Route {
return r.NewRoute().Queries(pairs...)
}
// Schemes registers a new route with a matcher for URL schemes.
// See Route.Schemes().
func (r *Router) Schemes(schemes ...string) *Route {
return r.NewRoute().Schemes(schemes...)
}
// BuildVarsFunc registers a new route with a custom function for modifying
// route variables before building a URL.
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
return r.NewRoute().BuildVarsFunc(f)
}
// Walk walks the router and all its sub-routers, calling walkFn for each route
// in the tree. The routes are walked in the order they were added. Sub-routers
// are explored depth-first.
func (r *Router) Walk(walkFn WalkFunc) error {
return r.walk(walkFn, []*Route{})
}
// SkipRouter is used as a return value from WalkFuncs to indicate that the
// router that walk is about to descend down to should be skipped.
var SkipRouter = errors.New("skip this router")
// WalkFunc is the type of the function called for each route visited by Walk.
// At every invocation, it is given the current route, the current router,
// and a list of ancestor routes that lead to the current route.
type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
}
if err != nil {
return err
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
if h, ok := t.handler.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
return nil
}
// ----------------------------------------------------------------------------
// Context
// ----------------------------------------------------------------------------
// RouteMatch stores information about a matched route.
type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
// MatchErr is set to the appropriate matching error.
// It is set to ErrMethodMismatch if there is a mismatch between
// the request method and the route's methods.
MatchErr error
}
type contextKey int
const (
varsKey contextKey = iota
routeKey
)
// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
if rv := contextGet(r, varsKey); rv != nil {
return rv.(map[string]string)
}
return nil
}
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns, unless the KeepContext option is set on the
// Router.
func CurrentRoute(r *http.Request) *Route {
if rv := contextGet(r, routeKey); rv != nil {
return rv.(*Route)
}
return nil
}
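// exampleVarsHandler is an illustrative sketch added for clarity; it is not part
// of the original vendored source. Registered against a template such as
// "/articles/{category}", it reads the extracted variables and the matched route.
func exampleVarsHandler(w http.ResponseWriter, req *http.Request) {
vars := Vars(req) // e.g. map[category:technology]
tpl := ""
if route := CurrentRoute(req); route != nil {
tpl, _ = route.GetPathTemplate() // e.g. "/articles/{category}"
}
fmt.Fprintf(w, "category=%s (matched template %s)", vars["category"], tpl)
}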
func setVars(r *http.Request, val interface{}) *http.Request {
return contextSet(r, varsKey, val)
}
func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
return contextSet(r, routeKey, val)
}
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
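// For example (illustrative, not part of the original source):
//
// cleanPath("") == "/"
// cleanPath("a/b") == "/a/b"
// cleanPath("/a/b/../c/") == "/a/c/" // ".." collapsed, trailing slash preserved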
// uniqueVars returns an error if two slices contain duplicated strings.
func uniqueVars(s1, s2 []string) error {
for _, v1 := range s1 {
for _, v2 := range s2 {
if v1 == v2 {
return fmt.Errorf("mux: duplicated route variable %q", v2)
}
}
}
return nil
}
// checkPairs returns the count of strings passed in, and an error if
// the count is not an even number.
func checkPairs(pairs ...string) (int, error) {
length := len(pairs)
if length%2 != 0 {
return length, fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
}
return length, nil
}
// mapFromPairsToString converts variadic string parameters to a
// string to string map.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]string, length/2)
for i := 0; i < length; i += 2 {
m[pairs[i]] = pairs[i+1]
}
return m, nil
}
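// For example (illustrative, not part of the original source):
// mapFromPairsToString("foo", "bar", "id", "{id:[0-9]+}") returns
// map[foo:bar id:{id:[0-9]+}], while an odd number of arguments
// yields the checkPairs error above.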
// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]*regexp.Regexp, length/2)
for i := 0; i < length; i += 2 {
regex, err := regexp.Compile(pairs[i+1])
if err != nil {
return nil, err
}
m[pairs[i]] = regex
}
return m, nil
}
// matchInArray returns true if the given string value is in the array.
func matchInArray(arr []string, value string) bool {
for _, v := range arr {
if v == value {
return true
}
}
return false
}
// matchMapWithString returns true if the given key/value pairs exist in a given map.
func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != "" {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v == value {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// matchMapWithRegex returns true if, for every key in toCheck, the given map
// contains that key and (for non-nil regexps) at least one value matching the regexp.
func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != nil {
// If the value was defined as nil we only check that the
// key exists. Otherwise we also check that at least one value matches the regexp.
valueExists := false
for _, value := range values {
if v.MatchString(value) {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// methodNotAllowed replies to the request with an HTTP status code 405.
func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
}
// methodNotAllowedHandler returns a simple request handler
// that replies to each request with a status code 405.
func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }

View file

@ -1,345 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
)
type routeRegexpOptions struct {
strictSlash bool
useEncodedPath bool
}
type regexpType int
const (
regexpTypePath regexpType = 0
regexpTypeHost regexpType = 1
regexpTypePrefix regexpType = 2
regexpTypeQuery regexpType = 3
)
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
return nil, errBraces
}
// Backup the original.
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
if typ == regexpTypeQuery {
defaultPattern = ".*"
} else if typ == regexpTypeHost {
defaultPattern = "[^.]+"
}
// strictSlash only applies when matching a path.
if typ != regexpTypePath {
options.strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
if options.strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
varsN := make([]string, len(idxs)/2)
varsR := make([]*regexp.Regexp, len(idxs)/2)
pattern := bytes.NewBufferString("")
pattern.WriteByte('^')
reverse := bytes.NewBufferString("")
var end int
var err error
for i := 0; i < len(idxs); i += 2 {
// Set all values we are interested in.
raw := tpl[end:idxs[i]]
end = idxs[i+1]
parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
name := parts[0]
patt := defaultPattern
if len(parts) == 2 {
patt = parts[1]
}
// Name or pattern can't be empty.
if name == "" || patt == "" {
return nil, fmt.Errorf("mux: missing name or pattern in %q",
tpl[idxs[i]:end])
}
// Build the regexp pattern.
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
// Build the reverse template.
fmt.Fprintf(reverse, "%s%%s", raw)
// Append variable name and compiled pattern.
varsN[i/2] = name
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
if err != nil {
return nil, err
}
}
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
if options.strictSlash {
pattern.WriteString("[/]?")
}
if typ == regexpTypeQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
var wildcardHostPort bool
if typ == regexpTypeHost {
if !strings.Contains(pattern.String(), ":") {
wildcardHostPort = true
}
}
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
}
// Compile full regexp.
reg, errCompile := regexp.Compile(pattern.String())
if errCompile != nil {
return nil, errCompile
}
// Check for capturing groups which used to work in older versions
if reg.NumSubexp() != len(idxs)/2 {
panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
}
// Done!
return &routeRegexp{
template: template,
regexpType: typ,
options: options,
regexp: reg,
reverse: reverse.String(),
varsN: varsN,
varsR: varsR,
wildcardHostPort: wildcardHostPort,
}, nil
}
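// For example (illustrative, not part of the original source): a path template
// such as "/articles/{id:[0-9]+}" with default options yields roughly:
//
// pattern: ^/articles/(?P<v0>[0-9]+)$
// reverse: /articles/%s
// varsN:   [id]
// varsR:   [^[0-9]+$]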
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
// The unmodified template.
template string
// The type of match
regexpType regexpType
// Options for matching
options routeRegexpOptions
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
reverse string
// Variable names.
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
// Wildcard host-port (no strict port match in hostname)
wildcardHostPort bool
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
if r.regexpType == regexpTypeHost {
host := getHost(req)
if r.wildcardHostPort {
// Don't be strict on the port match
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
}
return r.regexp.MatchString(host)
} else {
if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
if r.options.useEncodedPath {
path = req.URL.EscapedPath()
}
return r.regexp.MatchString(path)
}
}
// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
urlValues := make([]interface{}, len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
if r.regexpType == regexpTypeQuery {
value = url.QueryEscape(value)
}
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
if !r.regexp.MatchString(rv) {
// The URL is checked against the full regexp, instead of checking
// individual variables. This is faster but to provide a good error
// message, we check individual regexps if the URL doesn't match.
for k, v := range r.varsN {
if !r.varsR[k].MatchString(values[v]) {
return "", fmt.Errorf(
"mux: variable %q doesn't match, expected %q", values[v],
r.varsR[k].String())
}
}
}
return rv, nil
}
// getURLQuery returns a single query parameter from a request URL.
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
if r.regexpType != regexpTypeQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
for key, vals := range req.URL.Query() {
if key == templateKey && len(vals) > 0 {
return key + "=" + vals[0]
}
}
return ""
}
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
return r.regexp.MatchString(r.getURLQuery(req))
}
// braceIndices returns the first level curly brace indices from a string.
// It returns an error in case of unbalanced braces.
func braceIndices(s string) ([]int, error) {
var level, idx int
var idxs []int
for i := 0; i < len(s); i++ {
switch s[i] {
case '{':
if level++; level == 1 {
idx = i
}
case '}':
if level--; level == 0 {
idxs = append(idxs, idx, i+1)
} else if level < 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
}
}
if level != 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
return idxs, nil
}
// varGroupName builds a capturing group name for the indexed variable.
func varGroupName(idx int) string {
return "v" + strconv.Itoa(idx)
}
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------
// routeRegexpGroup groups the route matchers that carry variables.
type routeRegexpGroup struct {
host *routeRegexp
path *routeRegexp
queries []*routeRegexp
}
// setMatch extracts the variables from the URL once a route matches.
func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
}
}
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Store path variables.
if v.path != nil {
matches := v.path.regexp.FindStringSubmatchIndex(path)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
if v.path.options.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
u, _ := url.Parse(req.URL.String())
if p1 {
u.Path = u.Path[:len(u.Path)-1]
} else {
u.Path += "/"
}
m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
}
}
}
}
// Store query string variables.
for _, q := range v.queries {
queryURL := q.getURLQuery(req)
matches := q.regexp.FindStringSubmatchIndex(queryURL)
if len(matches) > 0 {
extractVars(queryURL, matches, q.varsN, m.Vars)
}
}
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
func extractVars(input string, matches []int, names []string, output map[string]string) {
for i, name := range names {
output[name] = input[matches[2*i+2]:matches[2*i+3]]
}
}

View file

@ -1,710 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
)
// Route stores information to match a request and build URLs.
type Route struct {
// Request handler for the route.
handler http.Handler
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
name string
// Error resulting from building a route.
err error
// "global" reference to all named routes
namedRoutes map[string]*Route
// config possibly passed in from `Router`
routeConf
}
// SkipClean reports whether path cleaning is disabled for this route via
// Router.SkipClean.
func (r *Route) SkipClean() bool {
return r.skipClean
}
// Match matches the route against the request.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
var matchErr error
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
if _, ok := m.(methodMatcher); ok {
matchErr = ErrMethodMismatch
continue
}
// Ignore ErrNotFound errors. These errors arise from Match calls
// on subrouters.
//
// This prevents subsequent matching subrouters from failing to
// run middleware. If not ignored, the middleware would see a
// non-nil MatchErr and be skipped, even when there was a
// matching route.
if match.MatchErr == ErrNotFound {
match.MatchErr = nil
}
matchErr = nil
return false
}
}
if matchErr != nil {
match.MatchErr = matchErr
return false
}
if match.MatchErr == ErrMethodMismatch {
// We found a route which matches the request method; clear MatchErr
match.MatchErr = nil
// Then override the mis-matched handler
match.Handler = r.handler
}
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
}
if match.Handler == nil {
match.Handler = r.handler
}
if match.Vars == nil {
match.Vars = make(map[string]string)
}
// Set variables.
r.regexp.setMatch(req, match, r)
return true
}
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------
// GetError returns an error resulting from building the route, if any.
func (r *Route) GetError() error {
return r.err
}
// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
r.buildOnly = true
return r
}
// Handler --------------------------------------------------------------------
// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
if r.err == nil {
r.handler = handler
}
return r
}
// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
return r.Handler(http.HandlerFunc(f))
}
// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
return r.handler
}
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
// It is an error to call Name more than once on a route.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
r.name, name)
}
if r.err == nil {
r.name = name
r.namedRoutes[name] = r
}
return r
}
// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
return r.name
}
// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------
// matcher types try to match a request.
type matcher interface {
Match(*http.Request, *RouteMatch) bool
}
// addMatcher adds a matcher to the route.
func (r *Route) addMatcher(m matcher) *Route {
if r.err == nil {
r.matchers = append(r.matchers, m)
}
return r
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
strictSlash: r.strictSlash,
useEncodedPath: r.useEncodedPath,
})
if err != nil {
return err
}
for _, q := range r.regexp.queries {
if err = uniqueVars(rr.varsN, q.varsN); err != nil {
return err
}
}
if typ == regexpTypeHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
}
}
r.regexp.host = rr
} else {
if r.regexp.host != nil {
if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
return err
}
}
if typ == regexpTypeQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
}
}
r.addMatcher(rr)
return nil
}
// Headers --------------------------------------------------------------------
// headerMatcher matches the request against header values.
type headerMatcher map[string]string
func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithString(m, r.Header, true)
}
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
// r := mux.NewRouter()
// r.Headers("Content-Type", "application/json",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
if r.err == nil {
var headers map[string]string
headers, r.err = mapFromPairsToString(pairs...)
return r.addMatcher(headerMatcher(headers))
}
return r
}
// headerRegexMatcher matches the request against the route given a regex for the header
type headerRegexMatcher map[string]*regexp.Regexp
func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithRegex(m, r.Header, true)
}
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
// support. For example:
//
// r := mux.NewRouter()
// r.HeadersRegexp("Content-Type", "application/(text|json)",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request headers match their regular expressions.
// If the value is an empty string, it will match any value if the key is set.
// Use the start and end of string anchors (^ and $) to match an exact value.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
headers, r.err = mapFromPairsToRegex(pairs...)
return r.addMatcher(headerRegexMatcher(headers))
}
return r
}
// Host -----------------------------------------------------------------------
// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Host("www.example.com")
// r.Host("{subdomain}.domain.com")
// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// by calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
return r
}
// MatcherFunc ----------------------------------------------------------------
// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool
// Match returns the match for a given request.
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
return m(r, match)
}
// MatcherFunc adds a custom function to be used as request matcher.
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
return r.addMatcher(f)
}
// Methods --------------------------------------------------------------------
// methodMatcher matches the request against HTTP methods.
type methodMatcher []string
func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.Method)
}
// Methods adds a matcher for HTTP methods.
// It accepts a sequence of one or more methods to be matched, e.g.:
// "GET", "POST", "PUT".
func (r *Route) Methods(methods ...string) *Route {
for k, v := range methods {
methods[k] = strings.ToUpper(v)
}
return r.addMatcher(methodMatcher(methods))
}
// Path -----------------------------------------------------------------------
// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Path("/products/").Handler(ProductsHandler)
// r.Path("/products/{key}").Handler(ProductsHandler)
// r.Path("/articles/{category}/{id:[0-9]+}").
// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// by calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePath)
return r
}
// PathPrefix -----------------------------------------------------------------
// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
return r
}
// Query ----------------------------------------------------------------------
// Queries adds a matcher for URL query values.
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
// r := mux.NewRouter()
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined query
// values, e.g.: ?foo=bar&id=42.
//
// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything (for query values the default pattern is ".*").
//
// - {name:pattern} matches the given regexp pattern.
func (r *Route) Queries(pairs ...string) *Route {
length := len(pairs)
if length%2 != 0 {
r.err = fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
return nil
}
for i := 0; i < length; i += 2 {
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
return r
}
}
return r
}
// Schemes --------------------------------------------------------------------
// schemeMatcher matches the request against URL schemes.
type schemeMatcher []string
func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.URL.Scheme)
}
// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
if len(schemes) > 0 {
r.buildScheme = schemes[0]
}
return r.addMatcher(schemeMatcher(schemes))
}
// BuildVarsFunc --------------------------------------------------------------
// BuildVarsFunc is the function signature used by custom build variable
// functions (which can modify route variables before a route's URL is built).
type BuildVarsFunc func(map[string]string) map[string]string
// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
if r.buildVarsFunc != nil {
// compose the old and new functions
old := r.buildVarsFunc
r.buildVarsFunc = func(m map[string]string) map[string]string {
return f(old(m))
}
} else {
r.buildVarsFunc = f
}
return r
}
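// exampleBuildVars is an illustrative sketch added for clarity; it is not part
// of the original vendored source. It normalises the "id" variable before the
// named route's URL is built (Get, defined elsewhere in this package, looks up
// a route by the name set with Name).
func exampleBuildVars(r *Router) (*url.URL, error) {
r.Path("/articles/{id}").Name("article").
BuildVarsFunc(func(vars map[string]string) map[string]string {
vars["id"] = strings.TrimSpace(vars["id"])
return vars
})
return r.Get("article").URL("id", " 42 ") // builds "/articles/42"
}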
// Subrouter ------------------------------------------------------------------
// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
// r := mux.NewRouter()
// s := r.Host("www.example.com").Subrouter()
// s.HandleFunc("/products/", ProductsHandler)
// s.HandleFunc("/products/{key}", ProductHandler)
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
// initialize a subrouter with a copy of the parent route's configuration
router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.addMatcher(router)
return router
}
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------
// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// ...a URL for it can be built using:
//
// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return an url.URL with the following path:
//
// "/articles/technology/42"
//
// This also works for host variables:
//
// r := mux.NewRouter()
// r.Host("{subdomain}.domain.com").
// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// // url.String() will be "http://news.domain.com/articles/technology/42"
// url, err := r.Get("article").URL("subdomain", "news",
// "category", "technology",
// "id", "42")
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
var scheme, host, path string
queries := make([]string, 0, len(r.regexp.queries))
if r.regexp.host != nil {
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
scheme = "http"
if r.buildScheme != "" {
scheme = r.buildScheme
}
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(values); err != nil {
return nil, err
}
}
for _, q := range r.regexp.queries {
var query string
if query, err = q.url(values); err != nil {
return nil, err
}
queries = append(queries, query)
}
return &url.URL{
Scheme: scheme,
Host: host,
Path: path,
RawQuery: strings.Join(queries, "&"),
}, nil
}
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
host, err := r.regexp.host.url(values)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "http",
Host: host,
}
if r.buildScheme != "" {
u.Scheme = r.buildScheme
}
return u, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
path, err := r.regexp.path.url(values)
if err != nil {
return nil, err
}
return &url.URL{
Path: path,
}, nil
}
// GetPathTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route doesn't have a path")
}
return r.regexp.path.template, nil
}
// GetPathRegexp returns the expanded regular expression used to match route path.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathRegexp() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route does not have a path")
}
return r.regexp.path.regexp.String(), nil
}
// GetQueriesRegexp returns the expanded regular expressions used to match the
// route queries.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not have queries.
func (r *Route) GetQueriesRegexp() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
var queries []string
for _, query := range r.regexp.queries {
queries = append(queries, query.regexp.String())
}
return queries, nil
}
// GetQueriesTemplates returns the templates used to build the
// query matching.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define queries.
func (r *Route) GetQueriesTemplates() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
var queries []string
for _, query := range r.regexp.queries {
queries = append(queries, query.template)
}
return queries, nil
}
// GetMethods returns the methods the route matches against.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not have methods.
func (r *Route) GetMethods() ([]string, error) {
if r.err != nil {
return nil, r.err
}
for _, m := range r.matchers {
if methods, ok := m.(methodMatcher); ok {
return []string(methods), nil
}
}
return nil, errors.New("mux: route doesn't have methods")
}
// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a host.
func (r *Route) GetHostTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.host == nil {
return "", errors.New("mux: route doesn't have a host")
}
return r.regexp.host.template, nil
}
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
m, err := mapFromPairsToString(pairs...)
if err != nil {
return nil, err
}
return r.buildVars(m), nil
}
func (r *Route) buildVars(m map[string]string) map[string]string {
if r.buildVarsFunc != nil {
m = r.buildVarsFunc(m)
}
return m
}

View file

@ -1,19 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import "net/http"
// SetURLVars sets the URL variables for the given request, to be accessed via
// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
// copy is returned.
//
// This API should only be used for testing purposes; it provides a way to
// inject variables into the request context. Alternatively, URL variables
// can be set by making a route that captures the required variables,
// starting a server and sending the request to that server.
func SetURLVars(r *http.Request, val map[string]string) *http.Request {
return setVars(r, val)
}
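// Illustrative sketch (not part of the original source), using the standard
// net/http/httptest package; articleHandler is a hypothetical handler that
// reads mux.Vars(req)["id"]:
//
// req := httptest.NewRequest("GET", "/articles/42", nil)
// req = mux.SetURLVars(req, map[string]string{"id": "42"})
// rr := httptest.NewRecorder()
// articleHandler(rr, req)
// // assert on rr.Code / rr.Body without starting a server or a router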

View file

@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
View file
@ -1,6 +0,0 @@
# torgo [![GoDoc](https://godoc.org/github.com/wybiral/torgo?status.svg)](https://godoc.org/github.com/wybiral/torgo)
This is a Go library for interacting with Tor over the standard controller interface. It simplifies tasks like creating ephemeral hidden services, working with private keys, and making SOCKS proxied client requests on the Tor network.
# Examples
* [/examples/hello-world](https://github.com/wybiral/torgo/blob/master/examples/hello-world)
* [godoc examples](https://godoc.org/github.com/wybiral/torgo#pkg-examples)
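In addition to the linked examples, here is a minimal end-to-end sketch (illustrative, not taken from this README; the addresses, port mapping, and unauthenticated control port are assumptions): connect to the control port, authenticate, and publish an ephemeral hidden service.
package main

import (
	"log"

	"github.com/wybiral/torgo"
)

func main() {
	// Connect to Tor's control port (9051 is the conventional default).
	ctrl, err := torgo.NewController("127.0.0.1:9051")
	if err != nil {
		log.Fatal(err)
	}
	// Assumes the control port allows unauthenticated access; otherwise use
	// AuthenticateCookie or AuthenticatePassword.
	if err := ctrl.AuthenticateNone(); err != nil {
		log.Fatal(err)
	}
	// No PrivateKey is set, so Tor generates one and fills in ServiceID.
	onion := &torgo.Onion{Ports: map[int]string{80: "127.0.0.1:8080"}}
	if err := ctrl.AddOnion(onion); err != nil {
		log.Fatal(err)
	}
	log.Printf("hidden service available at http://%s.onion/", onion.ServiceID)
}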
View file
@ -1,23 +0,0 @@
package torgo
import (
"net/http"
"net/url"
"golang.org/x/net/proxy"
)
// NewClient returns a new HTTP client that uses a Tor SOCKS proxy.
// `addr` is the host:port address of the SOCKS proxy (usually "127.0.0.1:9050").
func NewClient(addr string) (*http.Client, error) {
u, err := url.Parse("socks5://" + addr)
if err != nil {
return nil, err
}
d, err := proxy.FromURL(u, proxy.Direct)
if err != nil {
return nil, err
}
t := &http.Transport{Dial: d.Dial}
return &http.Client{Transport: t}, nil
}
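A short usage sketch (illustrative, not part of the vendored file; the URL and proxy address are assumptions): because NewClient returns a standard *http.Client, ordinary net/http calls are transparently routed through the Tor SOCKS proxy.
package main

import (
	"io"
	"log"
	"os"

	"github.com/wybiral/torgo"
)

func main() {
	client, err := torgo.NewClient("127.0.0.1:9050") // Tor's default SOCKS port
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://check.torproject.org/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // the request went out over Tor
}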
View file
@ -1,242 +0,0 @@
package torgo
import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/textproto"
"strconv"
"strings"
)
// A Controller instance is a control port connection that provides methods for
// communicating with Tor.
type Controller struct {
// Array of available authentication methods.
AuthMethods []string
// Cookie file path (empty if not available).
CookieFile string
// Text is a textproto.Conn to the control port.
Text *textproto.Conn
}
// NewController returns a new Controller instance connecting to the control
// port at addr.
func NewController(addr string) (*Controller, error) {
text, err := textproto.Dial("tcp", addr)
if err != nil {
return nil, err
}
c := &Controller{Text: text}
err = c.getProtocolInfo()
if err != nil {
return nil, err
}
return c, nil
}
// Make a textproto request and expect the command to have a valid 250 status.
func (c *Controller) makeRequest(request string) (int, string, error) {
id, err := c.Text.Cmd(request)
if err != nil {
return 0, "", err
}
c.Text.StartResponse(id)
defer c.Text.EndResponse(id)
return c.Text.ReadResponse(250)
}
// Perform PROTOCOLINFO command and parse the response.
func (c *Controller) getProtocolInfo() error {
_, msg, err := c.makeRequest("PROTOCOLINFO 1")
if err != nil {
return err
}
lines := strings.Split(msg, "\n")
authPrefix := "AUTH METHODS="
cookiePrefix := "COOKIEFILE="
for _, line := range lines {
// Check for AUTH METHODS line
if strings.HasPrefix(line, authPrefix) {
// This line should be in a format like:
// AUTH METHODS=method1,method2,methodN COOKIEFILE=cookiefilepath
line = line[len(authPrefix):]
parts := strings.SplitN(line, " ", 2)
c.AuthMethods = strings.Split(parts[0], ",")
// Check for COOKIEFILE key/value
if len(parts) == 2 && strings.HasPrefix(parts[1], cookiePrefix) {
raw := parts[1][len(cookiePrefix):]
c.CookieFile, err = strconv.Unquote(raw)
if err != nil {
return err
}
}
}
}
return nil
}
// Perform GETINFO command based on key.
func (c *Controller) getInfo(key string) (string, error) {
_, msg, err := c.makeRequest("GETINFO " + key)
if err != nil {
return "", err
}
lines := strings.Split(msg, "\n")
for _, line := range lines {
parts := strings.SplitN(line, "=", 2)
if parts[0] == key {
return parts[1], nil
}
}
return "", fmt.Errorf(key + " not found")
}
// Perform GETINFO command and convert response to int.
func (c *Controller) getInfoInt(key string) (int, error) {
s, err := c.getInfo(key)
if err != nil {
return 0, err
}
return strconv.Atoi(s)
}
// GetAddress returns the current external IP address.
func (c *Controller) GetAddress() (string, error) {
return c.getInfo("address")
}
// GetBytesRead returns total bytes downloaded.
func (c *Controller) GetBytesRead() (int, error) {
return c.getInfoInt("traffic/read")
}
// GetBytesWritten returns total bytes uploaded.
func (c *Controller) GetBytesWritten() (int, error) {
return c.getInfoInt("traffic/written")
}
// GetConfigFile returns the path to the Tor config file.
func (c *Controller) GetConfigFile() (string, error) {
return c.getInfo("config-file")
}
// GetTorPid returns PID for current Tor process.
func (c *Controller) GetTorPid() (int, error) {
return c.getInfoInt("process/pid")
}
// GetVersion returns version of Tor server.
func (c *Controller) GetVersion() (string, error) {
return c.getInfo("version")
}
// AuthenticateNone authenticates to the controller without a password or cookie.
func (c *Controller) AuthenticateNone() error {
_, _, err := c.makeRequest("AUTHENTICATE")
if err != nil {
return err
}
return nil
}
// AuthenticatePassword authenticates to the controller with a password.
func (c *Controller) AuthenticatePassword(password string) error {
quoted := strconv.Quote(password)
_, _, err := c.makeRequest("AUTHENTICATE " + quoted)
if err != nil {
return err
}
return nil
}
// AuthenticateCookie authenticates to the controller with the cookie read from
// the current CookieFile path.
func (c *Controller) AuthenticateCookie() error {
rawCookie, err := ioutil.ReadFile(c.CookieFile)
if err != nil {
return err
}
cookie := hex.EncodeToString(rawCookie)
_, _, err = c.makeRequest("AUTHENTICATE " + cookie)
if err != nil {
return err
}
return nil
}
// AddOnion adds an Onion hidden service. If no private key is supplied, one
// will be generated, and the PrivateKeyType and PrivateKey properties will be
// set to the newly generated one.
// The hidden service will use the port mapping contained in the supplied Ports
// map. ServiceID will be assigned based on the private key and will be the
// address of this hidden service (without the ".onion" ending).
func (c *Controller) AddOnion(onion *Onion) error {
if onion == nil {
return errors.New("torgo: onion cannot be nil")
}
if len(onion.Ports) == 0 {
return errors.New("torgo: onion requires at least one port mapping")
}
req := "ADD_ONION "
// If no key is supplied, request a new one: PrivateKeyType becomes "NEW" and
// the key field temporarily carries the requested key type ("BEST" by default).
if len(onion.PrivateKey) == 0 {
if onion.PrivateKeyType == "" {
onion.PrivateKey = "BEST"
} else {
onion.PrivateKey = onion.PrivateKeyType
}
onion.PrivateKeyType = "NEW"
}
req += fmt.Sprintf("%s:%s ", onion.PrivateKeyType, onion.PrivateKey)
for remotePort, localAddr := range onion.Ports {
req += fmt.Sprintf("Port=%d,%s ", remotePort, localAddr)
}
_, msg, err := c.makeRequest(req)
if err != nil {
return err
}
lines := strings.Split(msg, "\n")
for _, line := range lines {
parts := strings.SplitN(line, "=", 2)
if parts[0] == "ServiceID" {
onion.ServiceID = parts[1]
} else if parts[0] == "PrivateKey" {
key := strings.SplitN(parts[1], ":", 2)
onion.PrivateKeyType = key[0]
onion.PrivateKey = key[1]
}
}
return nil
}
// DeleteOnion deletes an onion by its serviceID (stops a hidden service created
// by this controller).
func (c *Controller) DeleteOnion(serviceID string) error {
_, _, err := c.makeRequest("DEL_ONION " + serviceID)
if err != nil {
return err
}
return nil
}
// Signal sends a signal to the server. The Tor documentation defines the
// following signals:
// * RELOAD
// * SHUTDOWN
// * DUMP
// * DEBUG
// * HALT
// * CLEARDNSCACHE
// * NEWNYM
// * HEARTBEAT
// * DORMANT
// * ACTIVE
func (c *Controller) Signal(signal string) error {
_, _, err := c.makeRequest("SIGNAL " + signal)
if err != nil {
return err
}
return nil
}
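A minimal self-contained sketch (illustrative; the control-port address and unauthenticated access are assumptions) showing the most common signal, NEWNYM, which asks Tor to switch to clean circuits for subsequent requests.
package main

import (
	"log"

	"github.com/wybiral/torgo"
)

func main() {
	ctrl, err := torgo.NewController("127.0.0.1:9051")
	if err != nil {
		log.Fatal(err)
	}
	if err := ctrl.AuthenticateNone(); err != nil {
		log.Fatal(err)
	}
	// NEWNYM: subsequent connections should use new circuits (a new "identity").
	if err := ctrl.Signal("NEWNYM"); err != nil {
		log.Fatal(err)
	}
}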
View file
@ -1,57 +0,0 @@
package torgo
import (
"crypto/sha512"
"encoding/base32"
"encoding/base64"
"errors"
"strings"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/sha3"
)
// OnionFromEd25519 returns an Onion instance from an ED25519 private key which
// can be used to start a hidden service with controller.AddOnion.
func OnionFromEd25519(pri ed25519.PrivateKey) (*Onion, error) {
pub, ok := pri.Public().(ed25519.PublicKey)
if !ok {
return nil, errors.New("torgo: unable to extract ed25519.PublicKey")
}
serviceID, err := ServiceIDFromEd25519(pub)
if err != nil {
return nil, err
}
h := sha512.Sum512(pri[:32])
// Set bits so that h[:32] is private scalar "a"
h[0] &= 248
h[31] &= 127
h[31] |= 64
// Since h[32:] is RH, h is now (a || RH)
key := base64.StdEncoding.EncodeToString(h[:])
return &Onion{
Ports: make(map[int]string),
ServiceID: serviceID,
PrivateKey: key,
PrivateKeyType: "ED25519-V3",
}, nil
}
// ServiceIDFromEd25519 calculates a Tor service ID from an ed25519.PublicKey.
func ServiceIDFromEd25519(pub ed25519.PublicKey) (string, error) {
checkdigits := ed25519Checkdigits(pub)
combined := pub[:]
combined = append(combined, checkdigits...)
combined = append(combined, 0x03)
serviceID := base32.StdEncoding.EncodeToString(combined)
return strings.ToLower(serviceID), nil
}
// ed25519Checkdigits calculates the check digits used to create the service ID.
func ed25519Checkdigits(pub ed25519.PublicKey) []byte {
checkstr := []byte(".onion checksum")
checkstr = append(checkstr, pub...)
checkstr = append(checkstr, 0x03)
checksum := sha3.Sum256(checkstr)
return checksum[:2]
}
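A sketch of the intended flow (illustrative; the port mapping and control-port settings are assumptions): generate an ed25519 key, derive the Onion from it, and register it, so the same address can be reused by keeping the key.
package main

import (
	"log"

	"github.com/wybiral/torgo"
	"golang.org/x/crypto/ed25519"
)

func main() {
	// Generate a fresh ed25519 key; nil means crypto/rand is used.
	_, pri, err := ed25519.GenerateKey(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Derive the onion address and private-key encoding from the key.
	onion, err := torgo.OnionFromEd25519(pri)
	if err != nil {
		log.Fatal(err)
	}
	onion.Ports[80] = "127.0.0.1:8080"

	ctrl, err := torgo.NewController("127.0.0.1:9051")
	if err != nil {
		log.Fatal(err)
	}
	if err := ctrl.AuthenticateNone(); err != nil {
		log.Fatal(err)
	}
	// Because the key is supplied, ServiceID stays the same across restarts.
	if err := ctrl.AddOnion(onion); err != nil {
		log.Fatal(err)
	}
	log.Printf("persistent address: %s.onion", onion.ServiceID)
}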
View file
@ -1,13 +0,0 @@
package torgo
// Onion represents a hidden service.
type Onion struct {
// Ports maps virtual ports for the hidden service to local addresses.
Ports map[int]string
// ServiceID is the unique hidden service address (without ".onion" ending).
ServiceID string
// Base64 encoded private key for the hidden service.
PrivateKey string
// Type of private key (RSA1024 or ED25519-V3).
PrivateKeyType string
}
View file
@ -1,46 +0,0 @@
package torgo
import (
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"encoding/asn1"
"encoding/base32"
"encoding/base64"
"errors"
"strings"
)
// OnionFromRSA returns an Onion instance from a 1024-bit RSA private key which
// can be used to start a hidden service with controller.AddOnion.
func OnionFromRSA(pri *rsa.PrivateKey) (*Onion, error) {
pub, ok := pri.Public().(*rsa.PublicKey)
if !ok {
return nil, errors.New("torgo: unable to extract *rsa.PublicKey")
}
serviceID, err := ServiceIDFromRSA(pub)
if err != nil {
return nil, err
}
der := x509.MarshalPKCS1PrivateKey(pri)
key := base64.StdEncoding.EncodeToString(der)
return &Onion{
Ports: make(map[int]string),
ServiceID: serviceID,
PrivateKey: key,
PrivateKeyType: "RSA1024",
}, nil
}
// ServiceIDFromRSA calculates a Tor service ID from an *rsa.PublicKey.
func ServiceIDFromRSA(pub *rsa.PublicKey) (string, error) {
der, err := asn1.Marshal(*pub)
if err != nil {
return "", err
}
// Onion id is base32(firstHalf(sha1(publicKeyDER)))
hash := sha1.Sum(der)
half := hash[:len(hash)/2]
serviceID := base32.StdEncoding.EncodeToString(half)
return strings.ToLower(serviceID), nil
}
View file
@ -1,2 +0,0 @@
// Package torgo provides a Golang controller interface for Tor.
package torgo
3
vendor/golang.org/x/crypto/AUTHORS generated vendored
View file
@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
View file
@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
27
vendor/golang.org/x/crypto/LICENSE generated vendored
View file
@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22
vendor/golang.org/x/crypto/PATENTS generated vendored
View file
@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
View file
@ -1,222 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// In Go 1.13, the ed25519 package was promoted to the standard library as
// crypto/ed25519, and this package became a wrapper for the standard library one.
//
// +build !go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
"bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
"errors"
"io"
"strconv"
"golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte
// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
publicKey := make([]byte, PublicKeySize)
copy(publicKey, priv[32:])
return PublicKey(publicKey)
}
// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
seed := make([]byte, SeedSize)
copy(seed, priv[:32])
return seed
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
}
return Sign(priv, message), nil
}
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
seed := make([]byte, SeedSize)
if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
privateKey := NewKeyFromSeed(seed)
publicKey := make([]byte, PublicKeySize)
copy(publicKey, privateKey[32:])
return publicKey, privateKey, nil
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
if l := len(seed); l != SeedSize {
panic("ed25519: bad seed length: " + strconv.Itoa(l))
}
digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest[:])
edwards25519.GeScalarMultBase(&A, &hBytes)
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
privateKey := make([]byte, PrivateKeySize)
copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
return privateKey
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := make([]byte, SignatureSize)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
var publicKeyBytes [32]byte
copy(publicKeyBytes[:], publicKey)
if !A.FromBytes(&publicKeyBytes) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var s [32]byte
copy(s[:], sig[32:])
// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
// the range [0, order) in order to prevent signature malleability.
if !edwards25519.ScMinimal(&s) {
return false
}
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
var checkR [32]byte
R.ToBytes(&checkR)
return bytes.Equal(sig[:32], checkR[:])
}
View file
@ -1,73 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}
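A minimal sign/verify round trip (illustrative) using the functions above; on Go 1.13 and later these calls simply delegate to the standard library crypto/ed25519.
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, pri, err := ed25519.GenerateKey(nil) // nil falls back to crypto/rand
	if err != nil {
		log.Fatal(err)
	}
	msg := []byte("hello, onion")
	sig := ed25519.Sign(pri, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // prints: true
}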
File diff suppressed because it is too large
File diff suppressed because it is too large
View file
@ -1,66 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3 // import "golang.org/x/crypto/sha3"
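A small usage sketch (illustrative) of the package API documented above: the one-shot Sum helpers and the streaming hash.Hash interface produce the same digest.
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello world")

	// One-shot SHA3-256 digest.
	sum := sha3.Sum256(data)
	fmt.Printf("%x\n", sum)

	// Streaming use via the standard hash.Hash interface; same digest as above.
	h := sha3.New256()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}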
View file
@ -1,97 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash {
if h := new224Asm(); h != nil {
return h
}
return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
}
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash {
if h := new256Asm(); h != nil {
return h
}
return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
}
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash {
if h := new384Asm(); h != nil {
return h
}
return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
}
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash {
if h := new512Asm(); h != nil {
return h
}
return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
}
// NewLegacyKeccak256 creates a new Keccak-256 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New256 instead.
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
// NewLegacyKeccak512 creates a new Keccak-512 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New512 instead.
func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
View file
@ -1,27 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build gccgo appengine !s390x
package sha3
import (
"hash"
)
// new224Asm returns an assembly implementation of SHA3-224 if available,
// otherwise it returns nil.
func new224Asm() hash.Hash { return nil }
// new256Asm returns an assembly implementation of SHA3-256 if available,
// otherwise it returns nil.
func new256Asm() hash.Hash { return nil }
// new384Asm returns an assembly implementation of SHA3-384 if available,
// otherwise it returns nil.
func new384Asm() hash.Hash { return nil }
// new512Asm returns an assembly implementation of SHA3-512 if available,
// otherwise it returns nil.
func new512Asm() hash.Hash { return nil }
View file
@ -1,412 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package sha3
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
}
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[12] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[18] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[24] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[16] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[22] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[3] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[1] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[7] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[19] ^ d4
bc3 = t<<8 | t>>(64-8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[11] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[23] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[4] ^ d4
bc0 = t<<27 | t>>(64-27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[2] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[8] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[14] ^ d4
bc2 = t<<39 | t>>(64-39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[7] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[23] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[14] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[11] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[2] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[18] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[6] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[22] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[4] ^ d4
bc3 = t<<8 | t>>(64-8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[1] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[8] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[24] ^ d4
bc0 = t<<27 | t>>(64-27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[12] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[3] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[19] ^ d4
bc2 = t<<39 | t>>(64-39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[22] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[8] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[19] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[1] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[12] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[23] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[16] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[2] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[24] ^ d4
bc3 = t<<8 | t>>(64-8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[6] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[3] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[14] ^ d4
bc0 = t<<27 | t>>(64-27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[7] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[18] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[4] ^ d4
bc2 = t<<39 | t>>(64-39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[2] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[3] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[4] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[6] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[7] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[8] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[11] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[12] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[14] ^ d4
bc3 = t<<8 | t>>(64-8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[16] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[18] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[19] ^ d4
bc0 = t<<27 | t>>(64-27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[22] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[23] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[24] ^ d4
bc2 = t<<39 | t>>(64-39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}
View file
@ -1,13 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
package sha3
// This function is implemented in keccakf_amd64.s.
//go:noescape
func keccakF1600(a *[25]uint64)
View file
@ -1,390 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage
// Offsets in state
#define _ba (0*8)
#define _be (1*8)
#define _bi (2*8)
#define _bo (3*8)
#define _bu (4*8)
#define _ga (5*8)
#define _ge (6*8)
#define _gi (7*8)
#define _go (8*8)
#define _gu (9*8)
#define _ka (10*8)
#define _ke (11*8)
#define _ki (12*8)
#define _ko (13*8)
#define _ku (14*8)
#define _ma (15*8)
#define _me (16*8)
#define _mi (17*8)
#define _mo (18*8)
#define _mu (19*8)
#define _sa (20*8)
#define _se (21*8)
#define _si (22*8)
#define _so (23*8)
#define _su (24*8)
// Temporary registers
#define rT1 AX
// Round vars
#define rpState DI
#define rpStack SP
#define rDa BX
#define rDe CX
#define rDi DX
#define rDo R8
#define rDu R9
#define rBa R10
#define rBe R11
#define rBi R12
#define rBo R13
#define rBu R14
#define rCa SI
#define rCe BP
#define rCi rBi
#define rCo rBo
#define rCu R15
#define MOVQ_RBI_RCE MOVQ rBi, rCe
#define XORQ_RT1_RCA XORQ rT1, rCa
#define XORQ_RT1_RCE XORQ rT1, rCe
#define XORQ_RBA_RCU XORQ rBa, rCu
#define XORQ_RBE_RCU XORQ rBe, rCu
#define XORQ_RDU_RCU XORQ rDu, rCu
#define XORQ_RDA_RCA XORQ rDa, rCa
#define XORQ_RDE_RCE XORQ rDe, rCe
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
/* Prepare round */ \
MOVQ rCe, rDa; \
ROLQ $1, rDa; \
\
MOVQ _bi(iState), rCi; \
XORQ _gi(iState), rDi; \
XORQ rCu, rDa; \
XORQ _ki(iState), rCi; \
XORQ _mi(iState), rDi; \
XORQ rDi, rCi; \
\
MOVQ rCi, rDe; \
ROLQ $1, rDe; \
\
MOVQ _bo(iState), rCo; \
XORQ _go(iState), rDo; \
XORQ rCa, rDe; \
XORQ _ko(iState), rCo; \
XORQ _mo(iState), rDo; \
XORQ rDo, rCo; \
\
MOVQ rCo, rDi; \
ROLQ $1, rDi; \
\
MOVQ rCu, rDo; \
XORQ rCe, rDi; \
ROLQ $1, rDo; \
\
MOVQ rCa, rDu; \
XORQ rCi, rDo; \
ROLQ $1, rDu; \
\
/* Result b */ \
MOVQ _ba(iState), rBa; \
MOVQ _ge(iState), rBe; \
XORQ rCo, rDu; \
MOVQ _ki(iState), rBi; \
MOVQ _mo(iState), rBo; \
MOVQ _su(iState), rBu; \
XORQ rDe, rBe; \
ROLQ $44, rBe; \
XORQ rDi, rBi; \
XORQ rDa, rBa; \
ROLQ $43, rBi; \
\
MOVQ rBe, rCa; \
MOVQ rc, rT1; \
ORQ rBi, rCa; \
XORQ rBa, rT1; \
XORQ rT1, rCa; \
MOVQ rCa, _ba(oState); \
\
XORQ rDu, rBu; \
ROLQ $14, rBu; \
MOVQ rBa, rCu; \
ANDQ rBe, rCu; \
XORQ rBu, rCu; \
MOVQ rCu, _bu(oState); \
\
XORQ rDo, rBo; \
ROLQ $21, rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _bi(oState); \
\
NOTQ rBi; \
ORQ rBa, rBu; \
ORQ rBo, rBi; \
XORQ rBo, rBu; \
XORQ rBe, rBi; \
MOVQ rBu, _bo(oState); \
MOVQ rBi, _be(oState); \
B_RBI_RCE; \
\
/* Result g */ \
MOVQ _gu(iState), rBe; \
XORQ rDu, rBe; \
MOVQ _ka(iState), rBi; \
ROLQ $20, rBe; \
XORQ rDa, rBi; \
ROLQ $3, rBi; \
MOVQ _bo(iState), rBa; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDo, rBa; \
MOVQ _me(iState), rBo; \
MOVQ _si(iState), rBu; \
ROLQ $28, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ga(oState); \
G_RT1_RCA; \
\
XORQ rDe, rBo; \
ROLQ $45, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ge(oState); \
G_RT1_RCE; \
\
XORQ rDi, rBu; \
ROLQ $61, rBu; \
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _go(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _gu(oState); \
NOTQ rBu; \
G_RBA_RCU; \
\
ORQ rBu, rBo; \
XORQ rBi, rBo; \
MOVQ rBo, _gi(oState); \
\
/* Result k */ \
MOVQ _be(iState), rBa; \
MOVQ _gi(iState), rBe; \
MOVQ _ko(iState), rBi; \
MOVQ _mu(iState), rBo; \
MOVQ _sa(iState), rBu; \
XORQ rDi, rBe; \
ROLQ $6, rBe; \
XORQ rDo, rBi; \
ROLQ $25, rBi; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDe, rBa; \
ROLQ $1, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ka(oState); \
K_RT1_RCA; \
\
XORQ rDu, rBo; \
ROLQ $8, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ke(oState); \
K_RT1_RCE; \
\
XORQ rDa, rBu; \
ROLQ $18, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _ki(oState); \
\
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _ko(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _ku(oState); \
K_RBA_RCU; \
\
/* Result m */ \
MOVQ _ga(iState), rBe; \
XORQ rDa, rBe; \
MOVQ _ke(iState), rBi; \
ROLQ $36, rBe; \
XORQ rDe, rBi; \
MOVQ _bu(iState), rBa; \
ROLQ $10, rBi; \
MOVQ rBe, rT1; \
MOVQ _mi(iState), rBo; \
ANDQ rBi, rT1; \
XORQ rDu, rBa; \
MOVQ _so(iState), rBu; \
ROLQ $27, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ma(oState); \
M_RT1_RCA; \
\
XORQ rDi, rBo; \
ROLQ $15, rBo; \
MOVQ rBi, rT1; \
ORQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _me(oState); \
M_RT1_RCE; \
\
XORQ rDo, rBu; \
ROLQ $56, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ORQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _mi(oState); \
\
ORQ rBa, rBe; \
XORQ rBu, rBe; \
MOVQ rBe, _mu(oState); \
\
ANDQ rBa, rBu; \
XORQ rBo, rBu; \
MOVQ rBu, _mo(oState); \
M_RBE_RCU; \
\
/* Result s */ \
MOVQ _bi(iState), rBa; \
MOVQ _go(iState), rBe; \
MOVQ _ku(iState), rBi; \
XORQ rDi, rBa; \
MOVQ _ma(iState), rBo; \
ROLQ $62, rBa; \
XORQ rDo, rBe; \
MOVQ _se(iState), rBu; \
ROLQ $55, rBe; \
\
XORQ rDu, rBi; \
MOVQ rBa, rDu; \
XORQ rDe, rBu; \
ROLQ $2, rBu; \
ANDQ rBe, rDu; \
XORQ rBu, rDu; \
MOVQ rDu, _su(oState); \
\
ROLQ $39, rBi; \
S_RDU_RCU; \
NOTQ rBe; \
XORQ rDa, rBo; \
MOVQ rBe, rDa; \
ANDQ rBi, rDa; \
XORQ rBa, rDa; \
MOVQ rDa, _sa(oState); \
S_RDA_RCA; \
\
ROLQ $41, rBo; \
MOVQ rBi, rDe; \
ORQ rBo, rDe; \
XORQ rBe, rDe; \
MOVQ rDe, _se(oState); \
S_RDE_RCE; \
\
MOVQ rBo, rDi; \
MOVQ rBu, rDo; \
ANDQ rBu, rDi; \
ORQ rBa, rDo; \
XORQ rBi, rDi; \
XORQ rBo, rDo; \
MOVQ rDi, _si(oState); \
MOVQ rDo, _so(oState) \
// func keccakF1600(state *[25]uint64)
TEXT ·keccakF1600(SB), 0, $200-8
MOVQ state+0(FP), rpState
// Convert the user state into an internal state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
// Execute the KeccakF permutation
MOVQ _ba(rpState), rCa
MOVQ _be(rpState), rCe
MOVQ _bu(rpState), rCu
XORQ _ga(rpState), rCa
XORQ _ge(rpState), rCe
XORQ _gu(rpState), rCu
XORQ _ka(rpState), rCa
XORQ _ke(rpState), rCe
XORQ _ku(rpState), rCu
XORQ _ma(rpState), rCa
XORQ _me(rpState), rCe
XORQ _mu(rpState), rCu
XORQ _sa(rpState), rCa
XORQ _se(rpState), rCe
MOVQ _si(rpState), rDi
MOVQ _so(rpState), rDo
XORQ _su(rpState), rCu
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
// Revert the internal state to the user state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
RET
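The round macro above interleaves all five Keccak step mappings, but its opening "Prepare round" block is the theta step: the five column parities (rCa..rCu) are folded into the rotated D words (rDa..rDu), which are then XOR-ed into every lane. A minimal pure-Go sketch of that dataflow, assuming a flat a[x+5*y] lane layout and a hypothetical helper name theta (neither appears in this file):

package sha3sketch

import "math/bits"

// theta XORs each lane with D[x] = C[x-1] ^ ROTL1(C[x+1]), where C[x] is the
// parity of column x. The assembly above computes the same values in the
// rCa..rCu and rDa..rDu registers, interleaved with the rest of the round.
func theta(a *[25]uint64) {
	var c, d [5]uint64
	for x := 0; x < 5; x++ {
		c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		d[x] = c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
		for y := 0; y < 5; y++ {
			a[x+5*y] ^= d[x]
		}
	}
}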

View file

@ -1,18 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package sha3
import (
"crypto"
)
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}
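This init hook makes the SHA-3 algorithms reachable through the standard library's crypto.Hash identifiers. A minimal usage sketch, assuming the package is imported from its upstream path golang.org/x/crypto/sha3 (the blank import triggers the registration above):

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // registers SHA3-224..SHA3-512 in init
)

func main() {
	if crypto.SHA3_256.Available() {
		h := crypto.SHA3_256.New()
		h.Write([]byte("hello"))
		fmt.Printf("%x\n", h.Sum(nil))
	}
}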

View file

@ -1,192 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-128
// currently needs the largest buffer (a rate of 168 bytes).
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
buf []byte // points into storage
rate int // the number of bytes of state to use
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
// SHA-3 and SHAKE functions by appending bitstrings to the message.
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
// padding rule from section 5.1 is applied to pad the message to a multiple
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
// giving 00000110b (0x06) and 00011111b (0x1f).
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
// Extendable-Output Functions (May 2014)"
dsbyte byte
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
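The dsbyte comment above packs the domain-separation suffix and the first padding bit into a single byte, least-significant bit first. Spelled out as constants (the names here are illustrative, not part of this file):

const (
	dsbyteSHA3  = 0x06 // suffix "01" plus the first padding "1": 0b00000110
	dsbyteSHAKE = 0x1f // suffix "1111" plus the first padding "1": 0b00011111
)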
// BlockSize returns the rate of the sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting Sponge.state to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.buf = d.storage[:0]
}
func (d *state) clone() *state {
ret := *d
if ret.state == spongeAbsorbing {
ret.buf = ret.storage[:len(ret.buf)]
} else {
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
}
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.buf)
d.buf = d.storage[:0]
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
}
// padAndPermute appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute(dsbyte byte) {
if d.buf == nil {
d.buf = d.storage[:0]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.buf = append(d.buf, dsbyte)
zerosStart := len(d.buf)
d.buf = d.storage[:d.rate]
for i := zerosStart; i < d.rate; i++ {
d.buf[i] = 0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.buf[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
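padAndPermute is the multi-bitrate pad10*1 rule in byte form: append dsbyte, zero-fill up to the rate, then flip the top bit of the last byte. The same rule as a standalone sketch (hypothetical helper, assuming len(tail) < rate, which padAndPermute guarantees):

// pad returns one rate-sized block holding the unabsorbed tail followed by
// the dsbyte and 10*1 padding, mirroring how padAndPermute fills d.buf.
func pad(tail []byte, rate int, dsbyte byte) []byte {
	block := make([]byte, rate)
	copy(block, tail)
	block[len(tail)] = dsbyte // domain bits plus the first padding 1
	block[rate-1] ^= 0x80     // the final padding 1 (MSB of the last byte)
	return block
}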
// Write absorbs more data into the hash's state. It panics if more data
// is written after output has been read.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
if d.buf == nil {
d.buf = d.storage[:0]
}
written = len(p)
for len(p) > 0 {
if len(d.buf) == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - len(d.buf)
if todo > len(p) {
todo = len(p)
}
d.buf = append(d.buf, p[:todo]...)
p = p[todo:]
// If the sponge is full, apply the permutation.
if len(d.buf) == d.rate {
d.permute()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute(d.dsbyte)
}
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.buf)
d.buf = d.buf[n:]
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if len(d.buf) == 0 {
d.permute()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func (d *state) Sum(in []byte) []byte {
// Make a copy of the original hash so that the caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
dup.Read(hash)
return append(in, hash...)
}
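Together, Write, Read and Sum give this type the standard hash.Hash behaviour. A minimal usage sketch against the package's exported API (golang.org/x/crypto/sha3):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// One-shot digest.
	sum := sha3.Sum256([]byte("The quick brown fox"))
	fmt.Printf("%x\n", sum)

	// Streaming digest; produces the same value as above.
	h := sha3.New256()
	h.Write([]byte("The quick "))
	h.Write([]byte("brown fox"))
	fmt.Printf("%x\n", h.Sum(nil))
}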

View file

@ -1,284 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !gccgo,!appengine
package sha3
// This file contains code for using the 'compute intermediate
// message digest' (KIMD) and 'compute last message digest' (KLMD)
// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
import (
"hash"
"golang.org/x/sys/cpu"
)
// codes represent 7-bit KIMD/KLMD function codes as defined in
// the Principles of Operation.
type code uint64
const (
// function codes for KIMD/KLMD
sha3_224 code = 32
sha3_256 = 33
sha3_384 = 34
sha3_512 = 35
shake_128 = 36
shake_256 = 37
nopad = 0x100
)
// kimd is a wrapper for the 'compute intermediate message digest' instruction.
// src must be a multiple of the rate for the given function code.
//go:noescape
func kimd(function code, chain *[200]byte, src []byte)
// klmd is a wrapper for the 'compute last message digest' instruction.
// src padding is handled by the instruction.
//go:noescape
func klmd(function code, chain *[200]byte, dst, src []byte)
type asmState struct {
a [200]byte // 1600 bit state
buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
rate int // equivalent to block size
storage [3072]byte // underlying storage for buf
outputLen int // output length if fixed, 0 if not
function code // KIMD/KLMD function code
state spongeDirection // whether the sponge is absorbing or squeezing
}
func newAsmState(function code) *asmState {
var s asmState
s.function = function
switch function {
case sha3_224:
s.rate = 144
s.outputLen = 28
case sha3_256:
s.rate = 136
s.outputLen = 32
case sha3_384:
s.rate = 104
s.outputLen = 48
case sha3_512:
s.rate = 72
s.outputLen = 64
case shake_128:
s.rate = 168
case shake_256:
s.rate = 136
default:
panic("sha3: unrecognized function code")
}
// limit s.buf size to a multiple of s.rate
s.resetBuf()
return &s
}
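The rates selected in newAsmState follow the SHA-3 relation rate = 200 - 2*outputLen: the 1600-bit state is 200 bytes and the capacity is twice the digest length. As a one-line sketch (the helper name is illustrative):

// rateFor returns the sponge rate in bytes for a fixed-output SHA-3 size,
// e.g. 200-2*32 = 136 for SHA3-256 and 200-2*64 = 72 for SHA3-512.
func rateFor(outputLen int) int { return 200 - 2*outputLen }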
func (s *asmState) clone() *asmState {
c := *s
c.buf = c.storage[:len(s.buf):cap(s.buf)]
return &c
}
// copyIntoBuf copies b into buf. It will panic if there is not enough space to
// store all of b.
func (s *asmState) copyIntoBuf(b []byte) {
bufLen := len(s.buf)
s.buf = s.buf[:len(s.buf)+len(b)]
copy(s.buf[bufLen:], b)
}
// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
// multiple of the rate.
func (s *asmState) resetBuf() {
max := (cap(s.storage) / s.rate) * s.rate
s.buf = s.storage[:0:max]
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (s *asmState) Write(b []byte) (int, error) {
if s.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
length := len(b)
for len(b) > 0 {
if len(s.buf) == 0 && len(b) >= cap(s.buf) {
// Hash the data directly and push any remaining bytes
// into the buffer.
remainder := len(b) % s.rate
kimd(s.function, &s.a, b[:len(b)-remainder])
if remainder != 0 {
s.copyIntoBuf(b[len(b)-remainder:])
}
return length, nil
}
if len(s.buf) == cap(s.buf) {
// flush the buffer
kimd(s.function, &s.a, s.buf)
s.buf = s.buf[:0]
}
// copy as much as we can into the buffer
n := len(b)
if len(b) > cap(s.buf)-len(s.buf) {
n = cap(s.buf) - len(s.buf)
}
s.copyIntoBuf(b[:n])
b = b[n:]
}
return length, nil
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (s *asmState) Read(out []byte) (n int, err error) {
n = len(out)
// need to pad if we were absorbing
if s.state == spongeAbsorbing {
s.state = spongeSqueezing
// write hash directly into out if possible
if len(out)%s.rate == 0 {
klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
s.buf = s.buf[:0]
return
}
// write hash into buffer
max := cap(s.buf)
if max > len(out) {
max = (len(out)/s.rate)*s.rate + s.rate
}
klmd(s.function, &s.a, s.buf[:max], s.buf)
s.buf = s.buf[:max]
}
for len(out) > 0 {
// flush the buffer
if len(s.buf) != 0 {
c := copy(out, s.buf)
out = out[c:]
s.buf = s.buf[c:]
continue
}
// write hash directly into out if possible
if len(out)%s.rate == 0 {
klmd(s.function|nopad, &s.a, out, nil)
return
}
// write hash into buffer
s.resetBuf()
if cap(s.buf) > len(out) {
s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
}
klmd(s.function|nopad, &s.a, s.buf, nil)
}
return
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (s *asmState) Sum(b []byte) []byte {
if s.outputLen == 0 {
panic("sha3: cannot call Sum on SHAKE functions")
}
// Copy the state to preserve the original.
a := s.a
// Hash the buffer. Note that we don't clear it because we
// aren't updating the state.
klmd(s.function, &a, nil, s.buf)
return append(b, a[:s.outputLen]...)
}
// Reset resets the Hash to its initial state.
func (s *asmState) Reset() {
for i := range s.a {
s.a[i] = 0
}
s.resetBuf()
s.state = spongeAbsorbing
}
// Size returns the number of bytes Sum will return.
func (s *asmState) Size() int {
return s.outputLen
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (s *asmState) BlockSize() int {
return s.rate
}
// Clone returns a copy of the ShakeHash in its current state.
func (s *asmState) Clone() ShakeHash {
return s.clone()
}
// new224Asm returns an assembly implementation of SHA3-224 if available,
// otherwise it returns nil.
func new224Asm() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_224)
}
return nil
}
// new256Asm returns an assembly implementation of SHA3-256 if available,
// otherwise it returns nil.
func new256Asm() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_256)
}
return nil
}
// new384Asm returns an assembly implementation of SHA3-384 if available,
// otherwise it returns nil.
func new384Asm() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_384)
}
return nil
}
// new512Asm returns an assembly implementation of SHA3-512 if available,
// otherwise it returns nil.
func new512Asm() hash.Hash {
if cpu.S390X.HasSHA3 {
return newAsmState(sha3_512)
}
return nil
}
// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
// otherwise it returns nil.
func newShake128Asm() ShakeHash {
if cpu.S390X.HasSHA3 {
return newAsmState(shake_128)
}
return nil
}
// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
// otherwise it returns nil.
func newShake256Asm() ShakeHash {
if cpu.S390X.HasSHA3 {
return newAsmState(shake_256)
}
return nil
}
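These newXxxAsm constructors let the portable entry points prefer the KIMD/KLMD implementation and fall back to the generic sponge when the facility is absent. A sketch of how they are consumed, modelled on the package's hashes.go (that file is not part of this hunk, so treat the body as an assumption):

func New256() hash.Hash {
	if h := new256Asm(); h != nil {
		return h // IBM Z KIMD/KLMD implementation
	}
	return &state{rate: 136, outputLen: 32, dsbyte: 0x06} // generic sponge
}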

View file

@ -1,33 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build !gccgo,!appengine
#include "textflag.h"
// func kimd(function code, chain *[200]byte, src []byte)
TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
MOVD function+0(FP), R0
MOVD chain+8(FP), R1
LMG src+16(FP), R2, R3 // R2=base, R3=len
continue:
WORD $0xB93E0002 // KIMD --, R2
BVS continue // continue if interrupted
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
RET
// func klmd(function code, chain *[200]byte, dst, src []byte)
TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
// TODO: SHAKE support
MOVD function+0(FP), R0
MOVD chain+8(FP), R1
LMG dst+16(FP), R2, R3 // R2=base, R3=len
LMG src+40(FP), R4, R5 // R4=base, R5=len
continue:
WORD $0xB93F0024 // KLMD R2, R4
BVS continue // continue if interrupted
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
RET

View file

@ -1,173 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE and cSHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
//
//
// SHAKE implementation is based on FIPS PUB 202 [1]
// cSHAKE implementation is based on NIST SP 800-185 [2]
//
// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
// [2] https://doi.org/10.6028/NIST.SP.800-185
import (
"encoding/binary"
"io"
)
// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
}
// cSHAKE specific context
type cshakeState struct {
*state // SHA-3 state context and Read/Write operations
// initBlock is the cSHAKE-specific initialization set of bytes. It is initialized
// by the newCShake function and stores the concatenation of N followed by S,
// encoded by the method specified in Section 3.3 of [2].
// It is stored here in order for Reset() to be able to put context into
// initial state.
initBlock []byte
}
// Consts for configuring initial SHA-3 state
const (
dsbyteShake = 0x1f
dsbyteCShake = 0x04
rate128 = 168
rate256 = 136
)
func bytepad(input []byte, w int) []byte {
// leftEncode always returns max 9 bytes
buf := make([]byte, 0, 9+len(input)+w)
buf = append(buf, leftEncode(uint64(w))...)
buf = append(buf, input...)
padlen := w - (len(buf) % w)
return append(buf, make([]byte, padlen)...)
}
func leftEncode(value uint64) []byte {
var b [9]byte
binary.BigEndian.PutUint64(b[1:], value)
// Trim all leading zero bytes except the last one
i := byte(1)
for i < 8 && b[i] == 0 {
i++
}
// Prepend number of encoded bytes
b[i-1] = 9 - i
return b[i-1:]
}
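leftEncode implements left_encode from NIST SP 800-185: the big-endian bytes of the value, preceded by one byte giving their count. A hypothetical in-package test sketch with a few worked values (imports of bytes and testing assumed):

func TestLeftEncode(t *testing.T) {
	cases := map[uint64][]byte{
		0:      {0x01, 0x00},
		168:    {0x01, 0xa8},
		0x1234: {0x02, 0x12, 0x34},
	}
	for in, want := range cases {
		if got := leftEncode(in); !bytes.Equal(got, want) {
			t.Errorf("leftEncode(%d) = % x, want % x", in, got, want)
		}
	}
}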
func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}}
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
c.initBlock = append(c.initBlock, N...)
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
}
// Reset resets the hash to initial state.
func (c *cshakeState) Reset() {
c.state.Reset()
c.Write(bytepad(c.initBlock, c.rate))
}
// Clone returns a copy of the cSHAKE context in its current state.
func (c *cshakeState) Clone() ShakeHash {
b := make([]byte, len(c.initBlock))
copy(b, c.initBlock)
return &cshakeState{state: c.clone(), initBlock: b}
}
// Clone returns a copy of the SHAKE context in its current state.
func (c *state) Clone() ShakeHash {
return c.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash {
if h := newShake128Asm(); h != nil {
return h
}
return &state{rate: rate128, dsbyte: dsbyteShake}
}
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash {
if h := newShake256Asm(); h != nil {
return h
}
return &state{rate: rate256, dsbyte: dsbyteShake}
}
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
// a customizable variant of SHAKE128.
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
// desired. S is a customization byte string used for domain separation: two cSHAKE
// computations on the same input with different S yield unrelated outputs.
// When N and S are both empty, this is equivalent to NewShake128.
func NewCShake128(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake128()
}
return newCShake(N, S, rate128, dsbyteCShake)
}
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
// a customizable variant of SHAKE256.
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
// desired. S is a customization byte string used for domain separation: two cSHAKE
// computations on the same input with different S yield unrelated outputs.
// When N and S are both empty, this is equivalent to NewShake256.
func NewCShake256(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake256()
}
return newCShake(N, S, rate256, dsbyteCShake)
}
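The customization string S gives callers cheap domain separation between otherwise identical computations, as the comments above describe. A minimal sketch using the package's exported API (the strings are arbitrary examples):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("payload")
	// Same input, different S: the outputs are unrelated.
	kdf := sha3.NewCShake256(nil, []byte("key derivation"))
	mac := sha3.NewCShake256(nil, []byte("message auth"))
	kdf.Write(msg)
	mac.Write(msg)
	a, b := make([]byte, 32), make([]byte, 32)
	kdf.Read(a)
	mac.Read(b)
	fmt.Printf("%x\n%x\n", a, b)
}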
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}
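ShakeSum128 and ShakeSum256 are one-shot wrappers around the streaming API; the caller chooses the output length simply by sizing the destination slice. A minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	out := make([]byte, 64) // SHAKE output is extendable to any length
	sha3.ShakeSum256(out, []byte("The quick brown fox"))
	fmt.Printf("%x\n", out)
}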

View file

@ -1,19 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//+build gccgo appengine !s390x
package sha3
// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
// otherwise it returns nil.
func newShake128Asm() ShakeHash {
return nil
}
// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
// otherwise it returns nil.
func newShake256Asm() ShakeHash {
return nil
}

Some files were not shown because too many files have changed in this diff.