update dependencies, including bolt with stability fixes

Mechiel Lukkien 2023-02-17 18:55:01 +01:00
parent fb3794e31b
commit 6df4b454d5
69 changed files with 1242 additions and 462 deletions

10
go.mod

@ -9,9 +9,9 @@ require (
github.com/mjl-/sherpadoc v0.0.10
github.com/mjl-/sherpaprom v0.0.2
github.com/prometheus/client_golang v1.14.0
golang.org/x/crypto v0.5.0
golang.org/x/net v0.5.0
golang.org/x/text v0.6.0
golang.org/x/crypto v0.6.0
golang.org/x/net v0.7.0
golang.org/x/text v0.7.0
)
require (
@ -23,9 +23,9 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/bbolt v1.3.7 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/protobuf v1.28.1 // indirect
)

24
go.sum

@ -168,6 +168,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@ -206,11 +207,12 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@ -222,8 +224,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -288,8 +290,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -337,7 +339,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -345,8 +346,8 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -356,8 +357,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -495,6 +496,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

3
vendor/go.etcd.io/bbolt/.gitignore generated vendored

@ -3,5 +3,8 @@
*.swp
/bin/
cover.out
cover-*.out
/.idea
*.iml
/cmd/bbolt/bbolt

18
vendor/go.etcd.io/bbolt/.travis.yml generated vendored

@ -1,18 +0,0 @@
language: go
go_import_path: go.etcd.io/bbolt
sudo: false
go:
- 1.15
before_install:
- go get -v golang.org/x/sys/unix
- go get -v honnef.co/go/tools/...
- go get -v github.com/kisielk/errcheck
script:
- make fmt
- make test
- make race
# - make errcheck

71
vendor/go.etcd.io/bbolt/Makefile generated vendored

@ -2,35 +2,62 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
race:
@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
@echo "array freelist test"
@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
TESTFLAGS_RACE=-race=false
ifdef ENABLE_RACE
TESTFLAGS_RACE=-race=true
endif
TESTFLAGS_CPU=
ifdef CPU
TESTFLAGS_CPU=-cpu=$(CPU)
endif
TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)
.PHONY: fmt
fmt:
!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
# go get honnef.co/go/tools/simple
gosimple:
gosimple ./...
# go get honnef.co/go/tools/unused
unused:
unused ./...
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
.PHONY: lint
lint:
golangci-lint run ./...
.PHONY: test
test:
TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
# Note: gets "program not an importable package" in out of path builds
TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
@echo "hashmap freelist test"
TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m
TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt
@echo "array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt
@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
# Note: gets "program not an importable package" in out of path builds
@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
.PHONY: coverage
coverage:
@echo "hashmap freelist test"
TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \
-coverprofile cover-freelist-hashmap.out -covermode atomic
@echo "array freelist test"
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
.PHONY: gofail-disable
gofail-disable:
gofail disable .
.PHONY: install-gofail
install-gofail:
go install go.etcd.io/gofail
.PHONY: test-failpoint
test-failpoint:
@echo "[failpoint] hashmap freelist test"
TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
.PHONY: race fmt errcheck test gosimple unused

21
vendor/go.etcd.io/bbolt/README.md generated vendored

@ -26,7 +26,7 @@ and setting values. That's it.
[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
[lmdb]: https://www.symas.com/symas-embedded-database-lmdb
## Project Status
@ -78,14 +78,23 @@ New minor versions may add additional features to the API.
### Installing
To start using Bolt, install Go and run `go get`:
```sh
$ go get go.etcd.io/bbolt/...
$ go get go.etcd.io/bbolt@latest
```
This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
This will retrieve the library and update your `go.mod` and `go.sum` files.
To run the command line utility, execute:
```sh
$ go run go.etcd.io/bbolt/cmd/bbolt@latest
```
Run `go install` to install the `bbolt` command line utility into
your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the
`GOPATH` environment variable is not set.
```sh
$ go install go.etcd.io/bbolt/cmd/bbolt@latest
```
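After installation, typical use of the library looks like the following. This is a minimal Go sketch, not part of the diff, assuming only the long-standing `Open`/`Update`/`View` API; the `my.db` path and the bucket/key names are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the database file; 0600 restricts it to the current user.
	db, err := bbolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a key in a read-write transaction.
	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("config"))
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read it back in a read-only transaction.
	err = db.View(func(tx *bbolt.Tx) error {
		v := tx.Bucket([]byte("config")).Get([]byte("greeting"))
		fmt.Printf("greeting=%s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```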
### Importing bbolt
@ -933,7 +942,7 @@ Below is a list of public, open source projects that use Bolt:
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.

View file

@ -1,3 +1,4 @@
//go:build arm64
// +build arm64
package bbolt

10
vendor/go.etcd.io/bbolt/bolt_loong64.go generated vendored Normal file

@ -0,0 +1,10 @@
//go:build loong64
// +build loong64
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

View file

@ -1,3 +1,4 @@
//go:build mips64 || mips64le
// +build mips64 mips64le
package bbolt

View file

@ -1,3 +1,4 @@
//go:build mips || mipsle
// +build mips mipsle
package bbolt

View file

@ -1,3 +1,4 @@
//go:build ppc
// +build ppc
package bbolt

View file

@ -1,3 +1,4 @@
//go:build ppc64
// +build ppc64
package bbolt

View file

@ -1,3 +1,4 @@
//go:build ppc64le
// +build ppc64le
package bbolt

View file

@ -1,3 +1,4 @@
//go:build riscv64
// +build riscv64
package bbolt

View file

@ -1,3 +1,4 @@
//go:build s390x
// +build s390x
package bbolt

View file

@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !solaris && !aix
// +build !windows,!plan9,!solaris,!aix
package bbolt

View file

@ -1,3 +1,4 @@
//go:build aix
// +build aix
package bbolt

View file

@ -6,40 +6,10 @@ import (
"syscall"
"time"
"unsafe"
"golang.org/x/sys/windows"
)
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
@ -51,22 +21,22 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
if timeout != 0 {
t = time.Now()
}
var flag uint32 = flagLockFailImmediately
var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
if exclusive {
flag |= flagLockExclusive
flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
}
for {
// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
// -1..0 as the lock on the database file.
var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
Offset: m1,
OffsetHigh: m1,
})
if err == nil {
return nil
} else if err != errLockViolation {
} else if err != windows.ERROR_LOCK_VIOLATION {
return err
}
@ -83,34 +53,37 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
Offset: m1,
OffsetHigh: m1,
})
return err
}
// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
var sizelo, sizehi uint32
if !db.readOnly {
// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
}
sizehi = uint32(sz >> 32)
sizelo = uint32(sz) & 0xffffffff
}
// Open a file mapping handle.
sizelo := uint32(sz >> 32)
sizehi := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno)
}
// Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
if addr == 0 {
// Do our best and report error returned from MapViewOfFile.
_ = syscall.CloseHandle(h)
return os.NewSyscallError("MapViewOfFile", errno)
}
@ -134,8 +107,11 @@ func munmap(db *DB) error {
}
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
var err1 error
if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
err1 = os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
db.data = nil
db.datasz = 0
return err1
}

View file

@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !linux && !openbsd
// +build !windows,!plan9,!linux,!openbsd
package bbolt

54
vendor/go.etcd.io/bbolt/bucket.go generated vendored

@ -81,7 +81,7 @@ func (b *Bucket) Writable() bool {
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics.
b.tx.stats.CursorCount++
b.tx.stats.IncCursorCount(1)
// Allocate and return a cursor.
return &Cursor{
@ -229,11 +229,9 @@ func (b *Bucket) DeleteBucket(key []byte) error {
// Recursively delete all child buckets.
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
err := child.ForEachBucket(func(k []byte) error {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
return nil
})
@ -353,7 +351,7 @@ func (b *Bucket) SetSequence(v uint64) error {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
// Set the sequence.
b.bucket.sequence = v
return nil
}
@ -378,6 +376,7 @@ func (b *Bucket) NextSequence() (uint64, error) {
}
// ForEach executes a function for each key/value pair in a bucket.
// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
@ -394,7 +393,22 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
return nil
}
// Stat returns stats on a bucket.
func (b *Bucket) ForEachBucket(fn func(k []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
if flags&bucketLeafFlag != 0 {
if err := fn(k); err != nil {
return err
}
}
}
return nil
}
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats
pageSize := b.tx.db.pageSize
@ -402,7 +416,7 @@ func (b *Bucket) Stats() BucketStats {
if b.root == 0 {
s.InlineBucketN += 1
}
b.forEachPage(func(p *page, depth int) {
b.forEachPage(func(p *page, depth int, pgstack []pgid) {
if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count)
@ -461,7 +475,7 @@ func (b *Bucket) Stats() BucketStats {
// Keep track of maximum page depth.
if depth+1 > s.Depth {
s.Depth = (depth + 1)
s.Depth = depth + 1
}
})
@ -477,15 +491,15 @@ func (b *Bucket) Stats() BucketStats {
}
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) {
// If we have an inline page then just use that.
if b.page != nil {
fn(b.page, 0)
fn(b.page, 0, []pgid{b.root})
return
}
// Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn)
b.tx.forEachPage(b.root, fn)
}
// forEachPageNode iterates over every page (or node) in a bucket.
@ -499,8 +513,8 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
b._forEachPageNode(b.root, 0, fn)
}
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid)
func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgId)
// Execute function.
fn(p, n, depth)
@ -640,11 +654,11 @@ func (b *Bucket) rebalance() {
}
// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
func (b *Bucket) node(pgId pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil {
if n := b.nodes[pgId]; n != nil {
return n
}
@ -659,15 +673,15 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
// Use the inline page if this is an inline bucket.
var p = b.page
if p == nil {
p = b.tx.page(pgid)
p = b.tx.page(pgId)
}
// Read the page into the node and cache it.
n.read(p)
b.nodes[pgid] = n
b.nodes[pgId] = n
// Update statistics.
b.tx.stats.NodeCount++
b.tx.stats.IncNodeCount(1)
return n
}
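The bucket.go hunks above add `Bucket.ForEachBucket`, which visits only keys that are nested buckets, and `DeleteBucket` now uses it instead of re-seeking each key and checking `bucketLeafFlag`. A hedged caller-side sketch of the new helper (the "parent" bucket name is hypothetical):

```go
package example

import (
	"fmt"

	"go.etcd.io/bbolt"
)

// listChildBuckets prints the names of the nested buckets under "parent",
// using the ForEachBucket helper introduced in this bbolt update.
func listChildBuckets(db *bbolt.DB) error {
	return db.View(func(tx *bbolt.Tx) error {
		parent := tx.Bucket([]byte("parent"))
		if parent == nil {
			return nil // nothing to list
		}
		// ForEachBucket calls fn only for keys that are nested buckets,
		// so callers no longer need to check bucketLeafFlag themselves.
		return parent.ForEachBucket(func(k []byte) error {
			fmt.Printf("child bucket: %s\n", k)
			return nil
		})
	})
}
```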

9
vendor/go.etcd.io/bbolt/compact.go generated vendored

@ -12,7 +12,11 @@ func Compact(dst, src *DB, txMaxSize int64) error {
if err != nil {
return err
}
defer tx.Rollback()
defer func() {
if tempErr := tx.Rollback(); tempErr != nil {
err = tempErr
}
}()
if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
// On each key/value, check if we have exceeded tx size.
@ -73,8 +77,9 @@ func Compact(dst, src *DB, txMaxSize int64) error {
}); err != nil {
return err
}
err = tx.Commit()
return tx.Commit()
return err
}
// walkFunc is the type of the function called for keys (buckets and "normal"
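`Compact` now propagates errors from the deferred `Rollback` and returns `tx.Commit()` directly. A hedged sketch of invoking it, with hypothetical source and destination paths:

```go
package example

import "go.etcd.io/bbolt"

// compactInto copies src.db into dst.db, splitting the copy into
// transactions of at most txMaxSize bytes (0 means a single transaction).
// The file paths here are hypothetical.
func compactInto() error {
	src, err := bbolt.Open("src.db", 0600, &bbolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := bbolt.Open("dst.db", 0600, nil)
	if err != nil {
		return err
	}
	defer dst.Close()

	// 64 KiB per transaction keeps memory usage bounded during the copy.
	return bbolt.Compact(dst, src, 64*1024)
}
```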

104
vendor/go.etcd.io/bbolt/cursor.go generated vendored

@ -6,7 +6,8 @@ import (
"sort"
)
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket
// in lexicographical order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
@ -30,10 +31,18 @@ func (c *Cursor) Bucket() *Bucket {
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.first()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first()
c.goToFirstElementOnTheStack()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
@ -43,10 +52,9 @@ func (c *Cursor) First() (key []byte, value []byte) {
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
return k, nil, flags
}
return k, v
return k, v, flags
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
@ -60,6 +68,17 @@ func (c *Cursor) Last() (key []byte, value []byte) {
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
// If this is an empty page (calling Delete may result in empty pages)
// we call prev to find the last page that is not empty
for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 {
c.prev()
}
if len(c.stack) == 0 {
return nil, nil
}
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
@ -84,37 +103,20 @@ func (c *Cursor) Next() (key []byte, value []byte) {
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
k, v, flags := c.keyValue()
k, v, flags := c.prev()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Seek moves the cursor to a given key and returns it.
// Seek moves the cursor to a given key using a b-tree search and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one.
@ -152,8 +154,6 @@ func (c *Cursor) Delete() error {
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.root)
@ -163,7 +163,7 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
}
// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
func (c *Cursor) goToFirstElementOnTheStack() {
for {
// Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1]
@ -172,13 +172,13 @@ func (c *Cursor) first() {
}
// Keep adding pages pointing to the first element to the stack.
var pgid pgid
var pgId pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
pgId = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
p, n := c.bucket.pageNode(pgId)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
@ -193,13 +193,13 @@ func (c *Cursor) last() {
}
// Keep adding pages pointing to the last element in the stack.
var pgid pgid
var pgId pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
pgId = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
p, n := c.bucket.pageNode(pgId)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
@ -231,7 +231,7 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
// Otherwise start from where we left off in the stack and find the
// first element of the first leaf page.
c.stack = c.stack[:i+1]
c.first()
c.goToFirstElementOnTheStack()
// If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450
@ -243,9 +243,33 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
}
}
// prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil, 0
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
return c.keyValue()
}
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
p, n := c.bucket.pageNode(pgid)
func (c *Cursor) search(key []byte, pgId pgid) {
p, n := c.bucket.pageNode(pgId)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
}
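The cursor.go changes are mostly internal renames (`first` becomes `goToFirstElementOnTheStack`, a new unexported `prev`) plus `Last` skipping empty pages; the exported cursor API is unchanged. For reference, a short sketch of ordinary iteration and prefix seeking, with a hypothetical "index" bucket:

```go
package example

import (
	"bytes"
	"fmt"

	"go.etcd.io/bbolt"
)

// scanPrefix walks all keys in lexicographical order, then seeks to a prefix.
func scanPrefix(db *bbolt.DB, prefix []byte) error {
	return db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte("index"))
		if b == nil {
			return nil
		}
		c := b.Cursor()

		// Full scan: First/Next walk the keys in lexicographical order.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}

		// Prefix scan: Seek positions the cursor at the first key >= prefix.
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("match %s=%s\n", k, v)
		}
		return nil
	})
}
```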

179
vendor/go.etcd.io/bbolt/db.go generated vendored

@ -4,7 +4,7 @@ import (
"errors"
"fmt"
"hash/fnv"
"log"
"io"
"os"
"runtime"
"sort"
@ -81,7 +81,7 @@ type DB struct {
NoFreelistSync bool
// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
// dramatic performance degradation if database is large and framentation in freelist is common.
// dramatic performance degradation if database is large and fragmentation in freelist is common.
// The alternative one is using hashmap, it is faster in almost all circumstances
// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
// The default type is array
@ -95,6 +95,11 @@ type DB struct {
// https://github.com/boltdb/bolt/issues/284
NoGrowSync bool
// When `true`, bbolt will always load the free pages when opening the DB.
// When opening db in write mode, this flag will always automatically
// set to `true`.
PreLoadFreelist bool
// If you want to read the entire database fast, you can set MmapFlag to
// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
MmapFlags int
@ -129,6 +134,9 @@ type DB struct {
path string
openFile func(string, int, os.FileMode) (*os.File, error)
file *os.File
// `dataref` isn't used at all on Windows, and the golangci-lint
// always fails on Windows platform.
//nolint
dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
datasz int
@ -193,6 +201,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
db.NoGrowSync = options.NoGrowSync
db.MmapFlags = options.MmapFlags
db.NoFreelistSync = options.NoFreelistSync
db.PreLoadFreelist = options.PreLoadFreelist
db.FreelistType = options.FreelistType
db.Mlock = options.Mlock
@ -205,6 +214,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if options.ReadOnly {
flag = os.O_RDONLY
db.readOnly = true
} else {
// always load free pages in write mode
db.PreLoadFreelist = true
}
db.openFile = options.OpenFile
@ -252,21 +264,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err
}
} else {
// Read the first meta page to determine the page size.
var buf [0x1000]byte
// If we can't read the page size, but can read a page, assume
// it's the same as the OS or one given -- since that's how the
// page size was chosen in the first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
//
// TODO: scan for next page
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
db.pageSize = int(m.pageSize)
}
// try to get the page size from the metadata pages
if pgSize, err := db.getPageSize(); err == nil {
db.pageSize = pgSize
} else {
_ = db.close()
return nil, ErrInvalid
@ -286,12 +286,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err
}
if db.PreLoadFreelist {
db.loadFreelist()
}
if db.readOnly {
return db, nil
}
db.loadFreelist()
// Flush freelist when transitioning from no sync to sync so
// NoFreelistSync unaware boltdb can open the db later.
if !db.NoFreelistSync && !db.hasSyncedFreelist() {
@ -309,6 +311,96 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return db, nil
}
// getPageSize reads the pageSize from the meta pages. It tries
// to read the first meta page firstly. If the first page is invalid,
// then it tries to read the second page using the default page size.
func (db *DB) getPageSize() (int, error) {
var (
meta0CanRead, meta1CanRead bool
)
// Read the first meta page to determine the page size.
if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil {
// We cannot read the page size from page 0, but can read page 0.
meta0CanRead = canRead
} else {
return pgSize, nil
}
// Read the second meta page to determine the page size.
if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil {
// We cannot read the page size from page 1, but can read page 1.
meta1CanRead = canRead
} else {
return pgSize, nil
}
// If we can't read the page size from both pages, but can read
// either page, then we assume it's the same as the OS or the one
// given, since that's how the page size was chosen in the first place.
//
// If both pages are invalid, and (this OS uses a different page size
// from what the database was created with or the given page size is
// different from what the database was created with), then we are out
// of luck and cannot access the database.
if meta0CanRead || meta1CanRead {
return db.pageSize, nil
}
return 0, ErrInvalid
}
// getPageSizeFromFirstMeta reads the pageSize from the first meta page
func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) {
var buf [0x1000]byte
var metaCanRead bool
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
metaCanRead = true
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
return int(m.pageSize), metaCanRead, nil
}
}
return 0, metaCanRead, ErrInvalid
}
// getPageSizeFromSecondMeta reads the pageSize from the second meta page
func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) {
var (
fileSize int64
metaCanRead bool
)
// get the db file size
if info, err := db.file.Stat(); err != nil {
return 0, metaCanRead, err
} else {
fileSize = info.Size()
}
// We need to read the second meta page, so we should skip the first page;
// but we don't know the exact page size yet, it's chicken & egg problem.
// The solution is to try all the possible page sizes, which starts from 1KB
// and until 16MB (1024<<14) or the end of the db file
//
// TODO: should we support larger page size?
for i := 0; i <= 14; i++ {
var buf [0x1000]byte
var pos int64 = 1024 << uint(i)
if pos >= fileSize-1024 {
break
}
bw, err := db.file.ReadAt(buf[:], pos)
if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) {
metaCanRead = true
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
return int(m.pageSize), metaCanRead, nil
}
}
}
return 0, metaCanRead, ErrInvalid
}
// loadFreelist reads the freelist if it is synced, or reconstructs it
// by scanning the DB if it is not synced. It assumes there are no
// concurrent accesses being made to the freelist.
@ -372,6 +464,8 @@ func (db *DB) mmap(minsz int) error {
}
// Memory-map the data file as a byte slice.
// gofail: var mapError string
// return errors.New(mapError)
if err := mmap(db, size); err != nil {
return err
}
@ -399,11 +493,25 @@ func (db *DB) mmap(minsz int) error {
return nil
}
func (db *DB) invalidate() {
db.dataref = nil
db.data = nil
db.datasz = 0
db.meta0 = nil
db.meta1 = nil
}
// munmap unmaps the data file from memory.
func (db *DB) munmap() error {
defer db.invalidate()
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil {
return fmt.Errorf("unmap error: " + err.Error())
}
return nil
}
@ -552,7 +660,7 @@ func (db *DB) close() error {
if !db.readOnly {
// Unlock the file.
if err := funlock(db); err != nil {
log.Printf("bolt.Close(): funlock error: %s", err)
return fmt.Errorf("bolt.Close(): funlock error: %w", err)
}
}
@ -609,6 +717,13 @@ func (db *DB) beginTx() (*Tx, error) {
return nil, ErrDatabaseNotOpen
}
// Exit if the database is not correctly mapped.
if db.data == nil {
db.mmaplock.RUnlock()
db.metalock.Unlock()
return nil, ErrInvalidMapping
}
// Create a transaction associated with the database.
t := &Tx{}
t.init(db)
@ -650,6 +765,12 @@ func (db *DB) beginRWTx() (*Tx, error) {
return nil, ErrDatabaseNotOpen
}
// Exit if the database is not correctly mapped.
if db.data == nil {
db.rwlock.Unlock()
return nil, ErrInvalidMapping
}
// Create a transaction associated with the database.
t := &Tx{writable: true}
t.init(db)
@ -924,6 +1045,7 @@ func (db *DB) Stats() Stats {
// This is for internal access to the raw data bytes from the C cursor, use
// carefully, or not at all.
func (db *DB) Info() *Info {
_assert(db.data != nil, "database file isn't correctly mapped")
return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
}
@ -950,7 +1072,7 @@ func (db *DB) meta() *meta {
metaB = db.meta0
}
// Use higher meta page if valid. Otherwise fallback to previous, if valid.
// Use higher meta page if valid. Otherwise, fallback to previous, if valid.
if err := metaA.validate(); err == nil {
return metaA
} else if err := metaB.validate(); err == nil {
@ -1003,7 +1125,7 @@ func (db *DB) grow(sz int) error {
// If the data is smaller than the alloc size then only allocate what's needed.
// Once it goes over the allocation size then allocate in chunks.
if db.datasz < db.AllocSize {
if db.datasz <= db.AllocSize {
sz = db.datasz
} else {
sz += db.AllocSize
@ -1056,9 +1178,11 @@ func (db *DB) freepages() []pgid {
panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
}
}()
tx.checkBucket(&tx.root, reachable, nofreed, ech)
tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech)
close(ech)
// TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages.
var fids []pgid
for i := pgid(2); i < db.meta().pgid; i++ {
if _, ok := reachable[i]; !ok {
@ -1082,8 +1206,13 @@ type Options struct {
// under normal operation, but requires a full database re-sync during recovery.
NoFreelistSync bool
// PreLoadFreelist sets whether to load the free pages when opening
// the db file. Note when opening db in write mode, bbolt will always
// load the free pages.
PreLoadFreelist bool
// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
// dramatic performance degradation if database is large and framentation in freelist is common.
// dramatic performance degradation if database is large and fragmentation in freelist is common.
// The alternative one is using hashmap, it is faster in almost all circumstances
// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
// The default type is array
@ -1187,7 +1316,7 @@ func (m *meta) validate() error {
return ErrInvalid
} else if m.version != version {
return ErrVersionMismatch
} else if m.checksum != 0 && m.checksum != m.sum64() {
} else if m.checksum != m.sum64() {
return ErrChecksum
}
return nil
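The db.go hunks introduce the `PreLoadFreelist` option (always forced on for writable opens), replace the single-meta page-size probe with `getPageSize`, and return the new `ErrInvalidMapping` when a transaction starts against an unmapped file. A hedged sketch of a read-only open that preloads the freelist; the path, mode, and timeout are illustrative:

```go
package example

import (
	"time"

	"go.etcd.io/bbolt"
)

// openReadOnly opens an existing database for inspection, preloading the
// free pages so that page queries do not fail with ErrFreePagesNotLoaded.
func openReadOnly(path string) (*bbolt.DB, error) {
	return bbolt.Open(path, 0600, &bbolt.Options{
		ReadOnly:        true,
		PreLoadFreelist: true,        // option added in this bbolt update
		Timeout:         time.Second, // give up if another process holds the lock
	})
}
```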

8
vendor/go.etcd.io/bbolt/doc.go generated vendored

@ -14,8 +14,7 @@ The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux.
Basics
# Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
@ -27,8 +26,7 @@ iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.
Caveats
# Caveats
The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
@ -38,7 +36,5 @@ will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bbolt

7
vendor/go.etcd.io/bbolt/errors.go generated vendored

@ -16,6 +16,9 @@ var (
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrInvalidMapping is returned when the database file fails to get mapped.
ErrInvalidMapping = errors.New("database isn't correctly mapped")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
@ -41,6 +44,10 @@ var (
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
// ErrFreePagesNotLoaded is returned when a readonly transaction without
// preloading the free pages is trying to access the free pages.
ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
)
// These errors can occur when putting or deleting a value or a bucket.
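The new `ErrFreePagesNotLoaded` is returned by `Tx.Page` when the freelist was not preloaded (see the tx.go hunk further down). A hedged sketch of handling it, using the `PageInfo` fields `ID`, `Type`, `Count`, and `OverflowCount`:

```go
package example

import (
	"errors"
	"fmt"

	"go.etcd.io/bbolt"
)

// pageInfo reports information about page id, translating the new sentinel
// error into a hint about the PreLoadFreelist option.
func pageInfo(tx *bbolt.Tx, id int) error {
	info, err := tx.Page(id)
	if errors.Is(err, bbolt.ErrFreePagesNotLoaded) {
		return fmt.Errorf("open the DB with Options.PreLoadFreelist to inspect pages: %w", err)
	}
	if err != nil {
		return err
	}
	if info == nil {
		return fmt.Errorf("page %d is beyond the high water mark", id)
	}
	fmt.Printf("page %d: type=%s count=%d overflow=%d\n", info.ID, info.Type, info.Count, info.OverflowCount)
	return nil
}
```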

19
vendor/go.etcd.io/bbolt/freelist.go generated vendored

@ -24,7 +24,7 @@ type freelist struct {
ids []pgid // all free and available free page ids.
allocs map[pgid]txid // mapping of txid that allocated a pgid.
pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids.
cache map[pgid]struct{} // fast lookup of all free and pending page ids.
freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
forwardMap map[pgid]uint64 // key is start pgid, value is its span size
backwardMap map[pgid]uint64 // key is end pgid, value is its span size
@ -41,7 +41,7 @@ func newFreelist(freelistType FreelistType) *freelist {
freelistType: freelistType,
allocs: make(map[pgid]txid),
pending: make(map[txid]*txPending),
cache: make(map[pgid]bool),
cache: make(map[pgid]struct{}),
freemaps: make(map[uint64]pidSet),
forwardMap: make(map[pgid]uint64),
backwardMap: make(map[pgid]uint64),
@ -171,13 +171,13 @@ func (f *freelist) free(txid txid, p *page) {
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
if _, ok := f.cache[id]; ok {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
txp.ids = append(txp.ids, id)
txp.alloctx = append(txp.alloctx, allocTxid)
f.cache[id] = true
f.cache[id] = struct{}{}
}
}
@ -256,8 +256,9 @@ func (f *freelist) rollback(txid txid) {
}
// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
func (f *freelist) freed(pgId pgid) bool {
_, ok := f.cache[pgId]
return ok
}
// read initializes the freelist from a freelist page.
@ -386,13 +387,13 @@ func (f *freelist) noSyncReload(pgids []pgid) {
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
ids := f.getFreePageIDs()
f.cache = make(map[pgid]bool, len(ids))
f.cache = make(map[pgid]struct{}, len(ids))
for _, id := range ids {
f.cache[id] = true
f.cache[id] = struct{}{}
}
for _, txp := range f.pending {
for _, pendingID := range txp.ids {
f.cache[pendingID] = true
f.cache[pendingID] = struct{}{}
}
}
}
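The freelist cache changes from `map[pgid]bool` to `map[pgid]struct{}`: the empty struct takes no space, so the map acts purely as a set, and membership is now tested with the comma-ok form instead of reading a boolean. The same idiom in isolation, outside bbolt:

```go
package example

// pageSet illustrates the map[T]struct{} set idiom adopted by the freelist
// cache above: struct{}{} carries no data, so only the keys consume memory.
type pageSet map[uint64]struct{}

func (s pageSet) add(id uint64)      { s[id] = struct{}{} }
func (s pageSet) has(id uint64) bool { _, ok := s[id]; return ok }
func (s pageSet) del(id uint64)      { delete(s, id) }
```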

View file

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package bbolt
@ -17,7 +18,7 @@ func mlock(db *DB, fileSize int) error {
return nil
}
//munlock unlocks memory of db file
// munlock unlocks memory of db file
func munlock(db *DB, fileSize int) error {
if db.dataref == nil {
return nil

View file

@ -5,7 +5,7 @@ func mlock(_ *DB, _ int) error {
panic("mlock is supported only on UNIX systems")
}
//munlock unlocks memory of db file
// munlock unlocks memory of db file
func munlock(_ *DB, _ int) error {
panic("munlock is supported only on UNIX systems")
}

28
vendor/go.etcd.io/bbolt/node.go generated vendored

@ -113,9 +113,9 @@ func (n *node) prevSibling() *node {
}
// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) {
if pgId >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
@ -136,7 +136,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
inode.flags = flags
inode.key = newKey
inode.value = value
inode.pgid = pgid
inode.pgid = pgId
_assert(len(inode.key) > 0, "put: zero-length inode key")
}
@ -188,12 +188,16 @@ func (n *node) read(p *page) {
}
// write writes the items onto one or more pages.
// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set
// and the rest should be zeroed.
func (n *node) write(p *page) {
_assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page")
// Initialize page.
if n.isLeaf {
p.flags |= leafPageFlag
p.flags = leafPageFlag
} else {
p.flags |= branchPageFlag
p.flags = branchPageFlag
}
if len(n.inodes) >= 0xFFFF {
@ -300,7 +304,7 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
n.inodes = n.inodes[:splitIndex]
// Update the statistics.
n.bucket.tx.stats.Split++
n.bucket.tx.stats.IncSplit(1)
return n, next
}
@ -387,7 +391,7 @@ func (n *node) spill() error {
}
// Update the statistics.
tx.stats.Spill++
tx.stats.IncSpill(1)
}
// If the root node split and created a new root then we need to spill that
@ -409,7 +413,7 @@ func (n *node) rebalance() {
n.unbalanced = false
// Update statistics.
n.bucket.tx.stats.Rebalance++
n.bucket.tx.stats.IncRebalance(1)
// Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4
@ -543,7 +547,7 @@ func (n *node) dereference() {
}
// Update statistics.
n.bucket.tx.stats.NodeDeref++
n.bucket.tx.stats.IncNodeDeref(1)
}
// free adds the node's underlying page to the freelist.
@ -581,6 +585,10 @@ func (n *node) dump() {
}
*/
func compareKeys(left, right []byte) int {
return bytes.Compare(left, right)
}
type nodes []*node
func (s nodes) Len() int { return len(s) }

10
vendor/go.etcd.io/bbolt/page.go generated vendored

@ -53,6 +53,16 @@ func (p *page) meta() *meta {
return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}
func (p *page) fastCheck(id pgid) {
_assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
// Only one flag of page-type can be set.
_assert(p.flags == branchPageFlag ||
p.flags == leafPageFlag ||
p.flags == metaPageFlag ||
p.flags == freelistPageFlag,
"page %v: has unexpected type/flags: %x", p.id, p.flags)
}
// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),

382
vendor/go.etcd.io/bbolt/tx.go generated vendored

@ -6,6 +6,7 @@ import (
"os"
"sort"
"strings"
"sync/atomic"
"time"
"unsafe"
)
@ -151,17 +152,19 @@ func (tx *Tx) Commit() error {
// Rebalance nodes which have had deletions.
var startTime = time.Now()
tx.root.rebalance()
if tx.stats.Rebalance > 0 {
tx.stats.RebalanceTime += time.Since(startTime)
if tx.stats.GetRebalance() > 0 {
tx.stats.IncRebalanceTime(time.Since(startTime))
}
opgid := tx.meta.pgid
// spill data onto dirty pages.
startTime = time.Now()
if err := tx.root.spill(); err != nil {
tx.rollback()
return err
}
tx.stats.SpillTime += time.Since(startTime)
tx.stats.IncSpillTime(time.Since(startTime))
// Free the old root bucket.
tx.meta.root.root = tx.root.root
@ -180,6 +183,14 @@ func (tx *Tx) Commit() error {
tx.meta.freelist = pgidNoFreelist
}
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk.
startTime = time.Now()
if err := tx.write(); err != nil {
@ -208,7 +219,7 @@ func (tx *Tx) Commit() error {
tx.rollback()
return err
}
tx.stats.WriteTime += time.Since(startTime)
tx.stats.IncWriteTime(time.Since(startTime))
// Finalize the transaction.
tx.close()
@ -224,7 +235,6 @@ func (tx *Tx) Commit() error {
func (tx *Tx) commitFreelist() error {
// Allocate new pages for the new free list. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
opgid := tx.meta.pgid
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
@ -235,13 +245,6 @@ func (tx *Tx) commitFreelist() error {
return err
}
tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
return nil
}
@ -275,13 +278,17 @@ func (tx *Tx) rollback() {
}
if tx.writable {
tx.db.freelist.rollback(tx.meta.txid)
if !tx.db.hasSyncedFreelist() {
// Reconstruct free page list by scanning the DB to get the whole free page list.
// Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode.
tx.db.freelist.noSyncReload(tx.db.freepages())
} else {
// Read free page list from freelist page.
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
// When mmap fails, the `data`, `dataref` and `datasz` may be reset to
// zero values, and there is no way to reload free page IDs in this case.
if tx.db.data != nil {
if !tx.db.hasSyncedFreelist() {
// Reconstruct free page list by scanning the DB to get the whole free page list.
// Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode.
tx.db.freelist.noSyncReload(tx.db.freepages())
} else {
// Read free page list from freelist page.
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
}
}
tx.close()
@ -400,98 +407,6 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
return f.Close()
}
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
return ch
}
func (tx *Tx) check(ch chan error) {
// Force loading free list if opened in ReadOnly mode.
tx.db.loadFreelist()
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
if tx.meta.freelist != pgidNoFreelist {
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references", int(id))
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(tx.meta.txid, count)
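`Check` and `checkBucket` are removed from tx.go here; the `HexKVStringer` argument now passed by `freepages` in the db.go hunk above suggests the checker moved to its own file with a key/value formatting hook. A hedged sketch of running the consistency check, assuming `Tx.Check()` still returns `<-chan error`:

```go
package example

import (
	"fmt"

	"go.etcd.io/bbolt"
)

// checkDB runs bbolt's consistency checker in a read-only transaction and
// reports any problems it finds. Assumes Tx.Check() keeps its <-chan error
// signature after the move out of tx.go.
func checkDB(db *bbolt.DB) error {
	return db.View(func(tx *bbolt.Tx) error {
		var problems []error
		for err := range tx.Check() {
			problems = append(problems, err)
		}
		if len(problems) > 0 {
			return fmt.Errorf("%d consistency problems, first: %w", len(problems), problems[0])
		}
		return nil
	})
}
```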
@ -503,8 +418,8 @@ func (tx *Tx) allocate(count int) (*page, error) {
tx.pages[p.id] = p
// Update statistics.
tx.stats.PageCount += count
tx.stats.PageAlloc += count * tx.db.pageSize
tx.stats.IncPageCount(int64(count))
tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
return p, nil
}
@ -539,7 +454,7 @@ func (tx *Tx) write() error {
}
// Update statistics.
tx.stats.Write++
tx.stats.IncWrite(1)
// Exit inner for loop if we've written all the chunks.
rem -= sz
@ -574,7 +489,7 @@ func (tx *Tx) write() error {
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
tx.db.pagePool.Put(buf) //nolint:staticcheck
}
return nil
@ -598,7 +513,7 @@ func (tx *Tx) writeMeta() error {
}
// Update statistics.
tx.stats.Write++
tx.stats.IncWrite(1)
return nil
}
@ -609,26 +524,35 @@ func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
if p, ok := tx.pages[id]; ok {
p.fastCheck(id)
return p
}
}
// Otherwise return directly from the mmap.
return tx.db.page(id)
p := tx.db.page(id)
p.fastCheck(id)
return p
}
// forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) {
stack := make([]pgid, 10)
stack[0] = pgidnum
tx.forEachPageInternal(stack[:1], fn)
}
func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) {
p := tx.page(pgidstack[len(pgidstack)-1])
// Execute function.
fn(p, depth)
fn(p, len(pgidstack)-1, pgidstack)
// Recursively loop over children.
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
tx.forEachPage(elem.pgid, depth+1, fn)
tx.forEachPageInternal(append(pgidstack, elem.pgid), fn)
}
}
}
@ -642,6 +566,10 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
return nil, nil
}
if tx.db.freelist == nil {
return nil, ErrFreePagesNotLoaded
}
// Build the page info.
p := tx.db.page(pgid(id))
info := &PageInfo{
@ -663,43 +591,61 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
// Page statistics.
PageCount int // number of page allocations
PageAlloc int // total bytes allocated
//
// DEPRECATED: Use GetPageCount() or IncPageCount()
PageCount int64 // number of page allocations
// DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
PageAlloc int64 // total bytes allocated
// Cursor statistics.
CursorCount int // number of cursors created
//
// DEPRECATED: Use GetCursorCount() or IncCursorCount()
CursorCount int64 // number of cursors created
// Node statistics
NodeCount int // number of node allocations
NodeDeref int // number of node dereferences
//
// DEPRECATED: Use GetNodeCount() or IncNodeCount()
NodeCount int64 // number of node allocations
// DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
NodeDeref int64 // number of node dereferences
// Rebalance statistics.
Rebalance int // number of node rebalances
//
// DEPRECATED: Use GetRebalance() or IncRebalance()
Rebalance int64 // number of node rebalances
// DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
RebalanceTime time.Duration // total time spent rebalancing
// Split/Spill statistics.
Split int // number of nodes split
Spill int // number of nodes spilled
//
// DEPRECATED: Use GetSplit() or IncSplit()
Split int64 // number of nodes split
// DEPRECATED: Use GetSpill() or IncSpill()
Spill int64 // number of nodes spilled
// DEPRECATED: Use GetSpillTime() or IncSpillTime()
SpillTime time.Duration // total time spent spilling
// Write statistics.
Write int // number of writes performed
//
// DEPRECATED: Use GetWrite() or IncWrite()
Write int64 // number of writes performed
// DEPRECATED: Use GetWriteTime() or IncWriteTime()
WriteTime time.Duration // total time spent writing to disk
}
func (s *TxStats) add(other *TxStats) {
s.PageCount += other.PageCount
s.PageAlloc += other.PageAlloc
s.CursorCount += other.CursorCount
s.NodeCount += other.NodeCount
s.NodeDeref += other.NodeDeref
s.Rebalance += other.Rebalance
s.RebalanceTime += other.RebalanceTime
s.Split += other.Split
s.Spill += other.Spill
s.SpillTime += other.SpillTime
s.Write += other.Write
s.WriteTime += other.WriteTime
s.IncPageCount(other.GetPageCount())
s.IncPageAlloc(other.GetPageAlloc())
s.IncCursorCount(other.GetCursorCount())
s.IncNodeCount(other.GetNodeCount())
s.IncNodeDeref(other.GetNodeDeref())
s.IncRebalance(other.GetRebalance())
s.IncRebalanceTime(other.GetRebalanceTime())
s.IncSplit(other.GetSplit())
s.IncSpill(other.GetSpill())
s.IncSpillTime(other.GetSpillTime())
s.IncWrite(other.GetWrite())
s.IncWriteTime(other.GetWriteTime())
}
// Sub calculates and returns the difference between two sets of transaction stats.
@ -707,17 +653,145 @@ func (s *TxStats) add(other *TxStats) {
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
var diff TxStats
diff.PageCount = s.PageCount - other.PageCount
diff.PageAlloc = s.PageAlloc - other.PageAlloc
diff.CursorCount = s.CursorCount - other.CursorCount
diff.NodeCount = s.NodeCount - other.NodeCount
diff.NodeDeref = s.NodeDeref - other.NodeDeref
diff.Rebalance = s.Rebalance - other.Rebalance
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
diff.Split = s.Split - other.Split
diff.Spill = s.Spill - other.Spill
diff.SpillTime = s.SpillTime - other.SpillTime
diff.Write = s.Write - other.Write
diff.WriteTime = s.WriteTime - other.WriteTime
diff.PageCount = s.GetPageCount() - other.GetPageCount()
diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
diff.Rebalance = s.GetRebalance() - other.GetRebalance()
diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
diff.Split = s.GetSplit() - other.GetSplit()
diff.Spill = s.GetSpill() - other.GetSpill()
diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
diff.Write = s.GetWrite() - other.GetWrite()
diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
return diff
}
// GetPageCount returns PageCount atomically.
func (s *TxStats) GetPageCount() int64 {
return atomic.LoadInt64(&s.PageCount)
}
// IncPageCount increases PageCount atomically and returns the new value.
func (s *TxStats) IncPageCount(delta int64) int64 {
return atomic.AddInt64(&s.PageCount, delta)
}
// GetPageAlloc returns PageAlloc atomically.
func (s *TxStats) GetPageAlloc() int64 {
return atomic.LoadInt64(&s.PageAlloc)
}
// IncPageAlloc increases PageAlloc atomically and returns the new value.
func (s *TxStats) IncPageAlloc(delta int64) int64 {
return atomic.AddInt64(&s.PageAlloc, delta)
}
// GetCursorCount returns CursorCount atomically.
func (s *TxStats) GetCursorCount() int64 {
return atomic.LoadInt64(&s.CursorCount)
}
// IncCursorCount increases CursorCount atomically and returns the new value.
func (s *TxStats) IncCursorCount(delta int64) int64 {
return atomic.AddInt64(&s.CursorCount, delta)
}
// GetNodeCount returns NodeCount atomically.
func (s *TxStats) GetNodeCount() int64 {
return atomic.LoadInt64(&s.NodeCount)
}
// IncNodeCount increases NodeCount atomically and returns the new value.
func (s *TxStats) IncNodeCount(delta int64) int64 {
return atomic.AddInt64(&s.NodeCount, delta)
}
// GetNodeDeref returns NodeDeref atomically.
func (s *TxStats) GetNodeDeref() int64 {
return atomic.LoadInt64(&s.NodeDeref)
}
// IncNodeDeref increases NodeDeref atomically and returns the new value.
func (s *TxStats) IncNodeDeref(delta int64) int64 {
return atomic.AddInt64(&s.NodeDeref, delta)
}
// GetRebalance returns Rebalance atomically.
func (s *TxStats) GetRebalance() int64 {
return atomic.LoadInt64(&s.Rebalance)
}
// IncRebalance increases Rebalance atomically and returns the new value.
func (s *TxStats) IncRebalance(delta int64) int64 {
return atomic.AddInt64(&s.Rebalance, delta)
}
// GetRebalanceTime returns RebalanceTime atomically.
func (s *TxStats) GetRebalanceTime() time.Duration {
return atomicLoadDuration(&s.RebalanceTime)
}
// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.RebalanceTime, delta)
}
// GetSplit returns Split atomically.
func (s *TxStats) GetSplit() int64 {
return atomic.LoadInt64(&s.Split)
}
// IncSplit increases Split atomically and returns the new value.
func (s *TxStats) IncSplit(delta int64) int64 {
return atomic.AddInt64(&s.Split, delta)
}
// GetSpill returns Spill atomically.
func (s *TxStats) GetSpill() int64 {
return atomic.LoadInt64(&s.Spill)
}
// IncSpill increases Spill atomically and returns the new value.
func (s *TxStats) IncSpill(delta int64) int64 {
return atomic.AddInt64(&s.Spill, delta)
}
// GetSpillTime returns SpillTime atomically.
func (s *TxStats) GetSpillTime() time.Duration {
return atomicLoadDuration(&s.SpillTime)
}
// IncSpillTime increases SpillTime atomically and returns the new value.
func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.SpillTime, delta)
}
// GetWrite returns Write atomically.
func (s *TxStats) GetWrite() int64 {
return atomic.LoadInt64(&s.Write)
}
// IncWrite increases Write atomically and returns the new value.
func (s *TxStats) IncWrite(delta int64) int64 {
return atomic.AddInt64(&s.Write, delta)
}
// GetWriteTime returns WriteTime atomically.
func (s *TxStats) GetWriteTime() time.Duration {
return atomicLoadDuration(&s.WriteTime)
}
// IncWriteTime increases WriteTime atomically and returns the new value.
func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.WriteTime, delta)
}
func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
}
func atomicLoadDuration(ptr *time.Duration) time.Duration {
return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
}

226
vendor/go.etcd.io/bbolt/tx_check.go generated vendored Normal file
View file

@ -0,0 +1,226 @@
package bbolt
import (
"encoding/hex"
"fmt"
)
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction; however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
return tx.CheckWithOptions()
}
// CheckWithOptions allows users to provide a customized `KVStringer` implementation,
// so that bolt can generate human-readable diagnostic messages.
func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error {
chkConfig := checkConfig{
kvStringer: HexKVStringer(),
}
for _, op := range options {
op(&chkConfig)
}
ch := make(chan error)
go tx.check(chkConfig.kvStringer, ch)
return ch
}
func (tx *Tx) check(kvStringer KVStringer, ch chan error) {
// Force loading free list if opened in ReadOnly mode.
tx.db.loadFreelist()
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
if tx.meta.freelist != pgidNoFreelist {
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool,
kvStringer KVStringer, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack)
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack)
}
})
tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch)
// Check each bucket within this bucket.
_ = b.ForEachBucket(func(k []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, kvStringer, ch)
}
return nil
})
}
// recursivelyCheckPages confirms database consistency with respect to b-tree
// key order constraints:
// - keys on pages must be sorted
// - keys on child pages are between two consecutive keys on the parent's branch page.
func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) {
tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch)
}
// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are:
// - >=`minKeyClosed` (can be nil)
// - <`maxKeyOpen` (can be nil)
// - in the right ordering relationship to their parents.
// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid`, for clear debugging messages.
func (tx *Tx) recursivelyCheckPagesInternal(
pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
p := tx.page(pgId)
pagesStack = append(pagesStack, pgId)
switch {
case p.flags&branchPageFlag != 0:
// For branch page we navigate ranges of all subpages.
runningMin := minKeyClosed
for i := range p.branchPageElements() {
elem := p.branchPageElement(uint16(i))
verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
maxKey := maxKeyOpen
if i < len(p.branchPageElements())-1 {
maxKey = p.branchPageElement(uint16(i + 1)).key()
}
maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch)
runningMin = maxKeyInSubtree
}
return maxKeyInSubtree
case p.flags&leafPageFlag != 0:
runningMin := minKeyClosed
for i := range p.leafPageElements() {
elem := p.leafPageElement(uint16(i))
verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
runningMin = elem.key()
}
if p.count > 0 {
return p.leafPageElement(p.count - 1).key()
}
default:
ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId)
}
return maxKeyInSubtree
}
/***
* verifyKeyOrder checks whether the entry at the given index on pgId (pageType: "branch" or "leaf"), with the given key,
* is within the range determined by (previousKey..maxKeyOpen), and reports any violations to the channel (ch).
*/
func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) {
if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
if index > 0 {
cmpRet := compareKeys(previousKey, key)
if cmpRet > 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
if cmpRet == 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
}
if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
}
// ===========================================================================================
type checkConfig struct {
kvStringer KVStringer
}
type CheckOption func(options *checkConfig)
func WithKVStringer(kvStringer KVStringer) CheckOption {
return func(c *checkConfig) {
c.kvStringer = kvStringer
}
}
// KVStringer allows preparing human-readable diagnostic messages.
type KVStringer interface {
KeyToString([]byte) string
ValueToString([]byte) string
}
// HexKVStringer serializes both key & value to hex representation.
func HexKVStringer() KVStringer {
return hexKvStringer{}
}
type hexKvStringer struct{}
func (_ hexKvStringer) KeyToString(key []byte) string {
return hex.EncodeToString(key)
}
func (_ hexKvStringer) ValueToString(value []byte) string {
return hex.EncodeToString(value)
}

View file

@ -117,7 +117,7 @@ func (c *Client) updateRegRFC(ctx context.Context, a *Account) (*Account, error)
return responseAccount(res)
}
// getGegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555.
// getRegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555.
// It expects c.Discover to have already been called.
func (c *Client) getRegRFC(ctx context.Context) (*Account, error) {
req := json.RawMessage(`{"onlyReturnExisting": true}`)

View file

@ -184,7 +184,7 @@ func (p *parser) clearStackToContext(s scope) {
}
}
// parseGenericRawTextElements implements the generic raw text element parsing
// parseGenericRawTextElement implements the generic raw text element parsing
// algorithm defined in 12.2.6.2.
// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part

View file

@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
func (z *Tokenizer) readComment() {
// When modifying this function, consider manually increasing the suffixLen
// constant in func TestComments, from 6 to e.g. 9 or more. That increase
// should only be temporary, not committed, as it exponentially affects the
// test running time.
z.data.start = z.raw.end
defer func() {
if z.data.end < z.data.start {
@ -611,11 +616,7 @@ func (z *Tokenizer) readComment() {
for {
c := z.readByte()
if z.err != nil {
// Ignore up to two dashes at EOF.
if dashCount > 2 {
dashCount = 2
}
z.data.end = z.raw.end - dashCount
z.data.end = z.calculateAbruptCommentDataEnd()
return
}
switch c {
@ -631,12 +632,15 @@ func (z *Tokenizer) readComment() {
if dashCount >= 2 {
c = z.readByte()
if z.err != nil {
z.data.end = z.raw.end
z.data.end = z.calculateAbruptCommentDataEnd()
return
}
if c == '>' {
} else if c == '>' {
z.data.end = z.raw.end - len("--!>")
return
} else if c == '-' {
dashCount = 1
beginning = false
continue
}
}
}
@ -645,6 +649,35 @@ func (z *Tokenizer) readComment() {
}
}
func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
raw := z.Raw()
const prefixLen = len("<!--")
if len(raw) >= prefixLen {
raw = raw[prefixLen:]
if hasSuffix(raw, "--!") {
return z.raw.end - 3
} else if hasSuffix(raw, "--") {
return z.raw.end - 2
} else if hasSuffix(raw, "-") {
return z.raw.end - 1
}
}
return z.raw.end
}
func hasSuffix(b []byte, suffix string) bool {
if len(b) < len(suffix) {
return false
}
b = b[len(b)-len(suffix):]
for i := range b {
if b[i] != suffix[i] {
return false
}
}
return true
}
// readUntilCloseAngle reads until the next ">".
func (z *Tokenizer) readUntilCloseAngle() {
z.data.start = z.raw.end
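The new calculateAbruptCommentDataEnd trims a trailing "-", "--" or "--!" when the input ends inside a comment, instead of always clamping to two dashes. A sketch of the observable behaviour (assumes only the public x/net/html tokenizer API):
package main
import (
	"fmt"
	"strings"
	"golang.org/x/net/html"
)
func main() {
	// A comment truncated at EOF right after "--": the abrupt-comment
	// handling should drop the trailing dashes from the token data.
	z := html.NewTokenizer(strings.NewReader("<!--abc--"))
	if tt := z.Next(); tt == html.CommentToken {
		fmt.Printf("%q\n", z.Token().Data) // expected: "abc"
	}
}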

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gccgo
// +build 386 amd64 amd64p32
// +build gccgo

11
vendor/golang.org/x/sys/cpu/endian_big.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
package cpu
// IsBigEndian records whether the GOARCH's byte order is big endian.
const IsBigEndian = true

11
vendor/golang.org/x/sys/cpu/endian_little.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
package cpu
// IsBigEndian records whether the GOARCH's byte order is big endian.
const IsBigEndian = false
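Both endian files reduce to a single exported constant, so byte-order checks become a compile-time branch rather than a runtime probe. A minimal sketch:
package main
import (
	"fmt"
	"golang.org/x/sys/cpu"
)
func main() {
	// Selected at compile time by the endian_big.go / endian_little.go
	// build constraints above.
	fmt.Println("big-endian GOARCH:", cpu.IsBigEndian)
}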

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gccgo,!hurd
// +build !aix,!hurd
//go:build gccgo && !aix && !hurd
// +build gccgo,!aix,!hurd
#include <errno.h>
#include <stdint.h>

View file

@ -230,6 +230,7 @@ func direntNamlen(buf []byte) (uint64, bool) {
func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) }
//sysnb pipe(p *[2]int32) (err error)

View file

@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint32(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint64(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint32(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint64(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint64(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -1800,6 +1800,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
@ -1999,7 +2000,7 @@ func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
// offs2lohi splits offs into its low and high order bits.
func offs2lohi(offs int64) (lo, hi uintptr) {
const longBits = SizeofLong * 8
return uintptr(offs), uintptr(uint64(offs) >> longBits)
return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet
}
func Readv(fd int, iovs [][]byte) (n int, err error) {

View file

@ -578,7 +578,7 @@ func Lutimes(path string, tv []Timeval) error {
return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW)
}
// emptyIovec reports whether there are no bytes in the slice of Iovec.
// emptyIovecs reports whether there are no bytes in the slice of Iovec.
func emptyIovecs(iov []Iovec) bool {
for i := range iov {
if iov[i].Len > 0 {

View file

@ -9,7 +9,7 @@ package unix
import "time"
// TimespecToNSec returns the time stored in ts as nanoseconds.
// TimespecToNsec returns the time stored in ts as nanoseconds.
func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }
// NsecToTimespec converts a number of nanoseconds into a Timespec.

View file

@ -36,9 +36,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) {
func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) {
if len(dest) > idx {
return unsafe.Pointer(&dest[idx])
} else {
return unsafe.Pointer(_zero)
}
if dest != nil {
// extattr_get_file and extattr_list_file treat NULL differently from
// a non-NULL pointer of length zero. Preserve the property of nilness,
// even if we can't use dest directly.
return unsafe.Pointer(&_zero)
}
return nil
}
// FreeBSD and NetBSD implement their own syscalls to handle extended attributes

View file

@ -457,7 +457,6 @@ const (
B600 = 0x8
B75 = 0x2
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
@ -563,6 +562,7 @@ const (
BUS_USB = 0x3
BUS_VIRTUAL = 0x6
CAN_BCM = 0x2
CAN_BUS_OFF_THRESHOLD = 0x100
CAN_CTRLMODE_3_SAMPLES = 0x4
CAN_CTRLMODE_BERR_REPORTING = 0x10
CAN_CTRLMODE_CC_LEN8_DLC = 0x100
@ -577,9 +577,12 @@ const (
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERROR_PASSIVE_THRESHOLD = 0x80
CAN_ERROR_WARNING_THRESHOLD = 0x60
CAN_ERR_ACK = 0x20
CAN_ERR_BUSERROR = 0x80
CAN_ERR_BUSOFF = 0x40
CAN_ERR_CNT = 0x200
CAN_ERR_CRTL = 0x4
CAN_ERR_CRTL_ACTIVE = 0x40
CAN_ERR_CRTL_RX_OVERFLOW = 0x1
@ -820,9 +823,9 @@ const (
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
DM_VERSION_EXTRA = "-ioctl (2022-02-22)"
DM_VERSION_EXTRA = "-ioctl (2022-07-28)"
DM_VERSION_MAJOR = 0x4
DM_VERSION_MINOR = 0x2e
DM_VERSION_MINOR = 0x2f
DM_VERSION_PATCHLEVEL = 0x0
DT_BLK = 0x6
DT_CHR = 0x2
@ -1049,6 +1052,7 @@ const (
ETH_P_CAIF = 0xf7
ETH_P_CAN = 0xc
ETH_P_CANFD = 0xd
ETH_P_CANXL = 0xe
ETH_P_CFM = 0x8902
ETH_P_CONTROL = 0x16
ETH_P_CUST = 0x6006
@ -1060,6 +1064,7 @@ const (
ETH_P_DNA_RT = 0x6003
ETH_P_DSA = 0x1b
ETH_P_DSA_8021Q = 0xdadb
ETH_P_DSA_A5PSW = 0xe001
ETH_P_ECONET = 0x18
ETH_P_EDSA = 0xdada
ETH_P_ERSPAN = 0x88be
@ -1194,8 +1199,10 @@ const (
FAN_MARK_EVICTABLE = 0x200
FAN_MARK_FILESYSTEM = 0x100
FAN_MARK_FLUSH = 0x80
FAN_MARK_IGNORE = 0x400
FAN_MARK_IGNORED_MASK = 0x20
FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0
FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8
@ -1253,6 +1260,7 @@ const (
FSCRYPT_MODE_AES_128_CBC = 0x5
FSCRYPT_MODE_AES_128_CTS = 0x6
FSCRYPT_MODE_AES_256_CTS = 0x4
FSCRYPT_MODE_AES_256_HCTR2 = 0xa
FSCRYPT_MODE_AES_256_XTS = 0x1
FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2
FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3
@ -1430,6 +1438,7 @@ const (
IFF_NOARP = 0x80
IFF_NOFILTER = 0x1000
IFF_NOTRAILERS = 0x20
IFF_NO_CARRIER = 0x40
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_PERSIST = 0x800
@ -1805,6 +1814,7 @@ const (
MADV_DONTDUMP = 0x10
MADV_DONTFORK = 0xa
MADV_DONTNEED = 0x4
MADV_DONTNEED_LOCKED = 0x18
MADV_FREE = 0x8
MADV_HUGEPAGE = 0xe
MADV_HWPOISON = 0x64
@ -1846,7 +1856,7 @@ const (
MFD_ALLOW_SEALING = 0x2
MFD_CLOEXEC = 0x1
MFD_HUGETLB = 0x4
MFD_HUGE_16GB = -0x78000000
MFD_HUGE_16GB = 0x88000000
MFD_HUGE_16MB = 0x60000000
MFD_HUGE_1GB = 0x78000000
MFD_HUGE_1MB = 0x50000000
@ -2212,6 +2222,11 @@ const (
PERF_AUX_FLAG_PARTIAL = 0x4
PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00
PERF_AUX_FLAG_TRUNCATED = 0x1
PERF_BR_ARM64_DEBUG_DATA = 0x7
PERF_BR_ARM64_DEBUG_EXIT = 0x5
PERF_BR_ARM64_DEBUG_HALT = 0x4
PERF_BR_ARM64_DEBUG_INST = 0x6
PERF_BR_ARM64_FIQ = 0x3
PERF_FLAG_FD_CLOEXEC = 0x8
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
@ -2232,6 +2247,8 @@ const (
PERF_MEM_LOCK_NA = 0x1
PERF_MEM_LOCK_SHIFT = 0x18
PERF_MEM_LVLNUM_ANY_CACHE = 0xb
PERF_MEM_LVLNUM_CXL = 0x9
PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2
PERF_MEM_LVLNUM_L3 = 0x3
@ -2265,6 +2282,7 @@ const (
PERF_MEM_REMOTE_REMOTE = 0x1
PERF_MEM_REMOTE_SHIFT = 0x25
PERF_MEM_SNOOPX_FWD = 0x1
PERF_MEM_SNOOPX_PEER = 0x2
PERF_MEM_SNOOPX_SHIFT = 0x26
PERF_MEM_SNOOP_HIT = 0x4
PERF_MEM_SNOOP_HITM = 0x10
@ -2301,7 +2319,6 @@ const (
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
PIPEFS_MAGIC = 0x50495045
PPC_CMM_MAGIC = 0xc7571590
PPPIOCGNPMODE = 0xc008744c
PPPIOCNEWUNIT = 0xc004743e
PRIO_PGRP = 0x1
@ -2999,6 +3016,7 @@ const (
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_DIOALIGN = 0x2000
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MNT_ID = 0x1000
@ -3392,9 +3410,7 @@ const (
XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XFS_SUPER_MAGIC = 0x58465342
Z3FOLD_MAGIC = 0x33
ZONEFS_MAGIC = 0x5a4f4653
ZSMALLOC_MAGIC = 0x58295829
_HIDIOCGRAWNAME_LEN = 0x80
_HIDIOCGRAWPHYS_LEN = 0x40
_HIDIOCGRAWUNIQ_LEN = 0x40

View file

@ -133,6 +133,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc03c4d1a
MEMREADOOB = 0xc00c4d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -133,6 +133,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc00c4d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -134,6 +134,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -132,6 +132,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc00c4d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc00c4d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc00c4d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -131,6 +131,7 @@ const (
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06

View file

@ -136,6 +136,7 @@ const (
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06

View file

@ -537,6 +537,17 @@ func Chroot(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockAdjtime(clockid int32, buf *Timex) (state int, err error) {
r0, _, e1 := Syscall(SYS_CLOCK_ADJTIME, uintptr(clockid), uintptr(unsafe.Pointer(buf)), 0)
state = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ClockGetres(clockid int32, res *Timespec) (err error) {
_, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0)
if e1 != 0 {

View file

@ -29,6 +29,41 @@ type Itimerval struct {
Value Timeval
}
const (
ADJ_OFFSET = 0x1
ADJ_FREQUENCY = 0x2
ADJ_MAXERROR = 0x4
ADJ_ESTERROR = 0x8
ADJ_STATUS = 0x10
ADJ_TIMECONST = 0x20
ADJ_TAI = 0x80
ADJ_SETOFFSET = 0x100
ADJ_MICRO = 0x1000
ADJ_NANO = 0x2000
ADJ_TICK = 0x4000
ADJ_OFFSET_SINGLESHOT = 0x8001
ADJ_OFFSET_SS_READ = 0xa001
)
const (
STA_PLL = 0x1
STA_PPSFREQ = 0x2
STA_PPSTIME = 0x4
STA_FLL = 0x8
STA_INS = 0x10
STA_DEL = 0x20
STA_UNSYNC = 0x40
STA_FREQHOLD = 0x80
STA_PPSSIGNAL = 0x100
STA_PPSJITTER = 0x200
STA_PPSWANDER = 0x400
STA_PPSERROR = 0x800
STA_CLOCKERR = 0x1000
STA_NANO = 0x2000
STA_MODE = 0x4000
STA_CLK = 0x8000
)
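Together with the ClockAdjtime wrapper added earlier in this commit, the ADJ_* and STA_* constants expose adjtimex-style clock state to Go callers. A sketch (Linux only; assumes imports of "log" and golang.org/x/sys/unix):
// reportClockState queries (but does not modify) the realtime clock.
func reportClockState() error {
	var tmx unix.Timex // Modes == 0 means read-only query
	state, err := unix.ClockAdjtime(unix.CLOCK_REALTIME, &tmx)
	if err != nil {
		return err
	}
	if tmx.Status&unix.STA_UNSYNC != 0 {
		log.Printf("system clock unsynchronized (adjtime state=%d)", state)
	}
	return nil
}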
const (
TIME_OK = 0x0
TIME_INS = 0x1
@ -53,29 +88,30 @@ type StatxTimestamp struct {
}
type Statx_t struct {
Mask uint32
Blksize uint32
Attributes uint64
Nlink uint32
Uid uint32
Gid uint32
Mode uint16
_ [1]uint16
Ino uint64
Size uint64
Blocks uint64
Attributes_mask uint64
Atime StatxTimestamp
Btime StatxTimestamp
Ctime StatxTimestamp
Mtime StatxTimestamp
Rdev_major uint32
Rdev_minor uint32
Dev_major uint32
Dev_minor uint32
Mnt_id uint64
_ uint64
_ [12]uint64
Mask uint32
Blksize uint32
Attributes uint64
Nlink uint32
Uid uint32
Gid uint32
Mode uint16
_ [1]uint16
Ino uint64
Size uint64
Blocks uint64
Attributes_mask uint64
Atime StatxTimestamp
Btime StatxTimestamp
Ctime StatxTimestamp
Mtime StatxTimestamp
Rdev_major uint32
Rdev_minor uint32
Dev_major uint32
Dev_minor uint32
Mnt_id uint64
Dio_mem_align uint32
Dio_offset_align uint32
_ [12]uint64
}
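Statx_t gains Dio_mem_align and Dio_offset_align, which pair with the new STATX_DIOALIGN mask bit. A sketch of querying direct-I/O alignment (Linux; the path argument is a placeholder; assumes imports of "fmt" and golang.org/x/sys/unix):
// printDioAlignment reports a file's direct-I/O alignment requirements,
// when the kernel fills them in.
func printDioAlignment(path string) error {
	var stx unix.Statx_t
	if err := unix.Statx(unix.AT_FDCWD, path, 0, unix.STATX_DIOALIGN, &stx); err != nil {
		return err
	}
	if stx.Mask&unix.STATX_DIOALIGN != 0 {
		fmt.Printf("direct I/O: memory align=%d, offset align=%d\n",
			stx.Dio_mem_align, stx.Dio_offset_align)
	}
	return nil
}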
type Fsid struct {
@ -1099,7 +1135,8 @@ const (
PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10
PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11
PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12
PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13
PERF_SAMPLE_BRANCH_USER = 0x1
PERF_SAMPLE_BRANCH_KERNEL = 0x2
PERF_SAMPLE_BRANCH_HV = 0x4
@ -1118,7 +1155,8 @@ const (
PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000
PERF_SAMPLE_BRANCH_MAX = 0x40000
PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000
PERF_SAMPLE_BRANCH_MAX = 0x80000
PERF_BR_UNKNOWN = 0x0
PERF_BR_COND = 0x1
PERF_BR_UNCOND = 0x2
@ -1132,7 +1170,10 @@ const (
PERF_BR_COND_RET = 0xa
PERF_BR_ERET = 0xb
PERF_BR_IRQ = 0xc
PERF_BR_MAX = 0xd
PERF_BR_SERROR = 0xd
PERF_BR_NO_TX = 0xe
PERF_BR_EXTEND_ABI = 0xf
PERF_BR_MAX = 0x10
PERF_SAMPLE_REGS_ABI_NONE = 0x0
PERF_SAMPLE_REGS_ABI_32 = 0x1
PERF_SAMPLE_REGS_ABI_64 = 0x2
@ -1151,7 +1192,8 @@ const (
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
PERF_FORMAT_MAX = 0x10
PERF_FORMAT_LOST = 0x10
PERF_FORMAT_MAX = 0x20
PERF_IOC_FLAG_GROUP = 0x1
PERF_RECORD_MMAP = 0x1
PERF_RECORD_LOST = 0x2
@ -2979,7 +3021,16 @@ const (
DEVLINK_CMD_TRAP_POLICER_NEW = 0x47
DEVLINK_CMD_TRAP_POLICER_DEL = 0x48
DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49
DEVLINK_CMD_MAX = 0x51
DEVLINK_CMD_RATE_GET = 0x4a
DEVLINK_CMD_RATE_SET = 0x4b
DEVLINK_CMD_RATE_NEW = 0x4c
DEVLINK_CMD_RATE_DEL = 0x4d
DEVLINK_CMD_LINECARD_GET = 0x4e
DEVLINK_CMD_LINECARD_SET = 0x4f
DEVLINK_CMD_LINECARD_NEW = 0x50
DEVLINK_CMD_LINECARD_DEL = 0x51
DEVLINK_CMD_SELFTESTS_GET = 0x52
DEVLINK_CMD_MAX = 0x53
DEVLINK_PORT_TYPE_NOTSET = 0x0
DEVLINK_PORT_TYPE_AUTO = 0x1
DEVLINK_PORT_TYPE_ETH = 0x2
@ -3208,7 +3259,13 @@ const (
DEVLINK_ATTR_RATE_NODE_NAME = 0xa8
DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9
DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa
DEVLINK_ATTR_MAX = 0xae
DEVLINK_ATTR_LINECARD_INDEX = 0xab
DEVLINK_ATTR_LINECARD_STATE = 0xac
DEVLINK_ATTR_LINECARD_TYPE = 0xad
DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae
DEVLINK_ATTR_NESTED_DEVLINK = 0xaf
DEVLINK_ATTR_SELFTESTS = 0xb0
DEVLINK_ATTR_MAX = 0xb0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0
@ -3317,7 +3374,8 @@ const (
LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7
LWTUNNEL_ENCAP_RPL = 0x8
LWTUNNEL_ENCAP_IOAM6 = 0x9
LWTUNNEL_ENCAP_MAX = 0x9
LWTUNNEL_ENCAP_XFRM = 0xa
LWTUNNEL_ENCAP_MAX = 0xa
MPLS_IPTUNNEL_UNSPEC = 0x0
MPLS_IPTUNNEL_DST = 0x1
@ -3512,7 +3570,9 @@ const (
ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21
ETHTOOL_MSG_MODULE_GET = 0x22
ETHTOOL_MSG_MODULE_SET = 0x23
ETHTOOL_MSG_USER_MAX = 0x23
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_USER_MAX = 0x25
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@ -3550,7 +3610,8 @@ const (
ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22
ETHTOOL_MSG_MODULE_GET_REPLY = 0x23
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_KERNEL_MAX = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_KERNEL_MAX = 0x25
ETHTOOL_A_HEADER_UNSPEC = 0x0
ETHTOOL_A_HEADER_DEV_INDEX = 0x1
ETHTOOL_A_HEADER_DEV_NAME = 0x2
@ -3609,7 +3670,8 @@ const (
ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7
ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8
ETHTOOL_A_LINKMODES_LANES = 0x9
ETHTOOL_A_LINKMODES_MAX = 0x9
ETHTOOL_A_LINKMODES_RATE_MATCHING = 0xa
ETHTOOL_A_LINKMODES_MAX = 0xa
ETHTOOL_A_LINKSTATE_UNSPEC = 0x0
ETHTOOL_A_LINKSTATE_HEADER = 0x1
ETHTOOL_A_LINKSTATE_LINK = 0x2
@ -4201,6 +4263,9 @@ const (
NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1
NL80211_AC_VI = 0x1
NL80211_AC_VO = 0x0
NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 0x1
NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 0x2
NL80211_AP_SME_SA_QUERY_OFFLOAD = 0x1
NL80211_ATTR_4ADDR = 0x53
NL80211_ATTR_ACK = 0x5c
NL80211_ATTR_ACK_SIGNAL = 0x107
@ -4209,6 +4274,7 @@ const (
NL80211_ATTR_AIRTIME_WEIGHT = 0x112
NL80211_ATTR_AKM_SUITES = 0x4c
NL80211_ATTR_AP_ISOLATE = 0x60
NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135
NL80211_ATTR_AUTH_DATA = 0x9c
NL80211_ATTR_AUTH_TYPE = 0x35
NL80211_ATTR_BANDS = 0xef
@ -4240,6 +4306,9 @@ const (
NL80211_ATTR_COALESCE_RULE_DELAY = 0x1
NL80211_ATTR_COALESCE_RULE_MAX = 0x3
NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3
NL80211_ATTR_COLOR_CHANGE_COLOR = 0x130
NL80211_ATTR_COLOR_CHANGE_COUNT = 0x12f
NL80211_ATTR_COLOR_CHANGE_ELEMS = 0x131
NL80211_ATTR_CONN_FAILED_REASON = 0x9b
NL80211_ATTR_CONTROL_PORT = 0x44
NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66
@ -4266,6 +4335,7 @@ const (
NL80211_ATTR_DEVICE_AP_SME = 0x8d
NL80211_ATTR_DFS_CAC_TIME = 0x7
NL80211_ATTR_DFS_REGION = 0x92
NL80211_ATTR_DISABLE_EHT = 0x137
NL80211_ATTR_DISABLE_HE = 0x12d
NL80211_ATTR_DISABLE_HT = 0x93
NL80211_ATTR_DISABLE_VHT = 0xaf
@ -4273,6 +4343,8 @@ const (
NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e
NL80211_ATTR_DTIM_PERIOD = 0xd
NL80211_ATTR_DURATION = 0x57
NL80211_ATTR_EHT_CAPABILITY = 0x136
NL80211_ATTR_EML_CAPABILITY = 0x13d
NL80211_ATTR_EXT_CAPA = 0xa9
NL80211_ATTR_EXT_CAPA_MASK = 0xaa
NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104
@ -4337,10 +4409,11 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x137
NL80211_ATTR_MAX = 0x140
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c
NL80211_ATTR_MAX_NUM_PMKIDS = 0x56
NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b
NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde
@ -4350,6 +4423,8 @@ const (
NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf
NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0
NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c
NL80211_ATTR_MBSSID_CONFIG = 0x132
NL80211_ATTR_MBSSID_ELEMS = 0x133
NL80211_ATTR_MCAST_RATE = 0x6b
NL80211_ATTR_MDID = 0xb1
NL80211_ATTR_MEASUREMENT_DURATION = 0xeb
@ -4359,6 +4434,11 @@ const (
NL80211_ATTR_MESH_PEER_AID = 0xed
NL80211_ATTR_MESH_SETUP = 0x70
NL80211_ATTR_MGMT_SUBTYPE = 0x29
NL80211_ATTR_MLD_ADDR = 0x13a
NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e
NL80211_ATTR_MLO_LINK_ID = 0x139
NL80211_ATTR_MLO_LINKS = 0x138
NL80211_ATTR_MLO_SUPPORT = 0x13b
NL80211_ATTR_MNTR_FLAGS = 0x17
NL80211_ATTR_MPATH_INFO = 0x1b
NL80211_ATTR_MPATH_NEXT_HOP = 0x1a
@ -4371,6 +4451,7 @@ const (
NL80211_ATTR_NETNS_FD = 0xdb
NL80211_ATTR_NOACK_MAP = 0x95
NL80211_ATTR_NSS = 0x106
NL80211_ATTR_OBSS_COLOR_BITMAP = 0x12e
NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c
NL80211_ATTR_OPER_CLASS = 0xd6
NL80211_ATTR_OPMODE_NOTIF = 0xc2
@ -4397,6 +4478,7 @@ const (
NL80211_ATTR_PROTOCOL_FEATURES = 0xad
NL80211_ATTR_PS_STATE = 0x5d
NL80211_ATTR_QOS_MAP = 0xc7
NL80211_ATTR_RADAR_BACKGROUND = 0x134
NL80211_ATTR_RADAR_EVENT = 0xa8
NL80211_ATTR_REASON_CODE = 0x36
NL80211_ATTR_RECEIVE_MULTICAST = 0x121
@ -4412,6 +4494,7 @@ const (
NL80211_ATTR_RESP_IE = 0x4e
NL80211_ATTR_ROAM_SUPPORT = 0x83
NL80211_ATTR_RX_FRAME_TYPES = 0x64
NL80211_ATTR_RX_HW_TIMESTAMP = 0x140
NL80211_ATTR_RXMGMT_FLAGS = 0xbc
NL80211_ATTR_RX_SIGNAL_DBM = 0x97
NL80211_ATTR_S1G_CAPABILITY = 0x128
@ -4484,6 +4567,7 @@ const (
NL80211_ATTR_TSID = 0xd2
NL80211_ATTR_TWT_RESPONDER = 0x116
NL80211_ATTR_TX_FRAME_TYPES = 0x63
NL80211_ATTR_TX_HW_TIMESTAMP = 0x13f
NL80211_ATTR_TX_NO_CCK_RATE = 0x87
NL80211_ATTR_TXQ_LIMIT = 0x10a
NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b
@ -4557,6 +4641,10 @@ const (
NL80211_BAND_ATTR_RATES = 0x2
NL80211_BAND_ATTR_VHT_CAPA = 0x8
NL80211_BAND_ATTR_VHT_MCS_SET = 0x7
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 0xa
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 0x9
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 0xb
NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6
NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2
NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4
@ -4564,6 +4652,8 @@ const (
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5
NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1
NL80211_BAND_IFTYPE_ATTR_MAX = 0xb
NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 0x7
NL80211_BAND_LC = 0x5
NL80211_BAND_S1GHZ = 0x4
NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2
NL80211_BITRATE_ATTR_MAX = 0x2
@ -4584,7 +4674,9 @@ const (
NL80211_BSS_FREQUENCY_OFFSET = 0x14
NL80211_BSS_INFORMATION_ELEMENTS = 0x6
NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf
NL80211_BSS_MAX = 0x14
NL80211_BSS_MAX = 0x16
NL80211_BSS_MLD_ADDR = 0x16
NL80211_BSS_MLO_LINK_ID = 0x15
NL80211_BSS_PAD = 0x10
NL80211_BSS_PARENT_BSSID = 0x12
NL80211_BSS_PARENT_TSF = 0x11
@ -4612,6 +4704,7 @@ const (
NL80211_CHAN_WIDTH_20 = 0x1
NL80211_CHAN_WIDTH_20_NOHT = 0x0
NL80211_CHAN_WIDTH_2 = 0x9
NL80211_CHAN_WIDTH_320 = 0xd
NL80211_CHAN_WIDTH_40 = 0x2
NL80211_CHAN_WIDTH_4 = 0xa
NL80211_CHAN_WIDTH_5 = 0x6
@ -4621,8 +4714,11 @@ const (
NL80211_CMD_ABORT_SCAN = 0x72
NL80211_CMD_ACTION = 0x3b
NL80211_CMD_ACTION_TX_STATUS = 0x3c
NL80211_CMD_ADD_LINK = 0x94
NL80211_CMD_ADD_LINK_STA = 0x96
NL80211_CMD_ADD_NAN_FUNCTION = 0x75
NL80211_CMD_ADD_TX_TS = 0x69
NL80211_CMD_ASSOC_COMEBACK = 0x93
NL80211_CMD_ASSOCIATE = 0x26
NL80211_CMD_AUTHENTICATE = 0x25
NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38
@ -4630,6 +4726,10 @@ const (
NL80211_CMD_CHANNEL_SWITCH = 0x66
NL80211_CMD_CH_SWITCH_NOTIFY = 0x58
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e
NL80211_CMD_COLOR_CHANGE_ABORTED = 0x90
NL80211_CMD_COLOR_CHANGE_COMPLETED = 0x91
NL80211_CMD_COLOR_CHANGE_REQUEST = 0x8e
NL80211_CMD_COLOR_CHANGE_STARTED = 0x8f
NL80211_CMD_CONNECT = 0x2e
NL80211_CMD_CONN_FAILED = 0x5b
NL80211_CMD_CONTROL_PORT_FRAME = 0x81
@ -4678,8 +4778,9 @@ const (
NL80211_CMD_LEAVE_IBSS = 0x2c
NL80211_CMD_LEAVE_MESH = 0x45
NL80211_CMD_LEAVE_OCB = 0x6d
NL80211_CMD_MAX = 0x93
NL80211_CMD_MAX = 0x98
NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29
NL80211_CMD_MODIFY_LINK_STA = 0x97
NL80211_CMD_NAN_MATCH = 0x78
NL80211_CMD_NEW_BEACON = 0xf
NL80211_CMD_NEW_INTERFACE = 0x7
@ -4692,6 +4793,7 @@ const (
NL80211_CMD_NEW_WIPHY = 0x3
NL80211_CMD_NOTIFY_CQM = 0x40
NL80211_CMD_NOTIFY_RADAR = 0x86
NL80211_CMD_OBSS_COLOR_COLLISION = 0x8d
NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85
NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84
NL80211_CMD_PEER_MEASUREMENT_START = 0x83
@ -4707,6 +4809,8 @@ const (
NL80211_CMD_REGISTER_FRAME = 0x3a
NL80211_CMD_RELOAD_REGDB = 0x7e
NL80211_CMD_REMAIN_ON_CHANNEL = 0x37
NL80211_CMD_REMOVE_LINK = 0x95
NL80211_CMD_REMOVE_LINK_STA = 0x98
NL80211_CMD_REQ_SET_REG = 0x1b
NL80211_CMD_ROAM = 0x2f
NL80211_CMD_SCAN_ABORTED = 0x23
@ -4717,6 +4821,7 @@ const (
NL80211_CMD_SET_CHANNEL = 0x41
NL80211_CMD_SET_COALESCE = 0x65
NL80211_CMD_SET_CQM = 0x3f
NL80211_CMD_SET_FILS_AAD = 0x92
NL80211_CMD_SET_INTERFACE = 0x6
NL80211_CMD_SET_KEY = 0xa
NL80211_CMD_SET_MAC_ACL = 0x5d
@ -4791,6 +4896,8 @@ const (
NL80211_EDMG_BW_CONFIG_MIN = 0x4
NL80211_EDMG_CHANNELS_MAX = 0x3c
NL80211_EDMG_CHANNELS_MIN = 0x1
NL80211_EHT_MAX_CAPABILITY_LEN = 0x33
NL80211_EHT_MIN_CAPABILITY_LEN = 0xd
NL80211_EXTERNAL_AUTH_ABORT = 0x1
NL80211_EXTERNAL_AUTH_START = 0x0
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32
@ -4807,6 +4914,7 @@ const (
NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7
NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6
NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8
NL80211_EXT_FEATURE_BSS_COLOR = 0x3a
NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4
NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f
NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a
@ -4818,6 +4926,7 @@ const (
NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20
NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24
NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 0x3b
NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34
NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11
NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe
@ -4833,8 +4942,10 @@ const (
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d
NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b
NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39
NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c
NL80211_EXT_FEATURE_RRM = 0x1
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33
NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26
@ -4906,7 +5017,9 @@ const (
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10
NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a
NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb
NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b
NL80211_FREQUENCY_ATTR_NO_HE = 0x13
NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9
NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa
@ -5006,6 +5119,12 @@ const (
NL80211_MAX_SUPP_HT_RATES = 0x4d
NL80211_MAX_SUPP_RATES = 0x20
NL80211_MAX_SUPP_REG_RULES = 0x80
NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5
NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3
NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2
NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1
NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4
NL80211_MESHCONF_ATTR_MAX = 0x1f
NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7
NL80211_MESHCONF_AWAKE_WINDOW = 0x1b
@ -5168,6 +5287,7 @@ const (
NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0
NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3
NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1
NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 0xd
NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5
NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4
NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6
@ -5244,12 +5364,36 @@ const (
NL80211_RADAR_PRE_CAC_EXPIRED = 0x4
NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb
NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa
NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12
NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3
NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc
NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8
NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9
NL80211_RATE_INFO_BITRATE32 = 0x5
NL80211_RATE_INFO_BITRATE = 0x1
NL80211_RATE_INFO_EHT_GI_0_8 = 0x0
NL80211_RATE_INFO_EHT_GI_1_6 = 0x1
NL80211_RATE_INFO_EHT_GI_3_2 = 0x2
NL80211_RATE_INFO_EHT_GI = 0x15
NL80211_RATE_INFO_EHT_MCS = 0x13
NL80211_RATE_INFO_EHT_NSS = 0x14
NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 0x3
NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 0x4
NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 0x5
NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0x0
NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 0xb
NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 0xc
NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 0xd
NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 = 0xe
NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 0x6
NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 0x7
NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 0xf
NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 0x1
NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 0x2
NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 0x8
NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 0x9
NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 0xa
NL80211_RATE_INFO_EHT_RU_ALLOC = 0x16
NL80211_RATE_INFO_HE_1XLTF = 0x0
NL80211_RATE_INFO_HE_2XLTF = 0x1
NL80211_RATE_INFO_HE_4XLTF = 0x2
@ -5292,6 +5436,7 @@ const (
NL80211_RRF_GO_CONCURRENT = 0x1000
NL80211_RRF_IR_CONCURRENT = 0x1000
NL80211_RRF_NO_160MHZ = 0x10000
NL80211_RRF_NO_320MHZ = 0x40000
NL80211_RRF_NO_80MHZ = 0x8000
NL80211_RRF_NO_CCK = 0x2
NL80211_RRF_NO_HE = 0x20000

View file

@ -10,7 +10,6 @@ import (
errorspkg "errors"
"fmt"
"runtime"
"strings"
"sync"
"syscall"
"time"
@ -87,22 +86,13 @@ func StringToUTF16(s string) []uint16 {
// s, with a terminating NUL added. If s contains a NUL byte at any
// location, it returns (nil, syscall.EINVAL).
func UTF16FromString(s string) ([]uint16, error) {
if strings.IndexByte(s, 0) != -1 {
return nil, syscall.EINVAL
}
return utf16.Encode([]rune(s + "\x00")), nil
return syscall.UTF16FromString(s)
}
// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
// with a terminating NUL and any bytes after the NUL removed.
func UTF16ToString(s []uint16) string {
for i, v := range s {
if v == 0 {
s = s[:i]
break
}
}
return string(utf16.Decode(s))
return syscall.UTF16ToString(s)
}
// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead.
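The UTF-16 helpers now delegate to the syscall package, with the exported behaviour unchanged. A round-trip sketch (Windows-only build; assumes imports of "fmt", "log" and golang.org/x/sys/windows):
// roundTrip converts a Go string to NUL-terminated UTF-16 and back.
func roundTrip() {
	u16, err := windows.UTF16FromString("héllo")
	if err != nil {
		log.Fatal(err)
	}
	s := windows.UTF16ToString(u16) // NUL and anything after it dropped
	fmt.Println(s == "héllo")       // true
}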

12
vendor/modules.txt vendored
View file

@ -49,10 +49,10 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# go.etcd.io/bbolt v1.3.6
## explicit; go 1.12
# go.etcd.io/bbolt v1.3.7
## explicit; go 1.17
go.etcd.io/bbolt
# golang.org/x/crypto v0.5.0
# golang.org/x/crypto v0.6.0
## explicit; go 1.17
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
@ -66,19 +66,19 @@ golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.5.0
# golang.org/x/net v0.7.0
## explicit; go 1.17
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/idna
# golang.org/x/sys v0.4.0
# golang.org/x/sys v0.5.0
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/execabs
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/text v0.6.0
# golang.org/x/text v0.7.0
## explicit; go 1.17
golang.org/x/text/secure/bidirule
golang.org/x/text/transform