From cb229cb6cf8c02d226fbd21cf39d5f0ae835d29b Mon Sep 17 00:00:00 2001 From: Mechiel Lukkien Date: Mon, 30 Jan 2023 14:27:06 +0100 Subject: [PATCH] mox! --- .dockerignore | 8 + .gitignore | 22 + .go/empty | 0 .jshintrc | 12 + Dockerfile | 11 + Dockerfile.imaptest | 7 + LICENSE.MIT | 7 + LICENSE.MPLv2.0 | 373 + Makefile | 76 + README.md | 181 + autotls/autotls.go | 279 + autotls/autotls_test.go | 97 + checkhtmljs | 2 + compatibility.txt | 3 + config/config.go | 245 + config/doc.go | 465 + ctl.go | 607 + dkim/dkim.go | 849 + dkim/dkim_test.go | 702 + dkim/fuzz_test.go | 25 + dkim/parser.go | 474 + dkim/policy.go | 49 + dkim/sig.go | 353 + dkim/sig_test.go | 180 + dkim/txt.go | 278 + dkim/txt_test.go | 133 + dmarc/dmarc.go | 239 + dmarc/dmarc_test.go | 275 + dmarc/fuzz_test.go | 17 + dmarc/parse.go | 343 + dmarc/parse_test.go | 142 + dmarc/txt.go | 127 + dmarcdb/db.go | 186 + dmarcdb/db_test.go | 108 + dmarcrpt/feedback.go | 157 + dmarcrpt/parse.go | 124 + dmarcrpt/parse_test.go | 179 + dns/dns.go | 109 + dns/dns_test.go | 27 + dns/ipdomain.go | 42 + dns/mock.go | 156 + dns/resolver.go | 248 + dnsbl/dnsbl.go | 130 + dnsbl/dnsbl_test.go | 64 + doc.go | 613 + docker-compose-imaptest.yml | 31 + docker-compose-integration.yml | 95 + dsn/dsn.go | 405 + dsn/dsn_test.go | 243 + dsn/nameip.go | 15 + dsn/parse.go | 360 + export.go | 264 + gendoc.sh | 71 + go.mod | 31 + go.sum | 507 + http/account.go | 114 + http/account.html | 214 + http/account_test.go | 3 + http/accountapi.json | 25 + http/admin.go | 1382 ++ http/admin.html | 1480 ++ http/admin_test.go | 123 + http/adminapi.json | 3104 ++++ http/autoconf.go | 344 + http/autoconf_test.go | 26 + http/mtasts.go | 64 + http/mtasts_test.go | 3 + http/web.go | 240 + imapclient/client.go | 293 + imapclient/cmds.go | 292 + imapclient/parse.go | 1223 ++ imapclient/protocol.go | 452 + imapserver/append_test.go | 77 + imapserver/authenticate_test.go | 110 + imapserver/copy_test.go | 53 + imapserver/create_test.go | 69 + 
imapserver/delete_test.go | 56 + imapserver/error.go | 55 + imapserver/expunge_test.go | 74 + imapserver/fetch.go | 738 + imapserver/fetch_test.go | 403 + imapserver/fuzz_test.go | 140 + imapserver/idle_test.go | 52 + imapserver/list.go | 228 + imapserver/list_test.go | 215 + imapserver/lsub_test.go | 35 + imapserver/move_test.go | 92 + imapserver/pack.go | 213 + imapserver/parse.go | 942 ++ imapserver/prefixconn.go | 28 + imapserver/protocol.go | 186 + imapserver/protocol_test.go | 61 + imapserver/rename_test.go | 81 + imapserver/search.go | 463 + imapserver/search_test.go | 345 + imapserver/selectexamine_test.go | 71 + imapserver/server.go | 3012 ++++ imapserver/server_test.go | 646 + imapserver/starttls_test.go | 28 + imapserver/status_test.go | 34 + imapserver/store_test.go | 68 + imapserver/subscribe_test.go | 32 + imapserver/unselect_test.go | 26 + imapserver/unsubscribe_test.go | 23 + imapserver/utf7.go | 83 + imapserver/utf7_test.go | 33 + import.go | 700 + import_test.go | 77 + integration_test.go | 144 + iprev/iprev.go | 90 + iprev/iprev_test.go | 68 + junk.go | 440 + junk/bloom.go | 165 + junk/bloom_test.go | 136 + junk/filter.go | 726 + junk/filter_test.go | 201 + junk/parse.go | 323 + junk/parse_test.go | 33 + main.go | 1908 +++ main_test.go | 24 + message/doc.go | 3 + message/from.go | 43 + message/headerwriter.go | 65 + message/part.go | 777 + message/part_test.go | 501 + message/readheaders.go | 31 + message/time.go | 4 + message/todo.go | 12 + message/writer.go | 55 + message/writer_test.go | 41 + metrics/auth.go | 25 + metrics/http.go | 61 + metrics/panic.go | 20 + mlog/log.go | 376 + mox-/admin.go | 824 + mox-/cid.go | 17 + mox-/config.go | 888 + mox-/dir.go | 36 + mox-/doc.go | 3 + mox-/ip.go | 21 + mox-/lastknown.go | 51 + mox-/lifecycle.go | 147 + mox-/lifecycle_test.go | 44 + mox-/lookup.go | 66 + mox-/msgid.go | 14 + mox-/rand.go | 22 + mox-/recvid.go | 61 + mox-/setcaphint.go | 18 + mox-/sleep.go | 19 + mox-/tlsinfo.go | 29 + mox.service | 
55 + moxio/atreader.go | 20 + moxio/bufpool.go | 103 + moxio/bufpool_test.go | 57 + moxio/doc.go | 2 + moxio/isclosed.go | 24 + moxio/limitatreader.go | 20 + moxio/limitreader.go | 27 + moxio/prefixconn.go | 25 + moxio/storagespace.go | 14 + moxio/syncdir.go | 17 + moxio/trace.go | 48 + moxio/umask.go | 18 + moxvar/version.go | 38 + mtasts/mtasts.go | 333 + mtasts/mtasts_test.go | 267 + mtasts/parse.go | 347 + mtasts/parse_test.go | 237 + mtastsdb/db.go | 285 + mtastsdb/db_test.go | 158 + mtastsdb/refresh.go | 176 + mtastsdb/refresh_test.go | 231 + publicsuffix/list.go | 189 + publicsuffix/list_test.go | 79 + publicsuffix/public_suffix_list.txt | 13825 ++++++++++++++++ queue/dsn.go | 181 + queue/queue.go | 897 + queue/queue_test.go | 535 + quickstart.go | 441 + rfc/Makefile | 7 + rfc/errata.go | 59 + rfc/fetch.sh | 14 + rfc/index.md | 301 + rfc/link.go | 199 + scram/parse.go | 266 + scram/scram.go | 368 + scram/scram_test.go | 169 + serve.go | 351 + smtp/address.go | 316 + smtp/address_test.go | 92 + smtp/addrlit.go | 16 + smtp/codes.go | 145 + smtp/data.go | 138 + smtp/data_test.go | 91 + smtp/doc.go | 2 + smtp/ehlo.go | 17 + smtp/path.go | 67 + smtpclient/client.go | 737 + smtpclient/client_test.go | 616 + smtpserver/alignment.go | 42 + smtpserver/analyze.go | 327 + smtpserver/authresults.go | 116 + smtpserver/authresults_test.go | 26 + smtpserver/dnsbl.go | 36 + smtpserver/dsn.go | 56 + smtpserver/error.go | 36 + smtpserver/fuzz_test.go | 113 + smtpserver/limitwriter.go | 25 + smtpserver/mx.go | 37 + smtpserver/parse.go | 447 + smtpserver/parse_test.go | 23 + smtpserver/rejects.go | 67 + smtpserver/reputation.go | 380 + smtpserver/reputation_test.go | 421 + smtpserver/server.go | 2070 +++ smtpserver/server_test.go | 749 + spf/parse.go | 466 + spf/parse_test.go | 138 + spf/received.go | 118 + spf/received_test.go | 39 + spf/spf.go | 958 ++ spf/spf_test.go | 521 + start.go | 44 + store/account.go | 1139 ++ store/account_test.go | 273 + store/msgreader.go | 135 + 
store/msgreader_test.go | 77 + store/state.go | 157 + store/tmp.go | 26 + store/train.go | 136 + store/transact.go | 24 + store/validation.go | 24 + subjectpass/subjectpass.go | 155 + subjectpass/subjectpass_test.go | 32 + testdata/dmarc-reports/google.eml | 64 + testdata/dmarc-reports/mailru.eml | 52 + testdata/dmarc-reports/outlook.eml | 116 + testdata/dmarc-reports/xs4all.eml | 70 + testdata/dmarc-reports/yahoo.eml | 49 + testdata/dsn/domains.conf | 26 + testdata/dsn/mox.conf | 8 + testdata/dsn/testsel.rsakey.pkcs8.pem | 30 + testdata/imap/domains.conf | 15 + testdata/imap/mox.conf | 14 + testdata/imaptest/domains.conf | 15 + testdata/imaptest/imaptest.mbox | 1778 ++ testdata/imaptest/mox.conf | 14 + .../cur/1642966915.1.mox:2, | 13 + .../new/1642968136.5.mox:2, | 37 + testdata/importtest.mbox | 60 + testdata/integration/Dockerfile.dns | 2 + testdata/integration/Dockerfile.moxmail | 4 + testdata/integration/Dockerfile.postfix | 2 + testdata/integration/dkim/mox1dkim0-key.pem | 5 + testdata/integration/dkim/mox2dkim0-key.pem | 5 + testdata/integration/dkim/mox3dkim0-key.pem | 5 + testdata/integration/dkim/postfix-key.pem | 5 + testdata/integration/dkim/readme.txt | 9 + testdata/integration/domains.conf | 83 + testdata/integration/example.zone | 32 + testdata/integration/mox.conf | 66 + testdata/integration/resolv.conf | 1 + testdata/integration/reverse.zone | 10 + testdata/integration/tls/Makefile | 6 + testdata/integration/tls/ca-key.pem | 5 + testdata/integration/tls/ca.csr | 7 + testdata/integration/tls/ca.pem | 10 + testdata/integration/tls/cfssl-ca-csr.json | 7 + testdata/integration/tls/moxmail1-key.pem | 5 + testdata/integration/tls/moxmail1.csr | 8 + testdata/integration/tls/moxmail1.pem | 12 + testdata/integration/tls/moxmail2-key.pem | 5 + testdata/integration/tls/moxmail2.csr | 8 + testdata/integration/tls/moxmail2.pem | 12 + testdata/integration/tls/moxmail3-key.pem | 5 + testdata/integration/tls/moxmail3.csr | 8 + 
testdata/integration/tls/moxmail3.pem | 12 + testdata/integration/tls/postfixmail-key.pem | 5 + testdata/integration/tls/postfixmail.csr | 8 + testdata/integration/tls/postfixmail.pem | 12 + testdata/integration/tls/readme.txt | 12 + testdata/integration/unbound.conf | 15 + testdata/junk/parse.eml | 160 + testdata/junk/parse2.eml | 165 + testdata/junk/parse3.eml | 714 + testdata/message/message-rfc822-multipart.eml | 57 + .../message/message-rfc822-multipart2.eml | 50 + testdata/queue/domains.conf | 7 + testdata/queue/mox.conf | 8 + testdata/smtp/dmarcreport/domains.conf | 11 + testdata/smtp/dmarcreport/mox.conf | 8 + testdata/smtp/domains.conf | 15 + testdata/smtp/junk/domains.conf | 36 + testdata/smtp/junk/mox.conf | 8 + testdata/smtp/mox.conf | 8 + testdata/smtp/tlsrpt/domains.conf | 11 + testdata/smtp/tlsrpt/mox.conf | 8 + testdata/store/domains.conf | 26 + testdata/store/mox.conf | 8 + testdata/tlsreports/example.eml | 72 + tlsrpt/doc.go | 6 + tlsrpt/lookup.go | 91 + tlsrpt/lookup_test.go | 46 + tlsrpt/parse.go | 226 + tlsrpt/parse_test.go | 83 + tlsrpt/report.go | 153 + tlsrpt/report_test.go | 149 + tlsrptdb/db.go | 192 + tlsrptdb/db_test.go | 126 + tools.go | 8 + updates.go | 133 + updates/updates.go | 282 + updates/updates_test.go | 152 + vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/exampledata.txt | 2388 +++ .../beorn7/perks/quantile/stream.go | 316 + .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 69 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 235 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 57 + vendor/github.com/golang/protobuf/AUTHORS | 3 + .../github.com/golang/protobuf/CONTRIBUTORS | 3 + vendor/github.com/golang/protobuf/LICENSE | 28 + .../golang/protobuf/proto/buffer.go | 324 + 
.../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 113 + .../golang/protobuf/proto/discard.go | 58 + .../golang/protobuf/proto/extensions.go | 356 + .../golang/protobuf/proto/properties.go | 306 + .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 317 + .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/.gitignore | 1 + .../pbutil/Makefile | 7 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + vendor/github.com/mjl-/bstore/.gitignore | 3 + vendor/github.com/mjl-/bstore/LICENSE | 7 + vendor/github.com/mjl-/bstore/Makefile | 20 + vendor/github.com/mjl-/bstore/README.md | 51 + vendor/github.com/mjl-/bstore/default.go | 80 + vendor/github.com/mjl-/bstore/doc.go | 142 + vendor/github.com/mjl-/bstore/equal.go | 91 + vendor/github.com/mjl-/bstore/exec.go | 568 + vendor/github.com/mjl-/bstore/export.go | 387 + vendor/github.com/mjl-/bstore/format.md | 78 + vendor/github.com/mjl-/bstore/gendoc.sh | 13 + vendor/github.com/mjl-/bstore/keys.go | 282 + vendor/github.com/mjl-/bstore/nonzero.go | 218 + vendor/github.com/mjl-/bstore/pack.go | 276 + vendor/github.com/mjl-/bstore/parse.go | 321 + vendor/github.com/mjl-/bstore/plan.go | 341 + vendor/github.com/mjl-/bstore/query.go | 1130 ++ vendor/github.com/mjl-/bstore/register.go | 1215 ++ vendor/github.com/mjl-/bstore/stats.go | 105 + vendor/github.com/mjl-/bstore/store.go | 566 + vendor/github.com/mjl-/bstore/tags.go | 69 + vendor/github.com/mjl-/bstore/tx.go | 438 + vendor/github.com/mjl-/sconf/.gitignore | 2 + vendor/github.com/mjl-/sconf/LICENSE | 7 + vendor/github.com/mjl-/sconf/Makefile | 12 + 
vendor/github.com/mjl-/sconf/README.txt | 6 + vendor/github.com/mjl-/sconf/describe.go | 264 + vendor/github.com/mjl-/sconf/doc.go | 106 + vendor/github.com/mjl-/sconf/parse.go | 308 + vendor/github.com/mjl-/sconf/sconf.go | 71 + vendor/github.com/mjl-/sherpa/.gitignore | 4 + vendor/github.com/mjl-/sherpa/LICENSE | 7 + vendor/github.com/mjl-/sherpa/LICENSE-go | 27 + vendor/github.com/mjl-/sherpa/Makefile | 16 + vendor/github.com/mjl-/sherpa/README.md | 39 + vendor/github.com/mjl-/sherpa/codes.go | 19 + vendor/github.com/mjl-/sherpa/collector.go | 21 + vendor/github.com/mjl-/sherpa/doc.go | 8 + vendor/github.com/mjl-/sherpa/handler.go | 653 + vendor/github.com/mjl-/sherpa/intstr.go | 87 + vendor/github.com/mjl-/sherpa/isclosed.go | 13 + .../github.com/mjl-/sherpa/isclosed_plan9.go | 6 + vendor/github.com/mjl-/sherpa/sherpajs.go | 136 + vendor/github.com/mjl-/sherpadoc/LICENSE | 7 + vendor/github.com/mjl-/sherpadoc/README.txt | 28 + vendor/github.com/mjl-/sherpadoc/check.go | 166 + .../mjl-/sherpadoc/cmd/sherpadoc/main.go | 270 + .../mjl-/sherpadoc/cmd/sherpadoc/parse.go | 857 + .../mjl-/sherpadoc/cmd/sherpadoc/sherpa.go | 85 + vendor/github.com/mjl-/sherpadoc/sherpadoc.go | 84 + vendor/github.com/mjl-/sherpaprom/LICENSE.md | 8 + vendor/github.com/mjl-/sherpaprom/README.md | 13 + .../github.com/mjl-/sherpaprom/collector.go | 123 + vendor/github.com/mjl-/xfmt/.gitignore | 1 + vendor/github.com/mjl-/xfmt/LICENSE | 7 + vendor/github.com/mjl-/xfmt/README.txt | 26 + vendor/github.com/mjl-/xfmt/xfmt.go | 207 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + .../prometheus/build_info_collector.go | 38 + .../client_golang/prometheus/collector.go | 128 + .../client_golang/prometheus/counter.go | 328 + .../client_golang/prometheus/desc.go | 189 + .../client_golang/prometheus/doc.go | 210 + .../prometheus/expvar_collector.go | 86 + 
.../client_golang/prometheus/fnv.go | 42 + .../client_golang/prometheus/gauge.go | 291 + .../client_golang/prometheus/get_pid.go | 26 + .../prometheus/get_pid_gopherjs.go | 23 + .../client_golang/prometheus/go_collector.go | 281 + .../prometheus/go_collector_go116.go | 122 + .../prometheus/go_collector_latest.go | 568 + .../client_golang/prometheus/histogram.go | 1484 ++ .../prometheus/internal/almost_equal.go | 60 + .../prometheus/internal/difflib.go | 654 + .../internal/go_collector_options.go | 32 + .../prometheus/internal/go_runtime_metrics.go | 142 + .../prometheus/internal/metric.go | 101 + .../client_golang/prometheus/labels.go | 88 + .../client_golang/prometheus/metric.go | 256 + .../client_golang/prometheus/num_threads.go | 25 + .../prometheus/num_threads_gopherjs.go | 22 + .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 164 + .../prometheus/process_collector_js.go | 26 + .../prometheus/process_collector_other.go | 66 + .../prometheus/process_collector_windows.go | 116 + .../client_golang/prometheus/promauto/auto.go | 376 + .../prometheus/promhttp/delegator.go | 374 + .../client_golang/prometheus/promhttp/http.go | 395 + .../prometheus/promhttp/instrument_client.go | 247 + .../prometheus/promhttp/instrument_server.go | 570 + .../prometheus/promhttp/option.go | 58 + .../client_golang/prometheus/registry.go | 1072 ++ .../client_golang/prometheus/summary.go | 747 + .../client_golang/prometheus/timer.go | 55 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 237 + .../client_golang/prometheus/vec.go | 642 + .../client_golang/prometheus/wrap.go | 216 + .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 914 + vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 + .../prometheus/common/expfmt/encode.go | 
162 + .../prometheus/common/expfmt/expfmt.go | 41 + .../prometheus/common/expfmt/fuzz.go | 37 + .../common/expfmt/openmetrics_create.go | 527 + .../prometheus/common/expfmt/text_create.go | 465 + .../prometheus/common/expfmt/text_parse.go | 775 + .../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 218 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 102 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 317 + .../prometheus/common/model/value.go | 416 + .../github.com/prometheus/procfs/.gitignore | 2 + .../prometheus/procfs/.golangci.yml | 12 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 121 + vendor/github.com/prometheus/procfs/LICENSE | 201 + .../prometheus/procfs/MAINTAINERS.md | 2 + vendor/github.com/prometheus/procfs/Makefile | 31 + .../prometheus/procfs/Makefile.common | 264 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 61 + .../github.com/prometheus/procfs/SECURITY.md | 6 + vendor/github.com/prometheus/procfs/arp.go | 116 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cmdline.go | 30 + .../github.com/prometheus/procfs/cpuinfo.go | 482 + .../prometheus/procfs/cpuinfo_armx.go | 20 + .../prometheus/procfs/cpuinfo_mipsx.go | 20 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 20 + .../prometheus/procfs/cpuinfo_riscvx.go | 20 + .../prometheus/procfs/cpuinfo_s390x.go | 19 + .../prometheus/procfs/cpuinfo_x86.go | 20 + vendor/github.com/prometheus/procfs/crypto.go | 153 + 
vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 43 + .../github.com/prometheus/procfs/fscache.go | 422 + .../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 97 + .../procfs/internal/util/readfile.go | 37 + .../procfs/internal/util/sysreadfile.go | 50 + .../internal/util/sysreadfile_compat.go | 27 + .../procfs/internal/util/valueparser.go | 91 + vendor/github.com/prometheus/procfs/ipvs.go | 240 + .../prometheus/procfs/kernel_random.go | 63 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 266 + .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 638 + .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 205 + .../prometheus/procfs/net_ip_socket.go | 226 + .../prometheus/procfs/net_protocols.go | 180 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 102 + .../github.com/prometheus/procfs/net_tcp.go | 64 + .../github.com/prometheus/procfs/net_udp.go | 64 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../github.com/prometheus/procfs/net_xfrm.go | 189 + .../github.com/prometheus/procfs/netstat.go | 68 + vendor/github.com/prometheus/procfs/proc.go | 319 + .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_cgroups.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 132 + .../github.com/prometheus/procfs/proc_io.go | 59 + .../prometheus/procfs/proc_limits.go | 160 + .../github.com/prometheus/procfs/proc_maps.go | 211 + .../prometheus/procfs/proc_netstat.go | 440 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_psi.go | 102 + .../prometheus/procfs/proc_smaps.go | 166 + .../github.com/prometheus/procfs/proc_snmp.go | 353 + .../prometheus/procfs/proc_snmp6.go 
| 381 + .../github.com/prometheus/procfs/proc_stat.go | 222 + .../prometheus/procfs/proc_status.go | 170 + .../github.com/prometheus/procfs/proc_sys.go | 51 + .../github.com/prometheus/procfs/schedstat.go | 121 + vendor/github.com/prometheus/procfs/slab.go | 151 + .../github.com/prometheus/procfs/softirqs.go | 160 + vendor/github.com/prometheus/procfs/stat.go | 244 + vendor/github.com/prometheus/procfs/swaps.go | 89 + vendor/github.com/prometheus/procfs/ttar | 413 + vendor/github.com/prometheus/procfs/vm.go | 210 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + vendor/go.etcd.io/bbolt/.gitignore | 7 + vendor/go.etcd.io/bbolt/.travis.yml | 18 + vendor/go.etcd.io/bbolt/LICENSE | 20 + vendor/go.etcd.io/bbolt/Makefile | 36 + vendor/go.etcd.io/bbolt/README.md | 958 ++ vendor/go.etcd.io/bbolt/bolt_386.go | 7 + vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 + vendor/go.etcd.io/bbolt/bolt_arm.go | 7 + vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 + vendor/go.etcd.io/bbolt/bolt_linux.go | 10 + vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 + vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 + vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 + vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 + vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 + vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 + vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 + vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 + vendor/go.etcd.io/bbolt/bolt_unix.go | 86 + vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 + vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 + vendor/go.etcd.io/bbolt/bolt_windows.go | 141 + vendor/go.etcd.io/bbolt/boltsync_unix.go | 8 + vendor/go.etcd.io/bbolt/bucket.go | 777 + vendor/go.etcd.io/bbolt/compact.go | 114 + vendor/go.etcd.io/bbolt/cursor.go | 396 + vendor/go.etcd.io/bbolt/db.go | 1232 ++ vendor/go.etcd.io/bbolt/doc.go | 44 + vendor/go.etcd.io/bbolt/errors.go | 71 + vendor/go.etcd.io/bbolt/freelist.go | 404 + vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 + vendor/go.etcd.io/bbolt/mlock_unix.go | 36 + 
vendor/go.etcd.io/bbolt/mlock_windows.go | 11 + vendor/go.etcd.io/bbolt/node.go | 602 + vendor/go.etcd.io/bbolt/page.go | 204 + vendor/go.etcd.io/bbolt/tx.go | 723 + vendor/go.etcd.io/bbolt/unsafe.go | 39 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/acme/acme.go | 818 + .../x/crypto/acme/autocert/autocert.go | 1198 ++ .../x/crypto/acme/autocert/cache.go | 135 + .../x/crypto/acme/autocert/listener.go | 155 + .../x/crypto/acme/autocert/renewal.go | 156 + vendor/golang.org/x/crypto/acme/http.go | 325 + vendor/golang.org/x/crypto/acme/jws.go | 257 + vendor/golang.org/x/crypto/acme/rfc8555.go | 476 + vendor/golang.org/x/crypto/acme/types.go | 614 + .../golang.org/x/crypto/acme/version_go112.go | 28 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 304 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 + .../x/crypto/blake2b/blake2b_amd64.go | 25 + .../x/crypto/blake2b/blake2b_amd64.s | 279 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 12 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 33 + vendor/golang.org/x/crypto/blowfish/block.go | 159 + vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + vendor/golang.org/x/mod/LICENSE | 27 + vendor/golang.org/x/mod/PATENTS | 22 + .../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/modfile/print.go | 174 + vendor/golang.org/x/mod/modfile/read.go | 958 ++ vendor/golang.org/x/mod/modfile/rule.go | 1556 ++ vendor/golang.org/x/mod/modfile/work.go | 234 + vendor/golang.org/x/mod/module/module.go | 841 + vendor/golang.org/x/mod/module/pseudo.go | 250 + vendor/golang.org/x/mod/semver/semver.go | 401 + 
vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/html/atom/atom.go | 78 + vendor/golang.org/x/net/html/atom/table.go | 783 + vendor/golang.org/x/net/html/const.go | 111 + vendor/golang.org/x/net/html/doc.go | 106 + vendor/golang.org/x/net/html/doctype.go | 156 + vendor/golang.org/x/net/html/entity.go | 2253 +++ vendor/golang.org/x/net/html/escape.go | 258 + vendor/golang.org/x/net/html/foreign.go | 222 + vendor/golang.org/x/net/html/node.go | 225 + vendor/golang.org/x/net/html/parse.go | 2460 +++ vendor/golang.org/x/net/html/render.go | 273 + vendor/golang.org/x/net/html/token.go | 1228 ++ vendor/golang.org/x/net/idna/go118.go | 14 + vendor/golang.org/x/net/idna/idna10.0.0.go | 770 + vendor/golang.org/x/net/idna/idna9.0.0.go | 718 + vendor/golang.org/x/net/idna/pre_go118.go | 12 + vendor/golang.org/x/net/idna/punycode.go | 217 + vendor/golang.org/x/net/idna/tables10.0.0.go | 4560 +++++ vendor/golang.org/x/net/idna/tables11.0.0.go | 4654 ++++++ vendor/golang.org/x/net/idna/tables12.0.0.go | 4734 ++++++ vendor/golang.org/x/net/idna/tables13.0.0.go | 4840 ++++++ vendor/golang.org/x/net/idna/tables9.0.0.go | 4487 +++++ vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 119 + vendor/golang.org/x/sys/LICENSE | 27 + vendor/golang.org/x/sys/PATENTS | 22 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 + vendor/golang.org/x/sys/cpu/byteorder.go | 66 + vendor/golang.org/x/sys/cpu/cpu.go | 287 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 17 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 38 + 
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 145 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 28 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 + vendor/golang.org/x/sys/execabs/execabs.go | 102 + .../golang.org/x/sys/execabs/execabs_go118.go | 12 + .../golang.org/x/sys/execabs/execabs_go119.go | 17 + .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/unix/.gitignore | 2 + vendor/golang.org/x/sys/unix/README.md | 184 + .../golang.org/x/sys/unix/affinity_linux.go | 86 + 
vendor/golang.org/x/sys/unix/aliases.go | 15 + vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 18 + vendor/golang.org/x/sys/unix/asm_bsd_386.s | 29 + vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 29 + vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 29 + vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 29 + vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 31 + .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 29 + vendor/golang.org/x/sys/unix/asm_linux_386.s | 66 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 58 + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 57 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 53 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 57 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 55 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 45 + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 49 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 57 + .../x/sys/unix/asm_openbsd_mips64.s | 30 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 18 + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 + .../golang.org/x/sys/unix/bluetooth_linux.go | 36 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 196 + vendor/golang.org/x/sys/unix/constants.go | 14 + vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 + vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 + vendor/golang.org/x/sys/unix/dev_darwin.go | 24 + vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 + vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 + vendor/golang.org/x/sys/unix/dev_linux.go | 42 + vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 + vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 + vendor/golang.org/x/sys/unix/dev_zos.go | 29 + vendor/golang.org/x/sys/unix/dirent.go | 103 + vendor/golang.org/x/sys/unix/endian_big.go | 10 + vendor/golang.org/x/sys/unix/endian_little.go | 10 + vendor/golang.org/x/sys/unix/env_unix.go | 32 + vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + vendor/golang.org/x/sys/unix/fcntl.go | 37 + 
vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 + .../x/sys/unix/fcntl_linux_32bit.go | 14 + vendor/golang.org/x/sys/unix/fdset.go | 30 + vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + vendor/golang.org/x/sys/unix/gccgo.go | 60 + vendor/golang.org/x/sys/unix/gccgo_c.c | 45 + .../x/sys/unix/gccgo_linux_amd64.go | 21 + vendor/golang.org/x/sys/unix/ifreq_linux.go | 142 + vendor/golang.org/x/sys/unix/ioctl.go | 75 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 233 + vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 + vendor/golang.org/x/sys/unix/mkall.sh | 249 + vendor/golang.org/x/sys/unix/mkerrors.sh | 778 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 16 + .../golang.org/x/sys/unix/pledge_openbsd.go | 163 + vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 + vendor/golang.org/x/sys/unix/race.go | 31 + vendor/golang.org/x/sys/unix/race0.go | 26 + .../x/sys/unix/readdirent_getdents.go | 13 + .../x/sys/unix/readdirent_getdirentries.go | 20 + .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 85 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 107 + .../x/sys/unix/sockcmsg_unix_other.go | 47 + vendor/golang.org/x/sys/unix/syscall.go | 87 + vendor/golang.org/x/sys/unix/syscall_aix.go | 600 + .../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 + .../x/sys/unix/syscall_aix_ppc64.go | 85 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 625 + .../golang.org/x/sys/unix/syscall_darwin.go | 830 + .../x/sys/unix/syscall_darwin_amd64.go | 51 + .../x/sys/unix/syscall_darwin_arm64.go | 51 + .../x/sys/unix/syscall_darwin_libSystem.go | 27 + .../x/sys/unix/syscall_dragonfly.go | 545 + .../x/sys/unix/syscall_dragonfly_amd64.go | 57 + .../golang.org/x/sys/unix/syscall_freebsd.go | 615 + .../x/sys/unix/syscall_freebsd_386.go | 67 + .../x/sys/unix/syscall_freebsd_amd64.go | 67 + .../x/sys/unix/syscall_freebsd_arm.go | 63 + .../x/sys/unix/syscall_freebsd_arm64.go | 63 + 
.../x/sys/unix/syscall_freebsd_riscv64.go | 63 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + .../golang.org/x/sys/unix/syscall_illumos.go | 79 + vendor/golang.org/x/sys/unix/syscall_linux.go | 2491 +++ .../x/sys/unix/syscall_linux_386.go | 342 + .../x/sys/unix/syscall_linux_alarm.go | 14 + .../x/sys/unix/syscall_linux_amd64.go | 147 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../x/sys/unix/syscall_linux_arm.go | 244 + .../x/sys/unix/syscall_linux_arm64.go | 195 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 15 + .../x/sys/unix/syscall_linux_gc_386.go | 17 + .../x/sys/unix/syscall_linux_gc_arm.go | 14 + .../x/sys/unix/syscall_linux_gccgo_386.go | 31 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 21 + .../x/sys/unix/syscall_linux_loong64.go | 222 + .../x/sys/unix/syscall_linux_mips64x.go | 191 + .../x/sys/unix/syscall_linux_mipsx.go | 203 + .../x/sys/unix/syscall_linux_ppc.go | 232 + .../x/sys/unix/syscall_linux_ppc64x.go | 118 + .../x/sys/unix/syscall_linux_riscv64.go | 180 + .../x/sys/unix/syscall_linux_s390x.go | 298 + .../x/sys/unix/syscall_linux_sparc64.go | 114 + .../golang.org/x/sys/unix/syscall_netbsd.go | 624 + .../x/sys/unix/syscall_netbsd_386.go | 38 + .../x/sys/unix/syscall_netbsd_amd64.go | 38 + .../x/sys/unix/syscall_netbsd_arm.go | 38 + .../x/sys/unix/syscall_netbsd_arm64.go | 38 + .../golang.org/x/sys/unix/syscall_openbsd.go | 390 + .../x/sys/unix/syscall_openbsd_386.go | 42 + .../x/sys/unix/syscall_openbsd_amd64.go | 42 + .../x/sys/unix/syscall_openbsd_arm.go | 42 + .../x/sys/unix/syscall_openbsd_arm64.go | 42 + .../x/sys/unix/syscall_openbsd_libc.go | 27 + .../x/sys/unix/syscall_openbsd_mips64.go | 39 + .../x/sys/unix/syscall_openbsd_ppc64.go | 42 + .../x/sys/unix/syscall_openbsd_riscv64.go | 42 + .../golang.org/x/sys/unix/syscall_solaris.go | 1133 ++ .../x/sys/unix/syscall_solaris_amd64.go | 28 + vendor/golang.org/x/sys/unix/syscall_unix.go | 589 + 
.../golang.org/x/sys/unix/syscall_unix_gc.go | 16 + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 25 + .../x/sys/unix/syscall_zos_s390x.go | 1994 +++ vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 + vendor/golang.org/x/sys/unix/sysvshm_unix.go | 52 + .../x/sys/unix/sysvshm_unix_other.go | 14 + vendor/golang.org/x/sys/unix/timestruct.go | 77 + .../golang.org/x/sys/unix/unveil_openbsd.go | 42 + vendor/golang.org/x/sys/unix/xattr_bsd.go | 276 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1385 ++ .../x/sys/unix/zerrors_aix_ppc64.go | 1386 ++ .../x/sys/unix/zerrors_darwin_amd64.go | 1892 +++ .../x/sys/unix/zerrors_darwin_arm64.go | 1892 +++ .../x/sys/unix/zerrors_dragonfly_amd64.go | 1738 ++ .../x/sys/unix/zerrors_freebsd_386.go | 2043 +++ .../x/sys/unix/zerrors_freebsd_amd64.go | 2040 +++ .../x/sys/unix/zerrors_freebsd_arm.go | 2034 +++ .../x/sys/unix/zerrors_freebsd_arm64.go | 2034 +++ .../x/sys/unix/zerrors_freebsd_riscv64.go | 2148 +++ vendor/golang.org/x/sys/unix/zerrors_linux.go | 3457 ++++ .../x/sys/unix/zerrors_linux_386.go | 828 + .../x/sys/unix/zerrors_linux_amd64.go | 828 + .../x/sys/unix/zerrors_linux_arm.go | 834 + .../x/sys/unix/zerrors_linux_arm64.go | 826 + .../x/sys/unix/zerrors_linux_loong64.go | 818 + .../x/sys/unix/zerrors_linux_mips.go | 835 + .../x/sys/unix/zerrors_linux_mips64.go | 835 + .../x/sys/unix/zerrors_linux_mips64le.go | 835 + .../x/sys/unix/zerrors_linux_mipsle.go | 835 + .../x/sys/unix/zerrors_linux_ppc.go | 887 + .../x/sys/unix/zerrors_linux_ppc64.go | 891 + .../x/sys/unix/zerrors_linux_ppc64le.go | 891 + .../x/sys/unix/zerrors_linux_riscv64.go | 815 + .../x/sys/unix/zerrors_linux_s390x.go | 890 + .../x/sys/unix/zerrors_linux_sparc64.go | 885 + .../x/sys/unix/zerrors_netbsd_386.go | 1780 ++ .../x/sys/unix/zerrors_netbsd_amd64.go | 1770 ++ .../x/sys/unix/zerrors_netbsd_arm.go | 1759 ++ .../x/sys/unix/zerrors_netbsd_arm64.go | 1770 ++ .../x/sys/unix/zerrors_openbsd_386.go | 1906 +++ .../x/sys/unix/zerrors_openbsd_amd64.go | 1906 +++ 
.../x/sys/unix/zerrors_openbsd_arm.go | 1906 +++ .../x/sys/unix/zerrors_openbsd_arm64.go | 1906 +++ .../x/sys/unix/zerrors_openbsd_mips64.go | 1906 +++ .../x/sys/unix/zerrors_openbsd_ppc64.go | 1905 +++ .../x/sys/unix/zerrors_openbsd_riscv64.go | 1904 +++ .../x/sys/unix/zerrors_solaris_amd64.go | 1557 ++ .../x/sys/unix/zerrors_zos_s390x.go | 860 + .../x/sys/unix/zptrace_armnn_linux.go | 42 + .../x/sys/unix/zptrace_linux_arm64.go | 17 + .../x/sys/unix/zptrace_mipsnn_linux.go | 51 + .../x/sys/unix/zptrace_mipsnnle_linux.go | 51 + .../x/sys/unix/zptrace_x86_linux.go | 81 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1485 ++ .../x/sys/unix/zsyscall_aix_ppc64.go | 1443 ++ .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 ++ .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 ++ .../x/sys/unix/zsyscall_darwin_amd64.go | 2545 +++ .../x/sys/unix/zsyscall_darwin_amd64.s | 904 + .../x/sys/unix/zsyscall_darwin_arm64.go | 2545 +++ .../x/sys/unix/zsyscall_darwin_arm64.s | 904 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1689 ++ .../x/sys/unix/zsyscall_freebsd_386.go | 1899 +++ .../x/sys/unix/zsyscall_freebsd_amd64.go | 1899 +++ .../x/sys/unix/zsyscall_freebsd_arm.go | 1899 +++ .../x/sys/unix/zsyscall_freebsd_arm64.go | 1899 +++ .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1899 +++ .../x/sys/unix/zsyscall_illumos_amd64.go | 102 + .../golang.org/x/sys/unix/zsyscall_linux.go | 2163 +++ .../x/sys/unix/zsyscall_linux_386.go | 497 + .../x/sys/unix/zsyscall_linux_amd64.go | 664 + .../x/sys/unix/zsyscall_linux_arm.go | 612 + .../x/sys/unix/zsyscall_linux_arm64.go | 563 + .../x/sys/unix/zsyscall_linux_loong64.go | 487 + .../x/sys/unix/zsyscall_linux_mips.go | 664 + .../x/sys/unix/zsyscall_linux_mips64.go | 658 + .../x/sys/unix/zsyscall_linux_mips64le.go | 647 + .../x/sys/unix/zsyscall_linux_mipsle.go | 664 + .../x/sys/unix/zsyscall_linux_ppc.go | 669 + .../x/sys/unix/zsyscall_linux_ppc64.go | 715 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 715 + 
.../x/sys/unix/zsyscall_linux_riscv64.go | 543 + .../x/sys/unix/zsyscall_linux_s390x.go | 506 + .../x/sys/unix/zsyscall_linux_sparc64.go | 659 + .../x/sys/unix/zsyscall_netbsd_386.go | 1860 +++ .../x/sys/unix/zsyscall_netbsd_amd64.go | 1860 +++ .../x/sys/unix/zsyscall_netbsd_arm.go | 1860 +++ .../x/sys/unix/zsyscall_netbsd_arm64.go | 1860 +++ .../x/sys/unix/zsyscall_openbsd_386.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_386.s | 669 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_amd64.s | 669 + .../x/sys/unix/zsyscall_openbsd_arm.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_arm.s | 669 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_arm64.s | 669 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_ppc64.s | 802 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2235 +++ .../x/sys/unix/zsyscall_openbsd_riscv64.s | 669 + .../x/sys/unix/zsyscall_solaris_amd64.go | 2106 +++ .../x/sys/unix/zsyscall_zos_s390x.go | 1255 ++ .../x/sys/unix/zsysctl_openbsd_386.go | 281 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 281 + .../x/sys/unix/zsysctl_openbsd_arm.go | 281 + .../x/sys/unix/zsysctl_openbsd_arm64.go | 281 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 281 + .../x/sys/unix/zsysctl_openbsd_ppc64.go | 281 + .../x/sys/unix/zsysctl_openbsd_riscv64.go | 282 + .../x/sys/unix/zsysnum_darwin_amd64.go | 440 + .../x/sys/unix/zsysnum_darwin_arm64.go | 438 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 317 + .../x/sys/unix/zsysnum_freebsd_386.go | 394 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 394 + .../x/sys/unix/zsysnum_freebsd_arm.go | 394 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 394 + .../x/sys/unix/zsysnum_freebsd_riscv64.go | 394 + .../x/sys/unix/zsysnum_linux_386.go | 450 + .../x/sys/unix/zsysnum_linux_amd64.go | 372 + .../x/sys/unix/zsysnum_linux_arm.go | 414 + 
.../x/sys/unix/zsysnum_linux_arm64.go | 317 + .../x/sys/unix/zsysnum_linux_loong64.go | 311 + .../x/sys/unix/zsysnum_linux_mips.go | 434 + .../x/sys/unix/zsysnum_linux_mips64.go | 364 + .../x/sys/unix/zsysnum_linux_mips64le.go | 364 + .../x/sys/unix/zsysnum_linux_mipsle.go | 434 + .../x/sys/unix/zsysnum_linux_ppc.go | 441 + .../x/sys/unix/zsysnum_linux_ppc64.go | 413 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 413 + .../x/sys/unix/zsysnum_linux_riscv64.go | 316 + .../x/sys/unix/zsysnum_linux_s390x.go | 378 + .../x/sys/unix/zsysnum_linux_sparc64.go | 392 + .../x/sys/unix/zsysnum_netbsd_386.go | 275 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 275 + .../x/sys/unix/zsysnum_netbsd_arm.go | 275 + .../x/sys/unix/zsysnum_netbsd_arm64.go | 275 + .../x/sys/unix/zsysnum_openbsd_386.go | 220 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 220 + .../x/sys/unix/zsysnum_openbsd_arm.go | 220 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 219 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 222 + .../x/sys/unix/zsysnum_openbsd_ppc64.go | 218 + .../x/sys/unix/zsysnum_openbsd_riscv64.go | 219 + .../x/sys/unix/zsysnum_zos_s390x.go | 2670 +++ .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 354 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 358 + .../x/sys/unix/ztypes_darwin_amd64.go | 795 + .../x/sys/unix/ztypes_darwin_arm64.go | 795 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 474 + .../x/sys/unix/ztypes_freebsd_386.go | 651 + .../x/sys/unix/ztypes_freebsd_amd64.go | 656 + .../x/sys/unix/ztypes_freebsd_arm.go | 642 + .../x/sys/unix/ztypes_freebsd_arm64.go | 636 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 638 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 5609 +++++++ .../golang.org/x/sys/unix/ztypes_linux_386.go | 696 + .../x/sys/unix/ztypes_linux_amd64.go | 711 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 691 + .../x/sys/unix/ztypes_linux_arm64.go | 690 + .../x/sys/unix/ztypes_linux_loong64.go | 691 + .../x/sys/unix/ztypes_linux_mips.go | 696 + .../x/sys/unix/ztypes_linux_mips64.go | 
693 + .../x/sys/unix/ztypes_linux_mips64le.go | 693 + .../x/sys/unix/ztypes_linux_mipsle.go | 696 + .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 704 + .../x/sys/unix/ztypes_linux_ppc64.go | 699 + .../x/sys/unix/ztypes_linux_ppc64le.go | 699 + .../x/sys/unix/ztypes_linux_riscv64.go | 718 + .../x/sys/unix/ztypes_linux_s390x.go | 713 + .../x/sys/unix/ztypes_linux_sparc64.go | 694 + .../x/sys/unix/ztypes_netbsd_386.go | 586 + .../x/sys/unix/ztypes_netbsd_amd64.go | 594 + .../x/sys/unix/ztypes_netbsd_arm.go | 591 + .../x/sys/unix/ztypes_netbsd_arm64.go | 594 + .../x/sys/unix/ztypes_openbsd_386.go | 569 + .../x/sys/unix/ztypes_openbsd_amd64.go | 569 + .../x/sys/unix/ztypes_openbsd_arm.go | 576 + .../x/sys/unix/ztypes_openbsd_arm64.go | 569 + .../x/sys/unix/ztypes_openbsd_mips64.go | 569 + .../x/sys/unix/ztypes_openbsd_ppc64.go | 571 + .../x/sys/unix/ztypes_openbsd_riscv64.go | 571 + .../x/sys/unix/ztypes_solaris_amd64.go | 517 + .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 415 + vendor/golang.org/x/sys/windows/aliases.go | 13 + .../golang.org/x/sys/windows/dll_windows.go | 416 + vendor/golang.org/x/sys/windows/empty.s | 9 + .../golang.org/x/sys/windows/env_windows.go | 54 + vendor/golang.org/x/sys/windows/eventlog.go | 21 + .../golang.org/x/sys/windows/exec_windows.go | 178 + .../x/sys/windows/memory_windows.go | 48 + vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 10 + vendor/golang.org/x/sys/windows/race.go | 31 + vendor/golang.org/x/sys/windows/race0.go | 26 + .../x/sys/windows/security_windows.go | 1444 ++ vendor/golang.org/x/sys/windows/service.go | 247 + .../x/sys/windows/setupapi_windows.go | 1425 ++ vendor/golang.org/x/sys/windows/str.go | 23 + vendor/golang.org/x/sys/windows/syscall.go | 105 + .../x/sys/windows/syscall_windows.go | 1816 ++ .../golang.org/x/sys/windows/types_windows.go | 3260 ++++ .../x/sys/windows/types_windows_386.go | 35 + 
.../x/sys/windows/types_windows_amd64.go | 34 + .../x/sys/windows/types_windows_arm.go | 35 + .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 9468 +++++++++++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 4309 +++++ vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + .../x/text/secure/bidirule/bidirule.go | 336 + .../x/text/secure/bidirule/bidirule10.0.0.go | 12 + .../x/text/secure/bidirule/bidirule9.0.0.go | 15 + .../golang.org/x/text/transform/transform.go | 709 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 359 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1071 ++ vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/tables10.0.0.go | 1816 ++ .../x/text/unicode/bidi/tables11.0.0.go | 1888 +++ .../x/text/unicode/bidi/tables12.0.0.go | 1924 +++ .../x/text/unicode/bidi/tables13.0.0.go | 1956 +++ .../x/text/unicode/bidi/tables9.0.0.go | 1782 ++ .../golang.org/x/text/unicode/bidi/trieval.go | 48 + .../x/text/unicode/norm/composition.go | 512 + .../x/text/unicode/norm/forminfo.go | 279 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 610 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7658 +++++++++ .../x/text/unicode/norm/tables11.0.0.go | 7694 +++++++++ .../x/text/unicode/norm/tables12.0.0.go | 7711 +++++++++ .../x/text/unicode/norm/tables13.0.0.go | 7761 +++++++++ .../x/text/unicode/norm/tables9.0.0.go | 7638 +++++++++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/gcexportdata/gcexportdata.go | 177 + .../x/tools/go/gcexportdata/importer.go | 75 + 
.../x/tools/go/internal/gcimporter/bexport.go | 853 + .../x/tools/go/internal/gcimporter/bimport.go | 1053 ++ .../go/internal/gcimporter/exportdata.go | 99 + .../go/internal/gcimporter/gcimporter.go | 1125 ++ .../x/tools/go/internal/gcimporter/iexport.go | 1010 ++ .../x/tools/go/internal/gcimporter/iimport.go | 878 + .../go/internal/gcimporter/newInterface10.go | 22 + .../go/internal/gcimporter/newInterface11.go | 14 + .../go/internal/gcimporter/support_go117.go | 16 + .../go/internal/gcimporter/support_go118.go | 23 + .../go/internal/gcimporter/unified_no.go | 10 + .../go/internal/gcimporter/unified_yes.go | 10 + .../go/internal/gcimporter/ureader_no.go | 19 + .../go/internal/gcimporter/ureader_yes.go | 612 + .../tools/go/internal/packagesdriver/sizes.go | 49 + .../x/tools/go/internal/pkgbits/codes.go | 77 + .../x/tools/go/internal/pkgbits/decoder.go | 433 + .../x/tools/go/internal/pkgbits/doc.go | 32 + .../x/tools/go/internal/pkgbits/encoder.go | 379 + .../x/tools/go/internal/pkgbits/flags.go | 9 + .../x/tools/go/internal/pkgbits/frames_go1.go | 21 + .../tools/go/internal/pkgbits/frames_go17.go | 28 + .../x/tools/go/internal/pkgbits/reloc.go | 42 + .../x/tools/go/internal/pkgbits/support.go | 17 + .../x/tools/go/internal/pkgbits/sync.go | 113 + .../go/internal/pkgbits/syncmarker_string.go | 89 + vendor/golang.org/x/tools/go/packages/doc.go | 220 + .../x/tools/go/packages/external.go | 101 + .../golang.org/x/tools/go/packages/golist.go | 1173 ++ .../x/tools/go/packages/golist_overlay.go | 575 + .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 1273 ++ .../golang.org/x/tools/go/packages/visit.go | 59 + .../x/tools/internal/event/core/event.go | 85 + .../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go 
| 22 + .../x/tools/internal/event/label/label.go | 215 + .../x/tools/internal/gocommand/invoke.go | 283 + .../x/tools/internal/gocommand/vendor.go | 109 + .../x/tools/internal/gocommand/version.go | 51 + .../internal/packagesinternal/packages.go | 30 + .../x/tools/internal/typeparams/common.go | 179 + .../x/tools/internal/typeparams/coretype.go | 122 + .../internal/typeparams/enabled_go117.go | 12 + .../internal/typeparams/enabled_go118.go | 15 + .../x/tools/internal/typeparams/normalize.go | 218 + .../x/tools/internal/typeparams/termlist.go | 163 + .../internal/typeparams/typeparams_go117.go | 197 + .../internal/typeparams/typeparams_go118.go | 151 + .../x/tools/internal/typeparams/typeterm.go | 170 + .../tools/internal/typesinternal/errorcode.go | 1526 ++ .../typesinternal/errorcode_string.go | 167 + .../x/tools/internal/typesinternal/types.go | 52 + .../tools/internal/typesinternal/types_118.go | 19 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/prototext/decode.go | 770 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 370 + .../protobuf/encoding/protowire/wire.go | 551 + .../protobuf/internal/descfmt/stringer.go | 318 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 69 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 242 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 685 + .../internal/encoding/text/decode_number.go | 192 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 270 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 40 + .../protobuf/internal/errors/is_go113.go | 13 + .../protobuf/internal/filedesc/build.go | 157 + 
.../protobuf/internal/filedesc/desc.go | 633 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 457 + .../internal/filedesc/desc_list_gen.go | 356 + .../protobuf/internal/filedesc/placeholder.go | 109 + .../protobuf/internal/filetype/build.go | 296 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 10 + .../internal/flags/proto_legacy_enable.go | 10 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 830 + .../protobuf/internal/impl/codec_gen.go | 5637 +++++++ .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 38 + .../protobuf/internal/impl/codec_map_go112.go | 12 + .../protobuf/internal/impl/codec_message.go | 217 + .../internal/impl/codec_messageset.go | 123 + .../protobuf/internal/impl/codec_reflect.go | 210 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 18 + .../protobuf/internal/impl/convert.go | 496 + .../protobuf/internal/impl/convert_list.go | 141 + 
.../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 285 + .../protobuf/internal/impl/encode.go | 201 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 218 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 176 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 563 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 279 + .../protobuf/internal/impl/message_reflect.go | 463 + .../internal/impl/message_reflect_field.go | 543 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 179 + .../protobuf/internal/impl/pointer_unsafe.go | 175 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 28 + .../protobuf/internal/strs/strings_unsafe.go | 95 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 294 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 89 + .../protobuf/proto/encode.go | 322 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 171 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 93 + .../google.golang.org/protobuf/proto/proto.go | 43 + .../protobuf/proto/proto_methods.go | 20 + .../protobuf/proto/proto_reflect.go | 20 + .../google.golang.org/protobuf/proto/reset.go | 43 + 
.../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/methods.go | 78 + .../protobuf/reflect/protoreflect/proto.go | 508 + .../protobuf/reflect/protoreflect/source.go | 129 + .../reflect/protoreflect/source_gen.go | 461 + .../protobuf/reflect/protoreflect/type.go | 666 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 60 + .../reflect/protoreflect/value_union.go | 438 + .../reflect/protoreflect/value_unsafe.go | 99 + .../reflect/protoregistry/registry.go | 882 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 168 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 60 + .../types/descriptorpb/descriptor.pb.go | 3957 +++++ .../types/known/timestamppb/timestamp.pb.go | 390 + vendor/modules.txt | 131 + 1256 files changed, 491723 insertions(+) create mode 100644 .dockerignore create mode 100644 .gitignore create mode 100644 .go/empty create mode 100644 .jshintrc create mode 100644 Dockerfile create mode 100644 Dockerfile.imaptest create mode 100644 LICENSE.MIT create mode 100644 LICENSE.MPLv2.0 create mode 100644 Makefile create mode 100644 README.md create mode 100644 autotls/autotls.go create mode 100644 autotls/autotls_test.go create mode 100755 checkhtmljs create mode 100644 compatibility.txt create mode 100644 config/config.go create mode 100644 config/doc.go create mode 100644 ctl.go create mode 100644 dkim/dkim.go create mode 100644 dkim/dkim_test.go create mode 100644 dkim/fuzz_test.go create mode 100644 dkim/parser.go create mode 100644 dkim/policy.go create mode 100644 dkim/sig.go create 
mode 100644 dkim/sig_test.go create mode 100644 dkim/txt.go create mode 100644 dkim/txt_test.go create mode 100644 dmarc/dmarc.go create mode 100644 dmarc/dmarc_test.go create mode 100644 dmarc/fuzz_test.go create mode 100644 dmarc/parse.go create mode 100644 dmarc/parse_test.go create mode 100644 dmarc/txt.go create mode 100644 dmarcdb/db.go create mode 100644 dmarcdb/db_test.go create mode 100644 dmarcrpt/feedback.go create mode 100644 dmarcrpt/parse.go create mode 100644 dmarcrpt/parse_test.go create mode 100644 dns/dns.go create mode 100644 dns/dns_test.go create mode 100644 dns/ipdomain.go create mode 100644 dns/mock.go create mode 100644 dns/resolver.go create mode 100644 dnsbl/dnsbl.go create mode 100644 dnsbl/dnsbl_test.go create mode 100644 doc.go create mode 100644 docker-compose-imaptest.yml create mode 100644 docker-compose-integration.yml create mode 100644 dsn/dsn.go create mode 100644 dsn/dsn_test.go create mode 100644 dsn/nameip.go create mode 100644 dsn/parse.go create mode 100644 export.go create mode 100755 gendoc.sh create mode 100644 go.mod create mode 100644 go.sum create mode 100644 http/account.go create mode 100644 http/account.html create mode 100644 http/account_test.go create mode 100644 http/accountapi.json create mode 100644 http/admin.go create mode 100644 http/admin.html create mode 100644 http/admin_test.go create mode 100644 http/adminapi.json create mode 100644 http/autoconf.go create mode 100644 http/autoconf_test.go create mode 100644 http/mtasts.go create mode 100644 http/mtasts_test.go create mode 100644 http/web.go create mode 100644 imapclient/client.go create mode 100644 imapclient/cmds.go create mode 100644 imapclient/parse.go create mode 100644 imapclient/protocol.go create mode 100644 imapserver/append_test.go create mode 100644 imapserver/authenticate_test.go create mode 100644 imapserver/copy_test.go create mode 100644 imapserver/create_test.go create mode 100644 imapserver/delete_test.go create mode 100644 
imapserver/error.go create mode 100644 imapserver/expunge_test.go create mode 100644 imapserver/fetch.go create mode 100644 imapserver/fetch_test.go create mode 100644 imapserver/fuzz_test.go create mode 100644 imapserver/idle_test.go create mode 100644 imapserver/list.go create mode 100644 imapserver/list_test.go create mode 100644 imapserver/lsub_test.go create mode 100644 imapserver/move_test.go create mode 100644 imapserver/pack.go create mode 100644 imapserver/parse.go create mode 100644 imapserver/prefixconn.go create mode 100644 imapserver/protocol.go create mode 100644 imapserver/protocol_test.go create mode 100644 imapserver/rename_test.go create mode 100644 imapserver/search.go create mode 100644 imapserver/search_test.go create mode 100644 imapserver/selectexamine_test.go create mode 100644 imapserver/server.go create mode 100644 imapserver/server_test.go create mode 100644 imapserver/starttls_test.go create mode 100644 imapserver/status_test.go create mode 100644 imapserver/store_test.go create mode 100644 imapserver/subscribe_test.go create mode 100644 imapserver/unselect_test.go create mode 100644 imapserver/unsubscribe_test.go create mode 100644 imapserver/utf7.go create mode 100644 imapserver/utf7_test.go create mode 100644 import.go create mode 100644 import_test.go create mode 100644 integration_test.go create mode 100644 iprev/iprev.go create mode 100644 iprev/iprev_test.go create mode 100644 junk.go create mode 100644 junk/bloom.go create mode 100644 junk/bloom_test.go create mode 100644 junk/filter.go create mode 100644 junk/filter_test.go create mode 100644 junk/parse.go create mode 100644 junk/parse_test.go create mode 100644 main.go create mode 100644 main_test.go create mode 100644 message/doc.go create mode 100644 message/from.go create mode 100644 message/headerwriter.go create mode 100644 message/part.go create mode 100644 message/part_test.go create mode 100644 message/readheaders.go create mode 100644 message/time.go create mode 100644 
message/todo.go create mode 100644 message/writer.go create mode 100644 message/writer_test.go create mode 100644 metrics/auth.go create mode 100644 metrics/http.go create mode 100644 metrics/panic.go create mode 100644 mlog/log.go create mode 100644 mox-/admin.go create mode 100644 mox-/cid.go create mode 100644 mox-/config.go create mode 100644 mox-/dir.go create mode 100644 mox-/doc.go create mode 100644 mox-/ip.go create mode 100644 mox-/lastknown.go create mode 100644 mox-/lifecycle.go create mode 100644 mox-/lifecycle_test.go create mode 100644 mox-/lookup.go create mode 100644 mox-/msgid.go create mode 100644 mox-/rand.go create mode 100644 mox-/recvid.go create mode 100644 mox-/setcaphint.go create mode 100644 mox-/sleep.go create mode 100644 mox-/tlsinfo.go create mode 100644 mox.service create mode 100644 moxio/atreader.go create mode 100644 moxio/bufpool.go create mode 100644 moxio/bufpool_test.go create mode 100644 moxio/doc.go create mode 100644 moxio/isclosed.go create mode 100644 moxio/limitatreader.go create mode 100644 moxio/limitreader.go create mode 100644 moxio/prefixconn.go create mode 100644 moxio/storagespace.go create mode 100644 moxio/syncdir.go create mode 100644 moxio/trace.go create mode 100644 moxio/umask.go create mode 100644 moxvar/version.go create mode 100644 mtasts/mtasts.go create mode 100644 mtasts/mtasts_test.go create mode 100644 mtasts/parse.go create mode 100644 mtasts/parse_test.go create mode 100644 mtastsdb/db.go create mode 100644 mtastsdb/db_test.go create mode 100644 mtastsdb/refresh.go create mode 100644 mtastsdb/refresh_test.go create mode 100644 publicsuffix/list.go create mode 100644 publicsuffix/list_test.go create mode 100644 publicsuffix/public_suffix_list.txt create mode 100644 queue/dsn.go create mode 100644 queue/queue.go create mode 100644 queue/queue_test.go create mode 100644 quickstart.go create mode 100644 rfc/Makefile create mode 100644 rfc/errata.go create mode 100755 rfc/fetch.sh create mode 100644 
rfc/index.md create mode 100644 rfc/link.go create mode 100644 scram/parse.go create mode 100644 scram/scram.go create mode 100644 scram/scram_test.go create mode 100644 serve.go create mode 100644 smtp/address.go create mode 100644 smtp/address_test.go create mode 100644 smtp/addrlit.go create mode 100644 smtp/codes.go create mode 100644 smtp/data.go create mode 100644 smtp/data_test.go create mode 100644 smtp/doc.go create mode 100644 smtp/ehlo.go create mode 100644 smtp/path.go create mode 100644 smtpclient/client.go create mode 100644 smtpclient/client_test.go create mode 100644 smtpserver/alignment.go create mode 100644 smtpserver/analyze.go create mode 100644 smtpserver/authresults.go create mode 100644 smtpserver/authresults_test.go create mode 100644 smtpserver/dnsbl.go create mode 100644 smtpserver/dsn.go create mode 100644 smtpserver/error.go create mode 100644 smtpserver/fuzz_test.go create mode 100644 smtpserver/limitwriter.go create mode 100644 smtpserver/mx.go create mode 100644 smtpserver/parse.go create mode 100644 smtpserver/parse_test.go create mode 100644 smtpserver/rejects.go create mode 100644 smtpserver/reputation.go create mode 100644 smtpserver/reputation_test.go create mode 100644 smtpserver/server.go create mode 100644 smtpserver/server_test.go create mode 100644 spf/parse.go create mode 100644 spf/parse_test.go create mode 100644 spf/received.go create mode 100644 spf/received_test.go create mode 100644 spf/spf.go create mode 100644 spf/spf_test.go create mode 100644 start.go create mode 100644 store/account.go create mode 100644 store/account_test.go create mode 100644 store/msgreader.go create mode 100644 store/msgreader_test.go create mode 100644 store/state.go create mode 100644 store/tmp.go create mode 100644 store/train.go create mode 100644 store/transact.go create mode 100644 store/validation.go create mode 100644 subjectpass/subjectpass.go create mode 100644 subjectpass/subjectpass_test.go create mode 100644 
testdata/dmarc-reports/google.eml create mode 100644 testdata/dmarc-reports/mailru.eml create mode 100644 testdata/dmarc-reports/outlook.eml create mode 100644 testdata/dmarc-reports/xs4all.eml create mode 100644 testdata/dmarc-reports/yahoo.eml create mode 100644 testdata/dsn/domains.conf create mode 100644 testdata/dsn/mox.conf create mode 100644 testdata/dsn/testsel.rsakey.pkcs8.pem create mode 100644 testdata/imap/domains.conf create mode 100644 testdata/imap/mox.conf create mode 100644 testdata/imaptest/domains.conf create mode 100644 testdata/imaptest/imaptest.mbox create mode 100644 testdata/imaptest/mox.conf create mode 100644 testdata/importtest.maildir/cur/1642966915.1.mox:2, create mode 100644 testdata/importtest.maildir/new/1642968136.5.mox:2, create mode 100644 testdata/importtest.mbox create mode 100644 testdata/integration/Dockerfile.dns create mode 100644 testdata/integration/Dockerfile.moxmail create mode 100644 testdata/integration/Dockerfile.postfix create mode 100644 testdata/integration/dkim/mox1dkim0-key.pem create mode 100644 testdata/integration/dkim/mox2dkim0-key.pem create mode 100644 testdata/integration/dkim/mox3dkim0-key.pem create mode 100644 testdata/integration/dkim/postfix-key.pem create mode 100644 testdata/integration/dkim/readme.txt create mode 100644 testdata/integration/domains.conf create mode 100644 testdata/integration/example.zone create mode 100644 testdata/integration/mox.conf create mode 100644 testdata/integration/resolv.conf create mode 100644 testdata/integration/reverse.zone create mode 100644 testdata/integration/tls/Makefile create mode 100644 testdata/integration/tls/ca-key.pem create mode 100644 testdata/integration/tls/ca.csr create mode 100644 testdata/integration/tls/ca.pem create mode 100644 testdata/integration/tls/cfssl-ca-csr.json create mode 100644 testdata/integration/tls/moxmail1-key.pem create mode 100644 testdata/integration/tls/moxmail1.csr create mode 100644 testdata/integration/tls/moxmail1.pem 
create mode 100644 testdata/integration/tls/moxmail2-key.pem create mode 100644 testdata/integration/tls/moxmail2.csr create mode 100644 testdata/integration/tls/moxmail2.pem create mode 100644 testdata/integration/tls/moxmail3-key.pem create mode 100644 testdata/integration/tls/moxmail3.csr create mode 100644 testdata/integration/tls/moxmail3.pem create mode 100644 testdata/integration/tls/postfixmail-key.pem create mode 100644 testdata/integration/tls/postfixmail.csr create mode 100644 testdata/integration/tls/postfixmail.pem create mode 100644 testdata/integration/tls/readme.txt create mode 100644 testdata/integration/unbound.conf create mode 100644 testdata/junk/parse.eml create mode 100644 testdata/junk/parse2.eml create mode 100644 testdata/junk/parse3.eml create mode 100644 testdata/message/message-rfc822-multipart.eml create mode 100644 testdata/message/message-rfc822-multipart2.eml create mode 100644 testdata/queue/domains.conf create mode 100644 testdata/queue/mox.conf create mode 100644 testdata/smtp/dmarcreport/domains.conf create mode 100644 testdata/smtp/dmarcreport/mox.conf create mode 100644 testdata/smtp/domains.conf create mode 100644 testdata/smtp/junk/domains.conf create mode 100644 testdata/smtp/junk/mox.conf create mode 100644 testdata/smtp/mox.conf create mode 100644 testdata/smtp/tlsrpt/domains.conf create mode 100644 testdata/smtp/tlsrpt/mox.conf create mode 100644 testdata/store/domains.conf create mode 100644 testdata/store/mox.conf create mode 100644 testdata/tlsreports/example.eml create mode 100644 tlsrpt/doc.go create mode 100644 tlsrpt/lookup.go create mode 100644 tlsrpt/lookup_test.go create mode 100644 tlsrpt/parse.go create mode 100644 tlsrpt/parse_test.go create mode 100644 tlsrpt/report.go create mode 100644 tlsrpt/report_test.go create mode 100644 tlsrptdb/db.go create mode 100644 tlsrptdb/db_test.go create mode 100644 tools.go create mode 100644 updates.go create mode 100644 updates/updates.go create mode 100644 
updates/updates_test.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/AUTHORS create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 
100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/mjl-/bstore/.gitignore create mode 100644 vendor/github.com/mjl-/bstore/LICENSE create mode 100644 vendor/github.com/mjl-/bstore/Makefile create mode 100644 vendor/github.com/mjl-/bstore/README.md create mode 100644 vendor/github.com/mjl-/bstore/default.go create mode 100644 vendor/github.com/mjl-/bstore/doc.go create mode 100644 vendor/github.com/mjl-/bstore/equal.go create mode 100644 vendor/github.com/mjl-/bstore/exec.go create mode 100644 vendor/github.com/mjl-/bstore/export.go create mode 100644 vendor/github.com/mjl-/bstore/format.md create mode 100644 vendor/github.com/mjl-/bstore/gendoc.sh create mode 100644 vendor/github.com/mjl-/bstore/keys.go create mode 100644 vendor/github.com/mjl-/bstore/nonzero.go create mode 100644 vendor/github.com/mjl-/bstore/pack.go create mode 100644 vendor/github.com/mjl-/bstore/parse.go create mode 100644 vendor/github.com/mjl-/bstore/plan.go create mode 100644 vendor/github.com/mjl-/bstore/query.go create mode 100644 vendor/github.com/mjl-/bstore/register.go create mode 100644 vendor/github.com/mjl-/bstore/stats.go create mode 100644 vendor/github.com/mjl-/bstore/store.go create mode 100644 vendor/github.com/mjl-/bstore/tags.go create mode 100644 vendor/github.com/mjl-/bstore/tx.go create mode 100644 vendor/github.com/mjl-/sconf/.gitignore create mode 100644 vendor/github.com/mjl-/sconf/LICENSE create mode 100644 vendor/github.com/mjl-/sconf/Makefile create mode 100644 
vendor/github.com/mjl-/sconf/README.txt create mode 100644 vendor/github.com/mjl-/sconf/describe.go create mode 100644 vendor/github.com/mjl-/sconf/doc.go create mode 100644 vendor/github.com/mjl-/sconf/parse.go create mode 100644 vendor/github.com/mjl-/sconf/sconf.go create mode 100644 vendor/github.com/mjl-/sherpa/.gitignore create mode 100644 vendor/github.com/mjl-/sherpa/LICENSE create mode 100644 vendor/github.com/mjl-/sherpa/LICENSE-go create mode 100644 vendor/github.com/mjl-/sherpa/Makefile create mode 100644 vendor/github.com/mjl-/sherpa/README.md create mode 100644 vendor/github.com/mjl-/sherpa/codes.go create mode 100644 vendor/github.com/mjl-/sherpa/collector.go create mode 100644 vendor/github.com/mjl-/sherpa/doc.go create mode 100644 vendor/github.com/mjl-/sherpa/handler.go create mode 100644 vendor/github.com/mjl-/sherpa/intstr.go create mode 100644 vendor/github.com/mjl-/sherpa/isclosed.go create mode 100644 vendor/github.com/mjl-/sherpa/isclosed_plan9.go create mode 100644 vendor/github.com/mjl-/sherpa/sherpajs.go create mode 100644 vendor/github.com/mjl-/sherpadoc/LICENSE create mode 100644 vendor/github.com/mjl-/sherpadoc/README.txt create mode 100644 vendor/github.com/mjl-/sherpadoc/check.go create mode 100644 vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go create mode 100644 vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go create mode 100644 vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go create mode 100644 vendor/github.com/mjl-/sherpadoc/sherpadoc.go create mode 100644 vendor/github.com/mjl-/sherpaprom/LICENSE.md create mode 100644 vendor/github.com/mjl-/sherpaprom/README.md create mode 100644 vendor/github.com/mjl-/sherpaprom/collector.go create mode 100644 vendor/github.com/mjl-/xfmt/.gitignore create mode 100644 vendor/github.com/mjl-/xfmt/LICENSE create mode 100644 vendor/github.com/mjl-/xfmt/README.txt create mode 100644 vendor/github.com/mjl-/xfmt/xfmt.go create mode 100644 
vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 
100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cmdline.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_armx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go 
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go create mode 100644 vendor/github.com/prometheus/procfs/net_protocols.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go create mode 100644 vendor/github.com/prometheus/procfs/net_xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroups.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 
vendor/github.com/prometheus/procfs/proc_netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp6.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/proc_sys.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/slab.go create mode 100644 vendor/github.com/prometheus/procfs/softirqs.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 vendor/go.etcd.io/bbolt/.gitignore create mode 100644 vendor/go.etcd.io/bbolt/.travis.yml create mode 100644 vendor/go.etcd.io/bbolt/LICENSE create mode 100644 vendor/go.etcd.io/bbolt/Makefile create mode 100644 vendor/go.etcd.io/bbolt/README.md create mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go create mode 100644 
vendor/go.etcd.io/bbolt/bolt_ppc64le.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go create mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go create mode 100644 vendor/go.etcd.io/bbolt/bucket.go create mode 100644 vendor/go.etcd.io/bbolt/compact.go create mode 100644 vendor/go.etcd.io/bbolt/cursor.go create mode 100644 vendor/go.etcd.io/bbolt/db.go create mode 100644 vendor/go.etcd.io/bbolt/doc.go create mode 100644 vendor/go.etcd.io/bbolt/errors.go create mode 100644 vendor/go.etcd.io/bbolt/freelist.go create mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go create mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go create mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go create mode 100644 vendor/go.etcd.io/bbolt/node.go create mode 100644 vendor/go.etcd.io/bbolt/page.go create mode 100644 vendor/go.etcd.io/bbolt/tx.go create mode 100644 vendor/go.etcd.io/bbolt/unsafe.go create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/acme/acme.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go create mode 100644 vendor/golang.org/x/crypto/acme/http.go create mode 100644 vendor/golang.org/x/crypto/acme/jws.go create mode 100644 vendor/golang.org/x/crypto/acme/rfc8555.go create mode 100644 vendor/golang.org/x/crypto/acme/types.go create mode 100644 vendor/golang.org/x/crypto/acme/version_go112.go create mode 100644 
vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 vendor/golang.org/x/mod/modfile/print.go create mode 100644 vendor/golang.org/x/mod/modfile/read.go create mode 100644 vendor/golang.org/x/mod/modfile/rule.go create mode 100644 vendor/golang.org/x/mod/modfile/work.go create mode 100644 vendor/golang.org/x/mod/module/module.go create mode 100644 vendor/golang.org/x/mod/module/pseudo.go create mode 100644 vendor/golang.org/x/mod/semver/semver.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/html/atom/atom.go create mode 100644 vendor/golang.org/x/net/html/atom/table.go create mode 100644 vendor/golang.org/x/net/html/const.go create mode 100644 vendor/golang.org/x/net/html/doc.go create mode 100644 vendor/golang.org/x/net/html/doctype.go create mode 100644 
vendor/golang.org/x/net/html/entity.go create mode 100644 vendor/golang.org/x/net/html/escape.go create mode 100644 vendor/golang.org/x/net/html/foreign.go create mode 100644 vendor/golang.org/x/net/html/node.go create mode 100644 vendor/golang.org/x/net/html/parse.go create mode 100644 vendor/golang.org/x/net/html/render.go create mode 100644 vendor/golang.org/x/net/html/token.go create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/sys/LICENSE create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 
vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/.gitignore create mode 100644 vendor/golang.org/x/sys/unix/README.md create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 vendor/golang.org/x/sys/unix/aliases.go create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 
vendor/golang.org/x/sys/unix/asm_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/constants.go create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go create mode 100644 vendor/golang.org/x/sys/unix/dirent.go create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go create mode 100644 vendor/golang.org/x/sys/unix/fdset.go create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100644 
vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go create mode 100644 vendor/golang.org/x/sys/unix/race.go create mode 100644 vendor/golang.org/x/sys/unix/race0.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/syscall.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go create mode 100644 
vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go create mode 100644 
vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 
vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go create mode 
100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 
vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 
100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 
vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/windows/aliases.go create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 
vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create 
mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/codes.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/doc.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/flags.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go 
create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/support.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/sync.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go 
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types_118.go create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 
vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 
100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 
vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 
vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/modules.txt diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..54aec5f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +/mox +/testdata/ +/node_modules/ +/local/ +/rfc/ +/cover.* +/.go/ +/tmp/ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..00c44fa --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +/mox +/rfc/[0-9][0-9]* +/local/ +/testdata/check/ +/testdata/empty/ +/testdata/imap/data/ +/testdata/imaptest/data/ +/testdata/integration/run +/testdata/junk/*.bloom +/testdata/junk/*.db +/testdata/queue/data/ +/testdata/sent/ +/testdata/smtp/data/ 
+/testdata/smtp/datajunk/ +/testdata/store/data/ +/testdata/train/ +/cover.out +/cover.html +/.go/ +/node_modules/ +/package.json +/package-lock.json diff --git a/.go/empty b/.go/empty new file mode 100644 index 0000000..e69de29 diff --git a/.jshintrc b/.jshintrc new file mode 100644 index 0000000..cac66dc --- /dev/null +++ b/.jshintrc @@ -0,0 +1,12 @@ +{ + "esversion": 9, + "asi": true, + "strict": "implied", + "globals": { + "window": true, + "console": true, + "document": true, + "Node": true, + "api": true + } +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..3cbd281 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1-alpine AS build +WORKDIR /build +RUN apk add make +COPY . . +env GOPROXY=off +RUN make build + +FROM alpine:3.17 +WORKDIR /mox +COPY --from=build /build/mox /mox/mox +CMD ["/mox/mox", "serve"] diff --git a/Dockerfile.imaptest b/Dockerfile.imaptest new file mode 100644 index 0000000..e76ac98 --- /dev/null +++ b/Dockerfile.imaptest @@ -0,0 +1,7 @@ +FROM alpine:3.17 + +RUN apk update && apk add wget build-base +WORKDIR /src +RUN wget http://dovecot.org/nightly/dovecot-latest.tar.gz && tar -zxvf dovecot-latest.tar.gz && cd dovecot-0.0.0-* && ./configure && make install && cd .. 
+RUN wget http://dovecot.org/nightly/imaptest/imaptest-latest.tar.gz && tar -zxvf imaptest-latest.tar.gz && cd dovecot-0.0-imaptest-0.0.0-* && ./configure --with-dovecot=$(ls -d ../dovecot-0.0.0-*) && make install +ENTRYPOINT /usr/local/bin/imaptest diff --git a/LICENSE.MIT b/LICENSE.MIT new file mode 100644 index 0000000..c528ee5 --- /dev/null +++ b/LICENSE.MIT @@ -0,0 +1,7 @@ +Copyright 2021 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSE.MPLv2.0 b/LICENSE.MPLv2.0 new file mode 100644 index 0000000..ee6256c --- /dev/null +++ b/LICENSE.MPLv2.0 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. 
+ +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..85cfb5b --- /dev/null +++ b/Makefile @@ -0,0 +1,76 @@ +default: build + +build: + # build early to catch syntax errors + CGO_ENABLED=0 go build + CGO_ENABLED=0 go vet -tags integration ./... + ./gendoc.sh + (cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Admin) >http/adminapi.json + (cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >http/accountapi.json + # build again, files above are embedded + CGO_ENABLED=0 go build + +test: + CGO_ENABLED=0 go test -shuffle=on -coverprofile cover.out ./... + go tool cover -html=cover.out -o cover.html + +test-race: + CGO_ENABLED=1 go test -race -shuffle=on -covermode atomic -coverprofile cover.out ./... + go tool cover -html=cover.out -o cover.html + +check: + staticcheck ./... + staticcheck -tags integration + +# having "err" shadowed is common, best to not have others +check-shadow: + go vet -vettool=$$(which shadow) ./... + +fuzz: + go test -fuzz FuzzParseSignature -fuzztime 5m ./dkim + go test -fuzz FuzzParseRecord -fuzztime 5m ./dkim + go test -fuzz . -fuzztime 5m ./dmarc + go test -fuzz . -fuzztime 5m ./dmarcrpt + go test -fuzz . -parallel 1 -fuzztime 5m ./imapserver + go test -fuzz . -parallel 1 -fuzztime 5m ./junk + go test -fuzz FuzzParseRecord -fuzztime 5m ./mtasts + go test -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts + go test -fuzz . -parallel 1 -fuzztime 5m ./smtpserver + go test -fuzz . 
-fuzztime 5m ./spf + go test -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt + go test -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt + +integration-build: + docker-compose -f docker-compose-integration.yml build --no-cache moxmail + +integration-start: + -MOX_UID=$$(id -u) MOX_GID=$$(id -g) docker-compose -f docker-compose-integration.yml run moxmail /bin/bash + MOX_UID= MOX_GID= docker-compose -f docker-compose-integration.yml down + +# run from within "make integration-start" +integration-test: + CGO_ENABLED=0 go test -tags integration + go tool cover -html=cover.out -o cover.html + +imaptest-build: + -MOX_UID=$$(id -u) MOX_GID=$$(id -g) docker-compose -f docker-compose-imaptest.yml build --no-cache mox + +imaptest-run: + -rm -r testdata/imaptest/data + mkdir testdata/imaptest/data + MOX_UID=$$(id -u) MOX_GID=$$(id -g) docker-compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox + MOX_UID= MOX_GID= docker-compose -f docker-compose-imaptest.yml down + +fmt: + go fmt ./... + gofmt -w -s *.go */*.go + +jswatch: + inotifywait -m -e close_write http/admin.html http/account.html | xargs -n2 sh -c 'echo changed; ./checkhtmljs http/admin.html http/account.html' + +jsinstall: + -mkdir -p node_modules/.bin + npm install jshint@2.13.2 + +docker: + docker build -t mox:latest . diff --git a/README.md b/README.md new file mode 100644 index 0000000..20118b9 --- /dev/null +++ b/README.md @@ -0,0 +1,181 @@ +Mox - modern full-featured open source secure mail server for low-maintenance self-hosted email + +See Quickstart below to get started. + +Mox features: + +- Quick and easy to maintain mail server for your own domain through quickstart. +- SMTP for receiving and submitting email. +- IMAP4 for giving email clients access to email. +- Automatic TLS with ACME, for use with Let's Encrypt and other CA's. +- SPF, verifying that a remote host is allowed to send email for a domain. 
+- DKIM, verifying that a message is signed by the claimed sender domain, + and for signing emails sent by mox for others to verify. +- DMARC, for enforcing SPF/DKIM policies set by domains. Incoming DMARC + aggregate reports are analyzed. +- Reputation tracking, learning (per user) host- and domain-based reputation from + (Non-)Junk/Non-Junk email. +- Bayesian spam filtering that learns (per user) from (Non-)Junk email. +- Greylisting of servers with no/low reputation and questionable email content. + Temporarily refused emails are available over IMAP in a special mailbox for a + short period, helping with misclassified legitimate synchronous + signup/login/transactional emails. +- Internationalized email, with unicode names in domains and usernames + ("localparts"). +- TLSRPT, parsing reports about TLS usage and issues. +- MTA-STS, for ensuring TLS is used whenever it is required. Both serving of + policies, and tracking and applying policies of remote servers. +- Web admin interface that helps you set up your domains and accounts + (instructions to create DNS records, configure + SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, managing + accounts/domains, and modifying the configuration file. +- Autodiscovery (with SRV records, Microsoft-style and Thunderbird-style) for + easy account setup (though not many clients support it). +- Prometheus metrics and structured logging for operational insight. + +Not supported (but perhaps in the future): + +- Webmail +- Functioning as SMTP relay +- HTTP-based API for sending messages and receiving delivery feedback +- Forwarding (to an external address) +- Autoresponders +- POP3 +- Delivery to (unix) OS system users +- Sieve for filtering +- PGP or S/MIME +- Mailing list manager +- Calendaring +- Support for pluggable delivery mechanisms. + +Mox has automated tests, including for interoperability with Postfix for SMTP. 
+ +Mox is manually tested with email clients: Mozilla Thunderbird, mutt, iOS Mail, +macOS Mail, Android Mail, Microsoft Outlook. + +Mox is also manually tested to interoperate with popular cloud providers: +gmail.com, outlook.com, yahoo.com, proton.me. + +Mox is implemented in Go, a modern safe programming language, and has a focus on +security. + +Mox is available under the MIT-license. +Mox includes the Public Suffix List by Mozilla, under Mozilla Public License, v. 2.0. + +Mox was created by Mechiel Lukkien, mechiel@ueber.net. + + +# Download + +You can easily (cross) compile mox if you have a Go toolchain installed: + + go install github.com/mjl-/mox@latest + +Or you can download binaries from: + + https://beta.gobuilds.org/github.com/mjl-/mox + + +# Quickstart + +The easiest way to get started with serving email for your domain is to get a +vm/machine dedicated to serving email named <host>.<domain>, login as an admin +user, e.g. /home/service, download mox, and generate a configuration for your +desired email address at your domain: + + ./mox quickstart you@example.com + +This creates an account, generates a password and configuration files, prints +the DNS records you need to manually add for your domain and prints commands to +set permissions and install as a service. + +If you already have email configured for your domain, or if you are already +sending email for your domain from other machines/services, you should modify +the suggested configuration and/or DNS records. + +A dedicated machine is convenient because modern email requires HTTPS. You can +combine mox with an existing webserver, but it requires more configuration. + +After starting, you can access the admin web interface on internal IPs. + + +# FAQ - Frequently Asked Questions + +- Why a new mail server implementation? + +Mox aims to make "running a mail server" easy and nearly effortless. 
Excellent +quality mail server software exists, but getting a working setup typically +requires you configure half a dozen services (SMTP, IMAP, SPF/DKIM/DMARC, spam +filtering). That seems to lead to people no longer running their own mail +servers, instead switching to one of the few centralized email providers. SMTP +is a long-time distributed messaging protocol. To keep it distributed, people +need to run their own mail server. Mox aims to make that easy. + +- Where is the documentation? + +Run "mox" without arguments to list its subcommands and usage, run "mox help +<subcmd>" for more details. See all commands and help text at: + + https://pkg.go.dev/github.com/mjl-/mox/ + +The example configuration files annotated with comments can be helpful too. +They are printed by "mox config describe-static" and "mox config +describe-dynamic", and can be viewed at: + + https://pkg.go.dev/github.com/mjl-/mox/config/ + +Mox is still in early stages, and documentation is still limited. Please create +an issue describing what is unclear or confusing, and we'll try to improve the +documentation. + +- How do I import/export email? + +Use the "mox import maildir" or "mox import mbox" subcommands. You could also +use your IMAP email client, add your mox account, and copy or move messages +from one account to the other. + +Similarly, see the "mox export maildir" and "mox export mbox" subcommands to +export email. + +- How can I help? + +Mox needs users and testing in real-life setups! So just give it a try, send +and receive emails through it with your favourite email clients, and file an +issue if you encounter a problem or would like to see a feature/functionality +implemented. + +Instead of switching your email for your domain over to mox, you could simply +configure mox for a subdomain, e.g. <you>@moxtest.<yourdomain>. + +If you have experience with how the email protocols are used in the wild, e.g. 
+compatibility issues, limitations, anti-spam measures, specification +violations, that would be interesting to hear about. + +Pull requests for bug fixes and new code are welcome too. If the changes are +large, it helps to start a discussion (create a ticket) before doing all the +work. + +- How do I change my password? + +Regular users (doing IMAP/SMTP with authentication) can change their password +at the account page, e.g. http://127.0.0.1/account/. Or you can set a password +with "mox setaccountpassword". + +The admin password can be changed with "mox setadminpassword". + +- How do I configure a second mox instance as a backup MX? + +Unfortunately, mox does not yet provide an option for that. Mox does spam +filtering based on reputation of received messages. It will take a good amount +of work to share that information with a backup MX. Without that information, +spammers could use a backup MX to get their spam accepted. Until mox has a +proper solution, you can simply run a single SMTP server. + +- How secure is mox? + +Security is high on the priority list for mox. Mox is young, so don't expect it +to be bug-free. Mox does have automated tests for some security aspects, e.g. for +login, and uses fuzzing. Mox is written in Go, so some classes of bugs such as +buffer mishandling do not typically result in privilege escalation. Of course +logic bugs will still exist. If you find any security issues, please email them +to mechiel@ueber.net. diff --git a/autotls/autotls.go b/autotls/autotls.go new file mode 100644 index 0000000..435e604 --- /dev/null +++ b/autotls/autotls.go @@ -0,0 +1,279 @@ +// Package autotls automatically configures TLS (for SMTP, IMAP, HTTP) by +// requesting certificates with ACME, typically from Let's Encrypt. +package autotls + +// We only do tls-alpn-01. For http-01, we would have to start another +// listener. 
For DNS we would need a third party tool with an API that can make +// the DNS changes, as we don't want to link in dozens of bespoke API's for DNS +// record manipulation into mox. We can do http-01 relatively easily. It could +// be useful to not depend on a single mechanism. + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/crypto/acme" + "golang.org/x/crypto/acme/autocert" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxvar" +) + +var xlog = mlog.New("autotls") + +var ( + metricCertput = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "mox_autotls_certput_total", + Help: "Number of certificate store puts.", + }, + ) +) + +// Manager is in charge of a single ACME identity, and automatically requests +// certificates for allowlisted hosts. +type Manager struct { + ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed. + TLSConfig *tls.Config // For all TLS servers not used for validating ACME requests. Like SMTP and HTTPS on ports other than 443. + Manager *autocert.Manager + + shutdown <-chan struct{} + + sync.Mutex + hosts map[dns.Domain]struct{} +} + +// Load returns an initialized autotls manager for "name" (used for the ACME key +// file and requested certs and their keys). All files are stored within acmeDir. +// contactEmail must be a valid email address to which notifications about ACME can +// be sent. directoryURL is the ACME starting point. When shutdown is closed, no +// new TLS connections can be created. 
+func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan struct{}) (*Manager, error) { + if directoryURL == "" { + return nil, fmt.Errorf("empty ACME directory URL") + } + if contactEmail == "" { + return nil, fmt.Errorf("empty contact email") + } + + // Load identity key if it exists. Otherwise, create a new key. + p := filepath.Join(acmeDir + "/" + name + ".key") + var key crypto.Signer + f, err := os.Open(p) + if f != nil { + defer f.Close() + } + if err != nil && os.IsNotExist(err) { + key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, fmt.Errorf("generating ecdsa identity key: %s", err) + } + der, err := x509.MarshalPKCS8PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("marshal identity key: %s", err) + } + block := &pem.Block{ + Type: "PRIVATE KEY", + Headers: map[string]string{ + "Note": fmt.Sprintf("PEM PKCS8 ECDSA private key generated for ACME provider %s by mox", name), + }, + Bytes: der, + } + b := &bytes.Buffer{} + if err := pem.Encode(b, block); err != nil { + return nil, fmt.Errorf("pem encode: %s", err) + } else if err := os.WriteFile(p, b.Bytes(), 0660); err != nil { + return nil, fmt.Errorf("writing identity key: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("open identity key file: %s", err) + } else { + var privKey any + if buf, err := io.ReadAll(f); err != nil { + return nil, fmt.Errorf("reading identity key: %s", err) + } else if p, _ := pem.Decode(buf); p == nil { + return nil, fmt.Errorf("no pem data") + } else if p.Type != "PRIVATE KEY" { + return nil, fmt.Errorf("got PEM block %q, expected \"PRIVATE KEY\"", p.Type) + } else if privKey, err = x509.ParsePKCS8PrivateKey(p.Bytes); err != nil { + return nil, fmt.Errorf("parsing PKCS8 private key: %s", err) + } + switch k := privKey.(type) { + case *ecdsa.PrivateKey: + key = k + case *rsa.PrivateKey: + key = k + default: + return nil, fmt.Errorf("unsupported private key type %T", key) + } + } + + m := 
&autocert.Manager{ + Cache: dirCache(acmeDir + "/keycerts/" + name), + Prompt: autocert.AcceptTOS, + Email: contactEmail, + Client: &acme.Client{ + DirectoryURL: directoryURL, + Key: key, + UserAgent: "mox/" + moxvar.Version, + }, + // HostPolicy set below. + } + + loggingGetCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + log := xlog.WithContext(hello.Context()) + + // Handle missing SNI to prevent logging an error below. + // At startup, during config initialization, we already adjust the tls config to + // inject the listener hostname if there isn't one in the TLS client hello. This is + // common for SMTP STARTTLS connections, which often do not care about the + // validation of the certificate. + if hello.ServerName == "" { + log.Debug("tls request without sni servername, rejecting") + return nil, fmt.Errorf("sni server name required") + } + + cert, err := m.GetCertificate(hello) + if err != nil { + if errors.Is(err, errHostNotAllowed) { + log.Debugx("requesting certificate", err, mlog.Field("host", hello.ServerName)) + } else { + log.Errorx("requesting certificate", err, mlog.Field("host", hello.ServerName)) + } + } + return cert, err + } + + acmeTLSConfig := *m.TLSConfig() + acmeTLSConfig.GetCertificate = loggingGetCertificate + + tlsConfig := tls.Config{ + GetCertificate: loggingGetCertificate, + } + + a := &Manager{ + ACMETLSConfig: &acmeTLSConfig, + TLSConfig: &tlsConfig, + Manager: m, + shutdown: shutdown, + hosts: map[dns.Domain]struct{}{}, + } + m.HostPolicy = a.HostPolicy + return a, nil +} + +// AllowHostname adds hostname for use with ACME. +func (m *Manager) AllowHostname(hostname dns.Domain) { + m.Lock() + defer m.Unlock() + xlog.Debug("autotls add hostname", mlog.Field("hostname", hostname)) + m.hosts[hostname] = struct{}{} +} + +// Hostnames returns the allowed host names for use with ACME. 
+func (m *Manager) Hostnames() []dns.Domain { + m.Lock() + defer m.Unlock() + var l []dns.Domain + for h := range m.hosts { + l = append(l, h) + } + return l +} + +var errHostNotAllowed = errors.New("autotls: host not in allowlist") + +// HostPolicy decides if a host is allowed for use with ACME, i.e. whether a +// certificate will be returned if present and/or will be requested if not yet +// present. Only hosts added with AllowHostname are allowed. During shutdown, no +// new connections are allowed. +func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + log.WithContext(ctx).Debugx("autotls hostpolicy result", rerr, mlog.Field("host", host)) + }() + + // Don't request new TLS certs when we are shutting down. + select { + case <-m.shutdown: + return fmt.Errorf("shutting down") + default: + } + + d, err := dns.ParseDomain(host) + if err != nil { + return fmt.Errorf("invalid host: %v", err) + } + + m.Lock() + defer m.Unlock() + if _, ok := m.hosts[d]; !ok { + return fmt.Errorf("%w: %q", errHostNotAllowed, d) + } + return nil +} + +type dirCache autocert.DirCache + +func (d dirCache) Delete(ctx context.Context, name string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + log.Debugx("dircache delete result", rerr, mlog.Field("name", name)) + }() + err := autocert.DirCache(d).Delete(ctx, name) + if err != nil { + log.Errorx("deleting cert from dir cache", err, mlog.Field("name", name)) + } else if !strings.HasSuffix(name, "+token") { + log.Info("autotls cert delete", mlog.Field("name", name)) + } + return err +} + +func (d dirCache) Get(ctx context.Context, name string) (rbuf []byte, rerr error) { + log := xlog.WithContext(ctx) + defer func() { + log.Debugx("dircache get result", rerr, mlog.Field("name", name)) + }() + buf, err := autocert.DirCache(d).Get(ctx, name) + if err != nil && errors.Is(err, autocert.ErrCacheMiss) { + log.Infox("getting cert from dir cache", err, 
mlog.Field("name", name)) + } else if err != nil { + log.Errorx("getting cert from dir cache", err, mlog.Field("name", name)) + } else if !strings.HasSuffix(name, "+token") { + log.Debug("autotls cert get", mlog.Field("name", name)) + } + return buf, err +} + +func (d dirCache) Put(ctx context.Context, name string, data []byte) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + log.Debugx("dircache put result", rerr, mlog.Field("name", name)) + }() + metricCertput.Inc() + err := autocert.DirCache(d).Put(ctx, name, data) + if err != nil { + log.Errorx("storing cert in dir cache", err, mlog.Field("name", name)) + } else if !strings.HasSuffix(name, "+token") { + log.Info("autotls cert store", mlog.Field("name", name)) + } + return err +} diff --git a/autotls/autotls_test.go b/autotls/autotls_test.go new file mode 100644 index 0000000..9b86245 --- /dev/null +++ b/autotls/autotls_test.go @@ -0,0 +1,97 @@ +package autotls + +import ( + "context" + "errors" + "os" + "reflect" + "testing" + + "golang.org/x/crypto/acme/autocert" + + "github.com/mjl-/mox/dns" +) + +func TestAutotls(t *testing.T) { + os.RemoveAll("../testdata/autotls") + os.MkdirAll("../testdata/autotls", 0770) + + shutdown := make(chan struct{}) + m, err := Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown) + if err != nil { + t.Fatalf("load manager: %v", err) + } + l := m.Hostnames() + if len(l) != 0 { + t.Fatalf("hostnames, got %v, expected empty list", l) + } + if err := m.HostPolicy(context.Background(), "mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) { + t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err) + } + m.AllowHostname(dns.Domain{ASCII: "mox.example"}) + l = m.Hostnames() + if !reflect.DeepEqual(l, []dns.Domain{{ASCII: "mox.example"}}) { + t.Fatalf("hostnames, got %v, expected single mox.example", l) + } + if err := m.HostPolicy(context.Background(), "mox.example"); err != nil { + t.Fatalf("hostpolicy, got err %v, 
expected no error", err) + } + if err := m.HostPolicy(context.Background(), "other.mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) { + t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err) + } + + ctx := context.Background() + cache := m.Manager.Cache + if _, err := cache.Get(ctx, "mox.example"); err == nil || !errors.Is(err, autocert.ErrCacheMiss) { + t.Fatalf("cache get for absent entry: got err %v, expected autocert.ErrCacheMiss", err) + } + if err := cache.Put(ctx, "mox.example", []byte("test")); err != nil { + t.Fatalf("cache put for absent entry: got err %v, expected error", err) + } + if data, err := cache.Get(ctx, "mox.example"); err != nil || string(data) != "test" { + t.Fatalf("cache get: got err %v data %q, expected nil, 'test'", err, data) + } + if err := cache.Put(ctx, "mox.example", []byte("test2")); err != nil { + t.Fatalf("cache put for absent entry: got err %v, expected error", err) + } + if data, err := cache.Get(ctx, "mox.example"); err != nil || string(data) != "test2" { + t.Fatalf("cache get: got err %v data %q, expected nil, 'test2'", err, data) + } + if err := cache.Delete(ctx, "mox.example"); err != nil { + t.Fatalf("cache delete: got err %v, expected no error", err) + } + if _, err := cache.Get(ctx, "mox.example"); err == nil || !errors.Is(err, autocert.ErrCacheMiss) { + t.Fatalf("cache get for absent entry: got err %v, expected autocert.ErrCacheMiss", err) + } + + close(shutdown) + if err := m.HostPolicy(context.Background(), "mox.example"); err == nil { + t.Fatalf("hostpolicy, got err %v, expected error due to shutdown", err) + } + + key0 := m.Manager.Client.Key + + m, err = Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown) + if err != nil { + t.Fatalf("load manager again: %v", err) + } + if !reflect.DeepEqual(m.Manager.Client.Key, key0) { + t.Fatalf("private key changed after reload") + } + m.shutdown = make(chan struct{}) + m.AllowHostname(dns.Domain{ASCII: "mox.example"}) 
+ if err := m.HostPolicy(context.Background(), "mox.example"); err != nil { + t.Fatalf("hostpolicy, got err %v, expected no error", err) + } + + m2, err := Load("test2", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown) + if err != nil { + t.Fatalf("load another manager: %v", err) + } + if reflect.DeepEqual(m.Manager.Client.Key, m2.Manager.Client.Key) { + t.Fatalf("private key reused between managers") + } + + // Only remove in case of success. + os.RemoveAll("../testdata/autotls") +} diff --git a/checkhtmljs b/checkhtmljs new file mode 100755 index 0000000..0e26d24 --- /dev/null +++ b/checkhtmljs @@ -0,0 +1,2 @@ +#!/bin/sh +exec ./node_modules/.bin/jshint --extract always $@ | fixjshintlines diff --git a/compatibility.txt b/compatibility.txt new file mode 100644 index 0000000..bcfc27e --- /dev/null +++ b/compatibility.txt @@ -0,0 +1,3 @@ +Known compatibility issues. + +- Autodiscovery with Microsoft Outlook (on macOS): Outlook appears to use a Microsoft service to fetch the configuration, instead of connecting directly. Their service makes an invalid TLS handshake (an SNI name with a trailing dot), which is rejected by the Go crypto/tls library. (2023-01) diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..a64a271 --- /dev/null +++ b/config/config.go @@ -0,0 +1,245 @@ +package config + +import ( + "crypto" + "crypto/tls" + "crypto/x509" + "net" + "regexp" + "time" + + "github.com/mjl-/mox/autotls" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/junk" + "github.com/mjl-/mox/mtasts" + "github.com/mjl-/mox/smtp" +) + +// todo: better default values, so less has to be specified in the config file. junkfilter and rejects mailbox should be enabled by default. other features as well possibly. + +// Port returns port if non-zero, and fallback otherwise. 
+func Port(port, fallback int) int { + if port == 0 { + return fallback + } + return port +} + +// Static is a parsed form of the mox.conf configuration file, before converting it +// into a mox.Config after additional processing. +type Static struct { + DataDir string `sconf-doc:"Directory where all data is stored, e.g. queue, accounts and messages, ACME TLS certs/keys. If this is a relative path, it is relative to the directory of mox.conf."` + LogLevel string `sconf-doc:"Default log level, one of: error, info, debug, trace. Trace logs full SMTP and IMAP protocol transcripts, which can be a large amount of data."` + PackageLogLevels map[string]string `sconf:"optional" sconf-doc:"Overrides of log level per package (e.g. queue, smtpclient, smtpserver, imapserver, spf, dkim, dmarc, dmarcdb, autotls, junk, mtasts, tlsrpt)."` + Hostname string `sconf-doc:"Full hostname of system, e.g. mail."` + HostnameDomain dns.Domain `sconf:"-" json:"-"` // Parsed form of hostname. + CheckUpdates bool `sconf:"optional" sconf-doc:"If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to check for a new release. Each time a new release is found, a changelog is fetched from https://updates.xmox.nl and delivered to the postmaster mailbox."` + TLS struct { + CA *struct { + AdditionalToSystem bool `sconf:"optional"` + CertFiles []string `sconf:"optional"` + } `sconf:"optional"` + CertPool *x509.CertPool `sconf:"-" json:"-"` + } `sconf:"optional" sconf-doc:"Global TLS configuration, e.g. for additional Certificate Authorities."` + ACME map[string]ACME `sconf:"optional" sconf-doc:"Automatic TLS configuration with ACME, e.g. through Let's Encrypt. The key is a name referenced in TLS configs, e.g. 
letsencrypt."` + AdminPasswordFile string `sconf:"optional" sconf-doc:"File containing hash of admin password, for authentication in the web admin pages (if enabled)."` + Listeners map[string]Listener `sconf-doc:"Listeners are groups of IP addresses and services enabled on those IP addresses, such as SMTP/IMAP or internal endpoints for administration or Prometheus metrics. All listeners with SMTP/IMAP services enabled will serve all configured domains."` + Postmaster struct { + Account string + Mailbox string `sconf-doc:"E.g. Postmaster or Inbox."` + } `sconf-doc:"Destination for emails delivered to postmaster address."` + DefaultMailboxes []string `sconf:"optional" sconf-doc:"Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."` + + // All IPs that were explicitly listen on for external SMTP. Only set when there + // are no unspecified external SMTP listeners and there is at most 1 for IPv4 and + // at most one for IPv6. Used for setting the local address when making outgoing + // connections. Those IPs are assumed to be in an SPF record for the domain, + // potentially unlike other IPs on the machine. If there is only one address + // family, outgoing connections with the other address family are still made if + // possible. + SpecifiedSMTPListenIPs []net.IP `sconf:"-" json:"-"` +} + +// Dynamic is the parsed form of domains.conf, and is automatically reloaded when changed. +type Dynamic struct { + Domains map[string]Domain `sconf-doc:"Domains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."` + Accounts map[string]Account `sconf-doc:"Accounts to which email can be delivered. 
An account can accept email for multiple domains, for multiple localparts, and deliver to multiple mailboxes."` +} + +type ACME struct { + DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."` + RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."` + ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."` + + Manager *autotls.Manager `sconf:"-" json:"-"` +} + +type Listener struct { + IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses."` + Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used."` + HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config. + + TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."` + SMTPMaxMessageSize int64 `sconf:"optional" sconf-doc:"Maximum size in bytes accepted incoming and outgoing messages. Default is 100MB."` + SMTP struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 25."` + NoSTARTTLS bool `sconf:"optional" sconf-doc:"Do not offer STARTTLS to secure the connection. Not recommended."` + RequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not accept incoming messages if STARTTLS is not active. Can be used in combination with a strict MTA-STS policy. A remote SMTP server may not support TLS and may not be able to deliver messages."` + DNSBLs []string `sconf:"optional" sconf-doc:"Addresses of DNS block lists for incoming messages. Block lists are only consulted for connections/messages without enough reputation to make an accept/reject decision. 
This prevents sending IPs of all communications to the block list provider. If any of the listed DNSBLs contains a requested IP address, the message is rejected as spam. The DNSBLs are checked for healthiness before use, at most once per 4 hours. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net"` + DNSBLZones []dns.Domain `sconf:"-"` + } `sconf:"optional"` + Submission struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 587."` + NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."` + } `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."` + Submissions struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 465."` + } `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."` + IMAP struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 143."` + NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."` + } `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."` + IMAPS struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 993."` + } `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. 
Requires a TLS config."` + AdminHTTP struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 80."` + } `sconf:"optional" sconf-doc:"Admin web interface, for administrators and regular users wanting to change their password."` + AdminHTTPS struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 443."` + } `sconf:"optional" sconf-doc:"Admin web interface listener for HTTPS. Requires a TLS config."` + MetricsHTTP struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 8010."` + } `sconf:"optional" sconf-doc:"Serve prometheus metrics, for monitoring. You should not enable this on a public IP."` + PprofHTTP struct { + Enabled bool + Port int `sconf:"optional" sconf-doc:"Default 8011."` + } `sconf:"optional" sconf-doc:"Serve /debug/pprof/ for profiling a running mox instance. Do not enable this on a public IP!"` + AutoconfigHTTPS struct { + Enabled bool + } `sconf:"optional" sconf-doc:"Serve autoconfiguration/autodiscovery to simplify configuring email applications, will use port 443. Requires a TLS config."` + MTASTSHTTPS struct { + Enabled bool + } `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements, will use port 443. Requires a TLS config."` +} + +type Domain struct { + Description string `sconf:"optional" sconf-doc:"Free-form description of domain."` + LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used to for email delivery decisions. 
For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."` + LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."` + DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."` + DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."` + MTASTS *MTASTS `sconf:"optional" sconf-doc:"With MTA-STS a domain publishes, in DNS, presence of a policy for using/requiring TLS for SMTP connections. The policy is served over HTTPS."` + TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."` + + Domain dns.Domain `sconf:"-" json:"-"` +} + +type DMARC struct { + Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarc-reports."` + Account string `sconf-doc:"Account to deliver to."` + Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. 
DMARC."` + + ParsedLocalpart smtp.Localpart `sconf:"-"` +} + +type MTASTS struct { + PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it in mox, then update the DNS record."` + Mode mtasts.Mode `sconf-doc:"testing, enforce or none. If set to enforce, a remote SMTP server will not deliver email to us if it cannot make a TLS connection."` + MaxAge time.Duration `sconf-doc:"How long a remote mail server is allowed to cache a policy. Typically 1 or several weeks."` + MX []string `sconf:"optional" sconf-doc:"List of server names allowed for SMTP. If empty, the configured hostname is set. Host names can contain a wildcard (*) as a leading label (matching a single label, e.g. *.example matches host.example, not sub.host.example)."` + // todo: parse mx as valid mtasts.Policy.MX, with dns.ParseDomain but taking wildcard into account +} + +type TLSRPT struct { + Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tls-reports."` + Account string `sconf-doc:"Account to deliver to."` + Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."` + + ParsedLocalpart smtp.Localpart `sconf:"-"` +} + +type Selector struct { + Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1"` + HashEffective string `sconf:"-"` + Canonicalization struct { + HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."` + BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."` + } `sconf:"optional"` + Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."` + HeadersEffective []string `sconf:"-"` + DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. 
Not recommended."` + Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."` + PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."` + + ExpirationSeconds int `sconf:"-" json:"-"` // Parsed from Expiration. + Key crypto.Signer `sconf:"-" json:"-"` // As parsed with x509.ParsePKCS8PrivateKey. + Domain dns.Domain `sconf:"-" json:"-"` // Of selector only, not FQDN. +} + +type DKIM struct { + Selectors map[string]Selector `sconf-doc:"Emails can be DKIM signed. Config parameters are per selector. A DNS record must be created for each selector. Add the name to Sign to use the selector for signing messages."` + Sign []string `sconf:"optional" sconf-doc:"List of selectors that emails will be signed with."` +} + +type Account struct { + Domain string `sconf-doc:"Default domain for addresses specified in Destinations. An address can specify a domain override."` + Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."` + Destinations map[string]Destination `sconf-doc:"Destinations, specified as (encoded) localpart for Domain, or a full address including domain override."` + SubjectPass struct { + Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this? + } `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. 
Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."` + RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer)."` + JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter + + DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain. +} + +type JunkFilter struct { + Threshold float64 `sconf-doc:"Approximate spaminess score between 0 and 1 above which emails are rejected as spam. Each delivery attempt adds a little noise to make it slightly harder for spammers to identify words that strongly indicate non-spaminess and use it to bypass the filter. E.g. 0.95."` + junk.Params +} + +type Destination struct { + Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."` + Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. 
You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically if the list address is listname@example.org), delivering them to their own mailbox."` + + DMARCReports bool `sconf:"-" json:"-"` + TLSReports bool `sconf:"-" json:"-"` +} + +type Ruleset struct { + SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. user@example.org."` + VerifiedDomain string `sconf:"optional" sconf-doc:"Matches if this domain or a subdomain matches a SPF- and/or DKIM-verified domain."` + HeadersRegexp map[string]string `sconf:"optional" sconf-doc:"Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and valuees are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match."` + // todo: add a SMTPRcptTo check, and MessageFrom that works on a properly parsed From header. + + ListAllowDomain string `sconf:"optional" sconf-doc:"Influence the spam filtering, this does not change whether this ruleset applies to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. 
Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list."` + + Mailbox string `sconf-doc:"Mailbox to deliver to if Rules match."` + + SMTPMailFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"` + VerifiedDNSDomain dns.Domain `sconf:"-"` + HeadersRegexpCompiled [][2]*regexp.Regexp `sconf:"-" json:"-"` + ListAllowDNSDomain dns.Domain `sconf:"-"` +} + +type TLS struct { + ACME string `sconf:"optional" sconf-doc:"Name of provider from top-level configuration to use for ACME, e.g. letsencrypt."` + KeyCerts []struct { + CertFile string `sconf-doc:"Certificate including intermediate CA certificates, in PEM format."` + KeyFile string `sconf-doc:"Private key for certificate, in PEM format. PKCS8 is recommended, but PKCS1 and EC private keys are recognized as well."` + } `sconf:"optional"` + MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."` + + Config *tls.Config `sconf:"-" json:"-"` + ACMEConfig *tls.Config `sconf:"-" json:"-"` +} diff --git a/config/doc.go b/config/doc.go new file mode 100644 index 0000000..2c33f7c --- /dev/null +++ b/config/doc.go @@ -0,0 +1,465 @@ +/* +Package config holds the configuration file definitions for mox.conf (Static) +and domains.conf (Dynamic). + +Annotated empty/default configuration files you could use as a starting point +for your mox.conf and domains.conf, as generated by "mox config +describe-static" and "mox config describe-domains": + +# mox.conf + + # Directory where all data is stored, e.g. queue, accounts and messages, ACME TLS + # certs/keys. If this is a relative path, it is relative to the directory of + # mox.conf. + DataDir: + + # Default log level, one of: error, info, debug, trace. Trace logs full SMTP and + # IMAP protocol transcripts, which can be a large amount of data. + LogLevel: + + # Overrides of log level per package (e.g. queue, smtpclient, smtpserver, + # imapserver, spf, dkim, dmarc, dmarcdb, autotls, junk, mtasts, tlsrpt). 
+ # (optional) + PackageLogLevels: + x: + + # Full hostname of system, e.g. mail. + Hostname: + + # If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to + # check for a new release. Each time a new release is found, a changelog is + # fetched from https://updates.xmox.nl and delivered to the postmaster mailbox. + # (optional) + CheckUpdates: false + + # Global TLS configuration, e.g. for additional Certificate Authorities. + # (optional) + TLS: + + # (optional) + CA: + + # (optional) + AdditionalToSystem: false + + # (optional) + CertFiles: + - + + # Automatic TLS configuration with ACME, e.g. through Let's Encrypt. The key is a + # name referenced in TLS configs, e.g. letsencrypt. (optional) + ACME: + x: + + # For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory. + DirectoryURL: + + # How long before expiration to renew the certificate. Default is 30 days. + # (optional) + RenewBefore: 0s + + # Email address to register at ACME provider. The provider can email you when + # certificates are about to expire. If you configure an address for which email is + # delivered by this server, keep in mind that TLS misconfigurations could result + # in such notification emails not arriving. + ContactEmail: + + # File containing hash of admin password, for authentication in the web admin + # pages (if enabled). (optional) + AdminPasswordFile: + + # Listeners are groups of IP addresses and services enabled on those IP addresses, + # such as SMTP/IMAP or internal endpoints for administration or Prometheus + # metrics. All listeners with SMTP/IMAP services enabled will serve all configured + # domains. + Listeners: + x: + + # Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses. + IPs: + - + + # If empty, the config global Hostname is used. (optional) + Hostname: + + # For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections. (optional) + TLS: + + # Name of provider from top-level configuration to use for ACME, e.g. 
letsencrypt. + # (optional) + ACME: + + # (optional) + KeyCerts: + - + + # Certificate including intermediate CA certificates, in PEM format. + CertFile: + + # Private key for certificate, in PEM format. PKCS8 is recommended, but PKCS1 and + # EC private keys are recognized as well. + KeyFile: + + # Minimum TLS version. Default: TLSv1.2. (optional) + MinVersion: + + # Maximum size in bytes accepted incoming and outgoing messages. Default is 100MB. + # (optional) + SMTPMaxMessageSize: 0 + + # (optional) + SMTP: + Enabled: false + + # Default 25. (optional) + Port: 0 + + # Do not offer STARTTLS to secure the connection. Not recommended. (optional) + NoSTARTTLS: false + + # Do not accept incoming messages if STARTTLS is not active. Can be used in + # combination with a strict MTA-STS policy. A remote SMTP server may not support + # TLS and may not be able to deliver messages. (optional) + RequireSTARTTLS: false + + # Addresses of DNS block lists for incoming messages. Block lists are only + # consulted for connections/messages without enough reputation to make an + # accept/reject decision. This prevents sending IPs of all communications to the + # block list provider. If any of the listed DNSBLs contains a requested IP + # address, the message is rejected as spam. The DNSBLs are checked for healthiness + # before use, at most once per 4 hours. Example DNSBLs: sbl.spamhaus.org, + # bl.spamcop.net (optional) + DNSBLs: + - + + # SMTP for submitting email, e.g. by email applications. Starts out in plain text, + # can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which + # is always a TLS connection. (optional) + Submission: + Enabled: false + + # Default 587. (optional) + Port: 0 + + # Do not require STARTTLS. Since users must login, this means password may be sent + # without encryption. Not recommended. (optional) + NoRequireSTARTTLS: false + + # SMTP over TLS for submitting email, by email applications. Requires a TLS + # config. 
(optional) + Submissions: + Enabled: false + + # Default 465. (optional) + Port: 0 + + # IMAP for reading email, by email applications. Starts out in plain text, can be + # upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is + # always a TLS connection. (optional) + IMAP: + Enabled: false + + # Default 143. (optional) + Port: 0 + + # Enable this only when the connection is otherwise encrypted (e.g. through a + # VPN). (optional) + NoRequireSTARTTLS: false + + # IMAP over TLS for reading email, by email applications. Requires a TLS config. + # (optional) + IMAPS: + Enabled: false + + # Default 993. (optional) + Port: 0 + + # Admin web interface, for administrators and regular users wanting to change + # their password. (optional) + AdminHTTP: + Enabled: false + + # Default 80. (optional) + Port: 0 + + # Admin web interface listener for HTTPS. Requires a TLS config. (optional) + AdminHTTPS: + Enabled: false + + # Default 443. (optional) + Port: 0 + + # Serve prometheus metrics, for monitoring. You should not enable this on a public + # IP. (optional) + MetricsHTTP: + Enabled: false + + # Default 8010. (optional) + Port: 0 + + # Serve /debug/pprof/ for profiling a running mox instance. Do not enable this on + # a public IP! (optional) + PprofHTTP: + Enabled: false + + # Default 8011. (optional) + Port: 0 + + # Serve autoconfiguration/autodiscovery to simplify configuring email + # applications, will use port 443. Requires a TLS config. (optional) + AutoconfigHTTPS: + Enabled: false + + # Serve MTA-STS policies describing SMTP TLS requirements, will use port 443. + # Requires a TLS config. (optional) + MTASTSHTTPS: + Enabled: false + + # Destination for emails delivered to postmaster address. + Postmaster: + Account: + + # E.g. Postmaster or Inbox. + Mailbox: + + # Mailboxes to create when adding an account. Inbox is always created. 
If no + # mailboxes are specified, the following are automatically created: Sent, Archive, + # Trash, Drafts and Junk. (optional) + DefaultMailboxes: + - + +# domains.conf + + # Domains for which email is accepted. For internationalized domains, use their + # IDNA names in UTF-8. + Domains: + x: + + # Free-form description of domain. (optional) + Description: + + # If not empty, only the string before the separator is used to for email delivery + # decisions. For example, if set to "+", you+anything@example.com will be + # delivered to you@example.com. (optional) + LocalpartCatchallSeparator: + + # If set, upper/lower case is relevant for email delivery. (optional) + LocalpartCaseSensitive: false + + # With DKIM signing, a domain is taking responsibility for (content of) emails it + # sends, letting receiving mail servers build up a (hopefully positive) reputation + # of the domain, which can help with mail delivery. (optional) + DKIM: + + # Emails can be DKIM signed. Config parameters are per selector. A DNS record must + # be created for each selector. Add the name to Sign to use the selector for + # signing messages. + Selectors: + x: + + # sha256 (default) or (older, not recommended) sha1 (optional) + Hash: + + # (optional) + Canonicalization: + + # If set, some modifications to the headers (mostly whitespace) are allowed. + HeaderRelaxed: false + + # If set, some whitespace modifications to the message body are allowed. + BodyRelaxed: false + + # Headers to sign with DKIM. If empty, a reasonable default set of headers is + # selected. (optional) + Headers: + - + + # If set, don't prevent duplicate headers from being added. Not recommended. + # (optional) + DontSealHeaders: false + + # Period a signature is valid after signing, as duration, e.g. 72h. The period + # should be enough for delivery at the final destination, potentially with several + # hops/relays. In the order of days at least. 
(optional) + Expiration: + + # Either an RSA or ed25519 private key file in PKCS8 PEM form. + PrivateKeyFile: + + # List of selectors that emails will be signed with. (optional) + Sign: + - + + # With DMARC, a domain publishes, in DNS, a policy on how other mail servers + # should handle incoming messages with the From-header matching this domain and/or + # subdomain (depending on the configured alignment). Receiving mail servers use + # this to build up a reputation of this domain, which can help with mail delivery. + # A domain can also publish an email address to which reports about DMARC + # verification results can be sent by verifying mail servers, useful for + # monitoring. Incoming DMARC reports are automatically parsed, validated, added to + # metrics and stored in the reporting database for later display in the admin web + # pages. (optional) + DMARC: + + # Address-part before the @ that accepts DMARC reports. Must be + # non-internationalized. Recommended value: dmarc-reports. + Localpart: + + # Account to deliver to. + Account: + + # Mailbox to deliver to, e.g. DMARC. + Mailbox: + + # With MTA-STS a domain publishes, in DNS, presence of a policy for + # using/requiring TLS for SMTP connections. The policy is served over HTTPS. + # (optional) + MTASTS: + + # Policies are versioned. The version must be specified in the DNS record. If you + # change a policy, first change it in mox, then update the DNS record. + PolicyID: + + # testing, enforce or none. If set to enforce, a remote SMTP server will not + # deliver email to us if it cannot make a TLS connection. + Mode: + + # How long a remote mail server is allowed to cache a policy. Typically 1 or + # several weeks. + MaxAge: 0s + + # List of server names allowed for SMTP. If empty, the configured hostname is set. + # Host names can contain a wildcard (*) as a leading label (matching a single + # label, e.g. *.example matches host.example, not sub.host.example). 
(optional) + MX: + - + + # With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS + # behaviour should be sent. Useful for monitoring. Incoming TLS reports are + # automatically parsed, validated, added to metrics and stored in the reporting + # database for later display in the admin web pages. (optional) + TLSRPT: + + # Address-part before the @ that accepts TLSRPT reports. Recommended value: + # tls-reports. + Localpart: + + # Account to deliver to. + Account: + + # Mailbox to deliver to, e.g. TLSRPT. + Mailbox: + + # Accounts to which email can be delivered. An account can accept email for + # multiple domains, for multiple localparts, and deliver to multiple mailboxes. + Accounts: + x: + + # Default domain for addresses specified in Destinations. An address can specify a + # domain override. + Domain: + + # Free form description, e.g. full name or alternative contact info. (optional) + Description: + + # Destinations, specified as (encoded) localpart for Domain, or a full address + # including domain override. + Destinations: + x: + + # Mailbox to deliver to if none of Rulesets match. Default: Inbox. (optional) + Mailbox: + + # Delivery rules based on message and SMTP transaction. You may want to match each + # mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header + # (typically if the list address is listname@example.org), + # delivering them to their own mailbox. (optional) + Rulesets: + - + + # Matches if this regular expression matches (a substring of) the SMTP MAIL FROM + # address (not the message From-header). E.g. user@example.org. (optional) + SMTPMailFromRegexp: + + # Matches if this domain or a subdomain matches a SPF- and/or DKIM-verified + # domain. (optional) + VerifiedDomain: + + # Matches if these header field/value regular expressions all match (substrings + # of) the message headers. Header fields and valuees are converted to lower case + # before matching. 
Whitespace is trimmed from the value before matching. A header + # field can occur multiple times in a message, only one instance has to match. + # (optional) + HeadersRegexp: + x: + + # Influence the spam filtering, this does not change whether this ruleset applies + # to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, + # the message is accepted without further spam checks, such as a junk filter or + # DMARC reject evaluation. DMARC rejects should not apply for mailing lists that + # are not configured to rewrite the From-header of messages that don't have a + # passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you + # may be automatically unsubscribed from the mailing list. (optional) + ListAllowDomain: + + # Mailbox to deliver to if Rules match. + Mailbox: + + # If configured, messages classified as weakly spam are rejected with instructions + # to retry delivery, but this time with a signed token added to the subject. + # During the next delivery attempt, the signed token will bypass the spam filter. + # Messages with a clear spam signal, such as a known bad reputation, are + # rejected/delayed without a signed token. (optional) + SubjectPass: + + # How long unique values are accepted after generating, e.g. 12h. + Period: 0s + + # Mail that looks like spam will be rejected, but a copy can be stored temporarily + # in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can + # look there. The mail still isn't accepted, so the remote mail server may retry + # (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). + # (optional) + RejectsMailbox: + + # Content-based filtering, using the junk-status of individual messages to rank + # words in such messages as spam or ham. It is recommended you always set the + # applicable (non)-junk status on messages, and that you do not empty your Trash + # because those messages contain valuable ham/spam training information. 
+ # (optional) + JunkFilter: + + # Approximate spaminess score between 0 and 1 above which emails are rejected as + # spam. Each delivery attempt adds a little noise to make it slightly harder for + # spammers to identify words that strongly indicate non-spaminess and use it to + # bypass the filter. E.g. 0.95. + Threshold: 0.000000 + Params: + + # Track ham/spam ranking for single words. (optional) + Onegrams: false + + # Track ham/spam ranking for each two consecutive words. (optional) + Twograms: false + + # Track ham/spam ranking for each three consecutive words. (optional) + Threegrams: false + + # Maximum power a word (combination) can have. If spaminess is 0.99, and max power + # is 0.1, spaminess of the word will be set to 0.9. Similar for ham words. + MaxPower: 0.000000 + + # Number of most spammy/hammy words to use for calculating probability. E.g. 10. + TopWords: 0 + + # Ignore words that are this much away from 0.5 haminess/spaminess. E.g. 0.1, + # causing word (combinations) of 0.4 to 0.6 to be ignored. (optional) + IgnoreWords: 0.000000 + + # Occurrences in word database until a word is considered rare and its influence + # in calculating probability reduced. E.g. 1 or 2. (optional) + RareWords: 0 +*/ +package config + +// NOTE: DO NOT EDIT, this file is generated by ../gendoc.sh. diff --git a/ctl.go b/ctl.go new file mode 100644 index 0000000..d5a6a65 --- /dev/null +++ b/ctl.go @@ -0,0 +1,607 @@ +package main + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + "net" + "os" + "runtime" + "runtime/debug" + "sort" + "strconv" + "strings" + "syscall" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/store" +) + +// ctl represents a connection to the ctl unix domain socket of a running mox instance. 
+// ctl provides functions to read/write commands/responses/data streams. +type ctl struct { + conn net.Conn + r *bufio.Reader // Set for first reader. + x any // If set, errors are handled by calling panic(x) instead of log.Fatal. + log *mlog.Log // If set, along with x, logging is done here. +} + +// xctl opens a ctl connection. +func xctl() *ctl { + p := mox.DataDirPath("ctl") + conn, err := net.Dial("unix", p) + if err != nil { + log.Fatalf("connecting to control socket at %q: %v", p, err) + } + ctl := &ctl{conn: conn} + version := ctl.xread() + if version != "ctlv0" { + log.Fatalf("ctl protocol mismatch, got %q, expected ctlv0", version) + } + return ctl +} + +// Interpret msg as an error. +// If ctl.x is set, the string is also written to the ctl to be interpreted as error by the other party. +func (c *ctl) xerror(msg string) { + if c.x == nil { + log.Fatalln(msg) + } + c.log.Debugx("ctl error", fmt.Errorf("%s", msg)) + c.xwrite(msg) + panic(c.x) +} + +// Check if err is not nil. If so, handle error through ctl.x or log.Fatal. If +// ctl.x is set, the error string is written to ctl, to be interpreted as an error +// by the command reading from ctl. +func (c *ctl) xcheck(err error, msg string) { + if err == nil { + return + } + if c.x == nil { + log.Fatalf("%s: %s", msg, err) + } + c.log.Debugx(msg, err) + fmt.Fprintf(c.conn, "%s: %s\n", msg, err) + panic(c.x) +} + +// Read a line and return it without trailing newline. +func (c *ctl) xread() string { + if c.r == nil { + c.r = bufio.NewReader(c.conn) + } + line, err := c.r.ReadString('\n') + c.xcheck(err, "read from ctl") + return strings.TrimSuffix(line, "\n") +} + +// Read a line. If not "ok", the string is interpreted as an error. +func (c *ctl) xreadok() { + line := c.xread() + if line != "ok" { + c.xerror(line) + } +} + +// Write a string, typically a command or parameter. 
+func (c *ctl) xwrite(text string) { + _, err := fmt.Fprintln(c.conn, text) + c.xcheck(err, "write") +} + +// Write "ok" to indicate success. +func (c *ctl) xwriteok() { + c.xwrite("ok") +} + +// Copy data from a stream from ctl to dst. +func (c *ctl) xstreamto(dst io.Writer) { + _, err := io.Copy(dst, c.reader()) + c.xcheck(err, "reading message") +} + +// Copy data from src to a stream to ctl. +func (c *ctl) xstreamfrom(src io.Reader) { + w := c.writer() + _, err := io.Copy(w, src) + c.xcheck(err, "copying") + w.xclose() +} + +// Writer returns an io.Writer for a data stream to ctl. +// When done writing, caller must call xclose to signal the end of the stream. +// Behaviour of "x" is copied from ctl. +func (c *ctl) writer() *ctlwriter { + return &ctlwriter{conn: c.conn, x: c.x, log: c.log} +} + +// Reader returns an io.Reader for a data stream from ctl. +// Behaviour of "x" is copied from ctl. +func (c *ctl) reader() *ctlreader { + if c.r == nil { + c.r = bufio.NewReader(c.conn) + } + return &ctlreader{conn: c.conn, r: c.r, x: c.x, log: c.log} +} + +/* +Ctlwriter and ctlreader implement the writing and reading a data stream. They +implement the io.Writer and io.Reader interface. In the protocol below each +non-data message ends with a newline that is typically stripped when +interpreting. + +Zero or more data transactions: + + > "123" (for data size) or an error message + > data, 123 bytes + < "ok" or an error message + +Followed by a end of stream indicated by zero data bytes message: + + > "0" +*/ + +type ctlwriter struct { + conn net.Conn // Ctl socket from which messages are read. + buf []byte // Scratch buffer, for reading response. + x any // If not nil, errors in Write and xcheckf are handled with panic(x), otherwise with a log.Fatal. 
+ log *mlog.Log +} + +func (s *ctlwriter) Write(buf []byte) (int, error) { + _, err := fmt.Fprintf(s.conn, "%d\n", len(buf)) + s.xcheck(err, "write count") + _, err = s.conn.Write(buf) + s.xcheck(err, "write data") + if s.buf == nil { + s.buf = make([]byte, 512) + } + n, err := s.conn.Read(s.buf) + s.xcheck(err, "reading response to write") + line := strings.TrimSuffix(string(s.buf[:n]), "\n") + if line != "ok" { + s.xerror(line) + } + return len(buf), nil +} + +func (s *ctlwriter) xerror(msg string) { + if s.x == nil { + log.Fatalln(msg) + } else { + s.log.Debugx("error", fmt.Errorf("%s", msg)) + panic(s.x) + } +} + +func (s *ctlwriter) xcheck(err error, msg string) { + if err == nil { + return + } + if s.x == nil { + log.Fatalf("%s: %s", msg, err) + } else { + s.log.Debugx(msg, err) + panic(s.x) + } +} + +func (s *ctlwriter) xclose() { + _, err := fmt.Fprintf(s.conn, "0\n") + s.xcheck(err, "write eof") +} + +type ctlreader struct { + conn net.Conn // For writing "ok" after reading. + r *bufio.Reader // Buffered ctl socket. + err error // If set, returned for each read. can also be io.EOF. + npending int // Number of bytes that can still be read until a new count line must be read. + x any // If set, errors are handled with panic(x) instead of log.Fatal. + log *mlog.Log // If x is set, logging goes to log. 
+} + +func (s *ctlreader) Read(buf []byte) (N int, Err error) { + if s.err != nil { + return 0, s.err + } + if s.npending == 0 { + line, err := s.r.ReadString('\n') + s.xcheck(err, "reading count") + line = strings.TrimSuffix(line, "\n") + n, err := strconv.ParseInt(line, 10, 32) + if err != nil { + s.xerror(line) + } + if n == 0 { + s.err = io.EOF + return 0, s.err + } + s.npending = int(n) + _, err = fmt.Fprintln(s.conn, "ok") + s.xcheck(err, "writing ok after reading") + } + rn := len(buf) + if rn > s.npending { + rn = s.npending + } + n, err := s.r.Read(buf[:rn]) + s.xcheck(err, "read from ctl") + s.npending -= n + return n, err +} + +func (s *ctlreader) xerror(msg string) { + if s.x == nil { + log.Fatalln(msg) + } else { + s.log.Debugx("error", fmt.Errorf("%s", msg)) + panic(s.x) + } +} + +func (s *ctlreader) xcheck(err error, msg string) { + if err == nil { + return + } + if s.x == nil { + log.Fatalf("%s: %s", msg, err) + } else { + s.log.Debugx(msg, err) + panic(s.x) + } +} + +// servectl handles requests on the unix domain socket "ctl", e.g. for graceful shutdown, local mail delivery. +func servectl(ctx context.Context, log *mlog.Log, conn net.Conn, shutdown func()) { + log.Debug("ctl connection") + + var cmd string + + var stop = struct{}{} // Sentinel value for panic and recover. + defer func() { + x := recover() + if x == nil || x == stop { + return + } + log.Error("servectl panic", mlog.Field("err", x), mlog.Field("cmd", cmd)) + debug.PrintStack() + metrics.PanicInc("ctl") + }() + + defer conn.Close() + + ctl := &ctl{conn: conn, x: stop, log: log} + ctl.xwrite("ctlv0") + + for { + servectlcmd(ctx, log, ctl, &cmd, shutdown) + } +} + +func servectlcmd(ctx context.Context, log *mlog.Log, ctl *ctl, xcmd *string, shutdown func()) { + cmd := ctl.xread() + log.Info("ctl command", mlog.Field("cmd", cmd)) + *xcmd = cmd + switch cmd { + case "stop": + shutdown() + os.Exit(0) + + case "restart": + // First test the config. 
+ _, errs := mox.ParseConfig(ctx, mox.ConfigStaticPath, true) + if len(errs) > 1 { + log.Error("multiple configuration errors before restart") + for _, err := range errs { + log.Errorx("config error", err) + } + ctl.xerror("restart aborted") + } else if len(errs) == 1 { + log.Errorx("configuration error, restart aborted", errs[0]) + ctl.xerror(errs[0].Error()) + } + + binary, err := os.Executable() + ctl.xcheck(err, "finding executable") + cfr, ok := ctl.conn.(interface{ File() (*os.File, error) }) + if !ok { + ctl.xerror("cannot dup ctl socket") + } + cf, err := cfr.File() + ctl.xcheck(err, "dup ctl socket") + defer cf.Close() + _, _, err = syscall.Syscall(syscall.SYS_FCNTL, cf.Fd(), syscall.F_SETFD, 0) + if err != syscall.Errno(0) { + ctl.xcheck(err, "clear close-on-exec on ctl socket") + } + ctl.xwriteok() + + shutdown() + + // todo future: we could gather all listen fd's, keep them open, passing them to the new process and indicate (in env var or cli flag) for which addresses they are, then exec and have the new process pick them up. not worth the trouble at the moment, our shutdown is typically quick enough. + // todo future: does this actually cleanup all M's on all platforms? + + env := os.Environ() + var found bool + envv := fmt.Sprintf("MOX_RESTART_CTL_SOCKET=%d", cf.Fd()) + for i, s := range env { + if strings.HasPrefix(s, "MOX_RESTART_CTL_SOCKET=") { + found = true + env[i] = envv + break + } + } + if !found { + env = append(env, envv) + } + // On success, we never get here and "serve" will write the OK on the MOX_RESTART_CTL_SOCKET and close it. + err = syscall.Exec(binary, os.Args, env) + runtime.KeepAlive(cf) + ctl.xcheck(err, "exec") + + case "deliver": + /* The protocol, double quoted are literals. 
+ + > "deliver" + > address + < "ok" + > stream + < "ok" + */ + + to := ctl.xread() + a, addr, err := store.OpenEmail(to) + ctl.xcheck(err, "lookup destination address") + + msgFile, err := store.CreateMessageTemp("ctl-deliver") + ctl.xcheck(err, "creating temporary message file") + defer func() { + if msgFile != nil { + if err := os.Remove(msgFile.Name()); err != nil { + log.Errorx("removing temporary message file", err, mlog.Field("path", msgFile.Name())) + } + msgFile.Close() + } + }() + mw := &message.Writer{Writer: msgFile} + ctl.xwriteok() + + ctl.xstreamto(mw) + err = msgFile.Sync() + ctl.xcheck(err, "syncing message to storage") + msgPrefix := []byte{} + if !mw.HaveHeaders { + msgPrefix = []byte("\r\n\r\n") + } + + m := &store.Message{ + Received: time.Now(), + Size: int64(len(msgPrefix)) + mw.Size, + MsgPrefix: msgPrefix, + } + + a.WithWLock(func() { + err := a.Deliver(log, addr, m, msgFile, true) + ctl.xcheck(err, "delivering message") + log.Info("message delivered through ctl", mlog.Field("to", to)) + }) + + msgFile.Close() + msgFile = nil + err = a.Close() + ctl.xcheck(err, "closing account") + ctl.xwriteok() + + case "queue": + /* protocol: + > "queue" + < "ok" + < stream + */ + qmsgs, err := queue.List() + ctl.xcheck(err, "listing queue") + ctl.xwriteok() + + xw := ctl.writer() + fmt.Fprintln(xw, "queue:") + for _, qm := range qmsgs { + var lastAttempt string + if qm.LastAttempt != nil { + lastAttempt = time.Since(*qm.LastAttempt).Round(time.Second).String() + } + fmt.Fprintf(xw, "%5d %s from:%s@%s to:%s@%s next %s last %s error %q\n", qm.ID, qm.Queued.Format(time.RFC3339), qm.SenderLocalpart, qm.SenderDomain, qm.RecipientLocalpart, qm.RecipientDomain, -time.Since(qm.NextAttempt).Round(time.Second), lastAttempt, qm.LastError) + } + if len(qmsgs) == 0 { + fmt.Fprint(xw, "(empty)\n") + } + xw.xclose() + + case "queuekick", "queuedrop": + /* protocol: + > "queuekick" or "queuedrop" + > id + > todomain + > recipient + < count + < "ok" or error + */ + + 
idstr := ctl.xread() + todomain := ctl.xread() + recipient := ctl.xread() + id, err := strconv.ParseInt(idstr, 10, 64) + if err != nil { + ctl.xwrite("0") + ctl.xcheck(err, "parsing id") + } + + var count int + if cmd == "queuekick" { + count, err = queue.Kick(id, todomain, recipient) + ctl.xcheck(err, "kicking queue") + } else { + count, err = queue.Drop(id, todomain, recipient) + ctl.xcheck(err, "dropping messages from queue") + } + ctl.xwrite(fmt.Sprintf("%d", count)) + ctl.xwriteok() + + case "queuedump": + /* protocol: + > "queuedump" + > id + < "ok" or error + < stream + */ + + idstr := ctl.xread() + id, err := strconv.ParseInt(idstr, 10, 64) + if err != nil { + ctl.xcheck(err, "parsing id") + } + mr, err := queue.OpenMessage(id) + ctl.xcheck(err, "opening message") + defer mr.Close() + ctl.xwriteok() + ctl.xstreamfrom(mr) + + case "importmaildir", "importmbox": + mbox := cmd == "importmbox" + importctl(ctl, mbox) + + case "domainadd": + /* protocol: + > "domainadd" + > domain + > account + > localpart + < "ok" or error + */ + domain := ctl.xread() + account := ctl.xread() + localpart := ctl.xread() + d, err := dns.ParseDomain(domain) + ctl.xcheck(err, "parsing domain") + err = mox.DomainAdd(ctx, d, account, smtp.Localpart(localpart)) + ctl.xcheck(err, "adding domain") + ctl.xwriteok() + + case "domainrm": + /* protocol: + > "domainrm" + > domain + < "ok" or error + */ + domain := ctl.xread() + d, err := dns.ParseDomain(domain) + ctl.xcheck(err, "parsing domain") + err = mox.DomainRemove(ctx, d) + ctl.xcheck(err, "removing domain") + ctl.xwriteok() + + case "accountadd": + /* protocol: + > "accountadd" + > account + > address + < "ok" or error + */ + account := ctl.xread() + address := ctl.xread() + err := mox.AccountAdd(ctx, account, address) + ctl.xcheck(err, "adding account") + ctl.xwriteok() + + case "accountrm": + /* protocol: + > "accountrm" + > account + < "ok" or error + */ + account := ctl.xread() + err := mox.AccountRemove(ctx, account) + 
ctl.xcheck(err, "removing account") + ctl.xwriteok() + + case "addressadd": + /* protocol: + > "addressadd" + > address + > account + < "ok" or error + */ + address := ctl.xread() + account := ctl.xread() + err := mox.AddressAdd(ctx, address, account) + ctl.xcheck(err, "adding address") + ctl.xwriteok() + + case "addressrm": + /* protocol: + > "addressrm" + > address + < "ok" or error + */ + address := ctl.xread() + err := mox.AddressRemove(ctx, address) + ctl.xcheck(err, "removing address") + ctl.xwriteok() + + case "loglevels": + /* protocol: + > "loglevels" + < "ok" + < stream + */ + ctl.xwriteok() + l := mox.Conf.LogLevels() + keys := []string{} + for k := range l { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + s := "" + for _, k := range keys { + ks := k + if ks == "" { + ks = "(default)" + } + s += ks + ": " + mlog.LevelStrings[l[k]] + "\n" + } + ctl.xstreamfrom(strings.NewReader(s)) + + case "setloglevels": + /* protocol: + > "setloglevels" + > pkg + > level + < "ok" or error + */ + pkg := ctl.xread() + levelstr := ctl.xread() + level, ok := mlog.Levels[levelstr] + if !ok { + ctl.xerror("bad level") + } + mox.Conf.SetLogLevel(pkg, level) + ctl.xwriteok() + + default: + log.Info("unrecognized command", mlog.Field("cmd", cmd)) + ctl.xwrite("unrecognized command") + return + } +} diff --git a/dkim/dkim.go b/dkim/dkim.go new file mode 100644 index 0000000..18f3856 --- /dev/null +++ b/dkim/dkim.go @@ -0,0 +1,849 @@ +// Package dkim (DomainKeys Identified Mail signatures, RFC 6376) signs and +// verifies DKIM signatures. +// +// Signatures are added to email messages in DKIM-Signature headers. By signing a +// message, a domain takes responsibility for the message. A message can have +// signatures for multiple domains, and the domain does not necessarily have to +// match a domain in a From header. 
Receiving mail servers can build a spaminess +// reputation based on domains that signed the message, along with other +// mechanisms. +package dkim + +import ( + "bufio" + "bytes" + "context" + "crypto" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/rsa" + "errors" + "fmt" + "hash" + "io" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/publicsuffix" + "github.com/mjl-/mox/smtp" +) + +var xlog = mlog.New("dkim") + +var ( + metricDKIMSign = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_dkim_sign_total", + Help: "DKIM messages signings.", + }, + []string{ + "key", + }, + ) + metricDKIMVerify = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_dkim_verify_duration_seconds", + Help: "DKIM verify, including lookup, duration and result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20}, + }, + []string{ + "algorithm", + "status", + }, + ) +) + +var timeNow = time.Now // Replaced during tests. + +// Status is the result of verifying a DKIM-Signature as described by RFC 8601, +// "Message Header Field for Indicating Message Authentication Status". +type Status string + +// ../rfc/8601:959 ../rfc/6376:1770 ../rfc/6376:2459 + +const ( + StatusNone Status = "none" // Message was not signed. + StatusPass Status = "pass" // Message was signed and signature was verified. + StatusFail Status = "fail" // Message was signed, but signature was invalid. + StatusPolicy Status = "policy" // Message was signed, but signature is not accepted by policy. + StatusNeutral Status = "neutral" // Message was signed, but the signature contains an error or could not be processed. This status is also used for errors not covered by other statuses. 
+ StatusTemperror Status = "temperror" // Message could not be verified. E.g. because of DNS resolve error. A later attempt may succeed. A missing DNS record is treated as temporary error, a new key may not have propagated through DNS shortly after it was taken into use. + StatusPermerror Status = "permerror" // Message cannot be verified. E.g. when a required header field is absent or for invalid (combination of) parameters. Typically set if a DNS record does not allow the signature, e.g. due to algorithm mismatch or expiry. +) + +// Lookup errors. +var ( + ErrNoRecord = errors.New("dkim: no dkim dns record for selector and domain") + ErrMultipleRecords = errors.New("dkim: multiple dkim dns record for selector and domain") + ErrDNS = errors.New("dkim: lookup of dkim dns record") + ErrSyntax = errors.New("dkim: syntax error in dkim dns record") +) + +// Signature verification errors. +var ( + ErrSigAlgMismatch = errors.New("dkim: signature algorithm mismatch with dns record") + ErrHashAlgNotAllowed = errors.New("dkim: hash algorithm not allowed by dns record") + ErrKeyNotForEmail = errors.New("dkim: dns record not allowed for use with email") + ErrDomainIdentityMismatch = errors.New("dkim: dns record disallows mismatch of domain (d=) and identity (i=)") + ErrSigExpired = errors.New("dkim: signature has expired") + ErrHashAlgorithmUnknown = errors.New("dkim: unknown hash algorithm") + ErrBodyhashMismatch = errors.New("dkim: body hash does not match") + ErrSigVerify = errors.New("dkim: signature verification failed") + ErrSigAlgorithmUnknown = errors.New("dkim: unknown signature algorithm") + ErrCanonicalizationUnknown = errors.New("dkim: unknown canonicalization") + ErrHeaderMalformed = errors.New("dkim: mail message header is malformed") + ErrFrom = errors.New("dkim: bad from headers") + ErrQueryMethod = errors.New("dkim: no recognized query method") + ErrKeyRevoked = errors.New("dkim: key has been revoked") + ErrTLD = errors.New("dkim: signed domain is top-level 
domain, above organizational domain")
	ErrPolicy  = errors.New("dkim: signature rejected by policy")
	ErrWeakKey = errors.New("dkim: key is too weak, need at least 1024 bits for rsa")
)

// Result is the conclusion of verifying one DKIM-Signature header. An email can
// have multiple signatures, each with different parameters.
//
// To decide what to do with a message, both the signature parameters and the DNS
// TXT record have to be consulted.
type Result struct {
	Status Status
	Sig    *Sig    // Parsed form of DKIM-Signature header. Can be nil for invalid DKIM-Signature header.
	Record *Record // Parsed form of DKIM DNS record for selector and domain in Sig. Optional.
	Err    error   // If Status is not StatusPass, this error holds the details and can be checked using errors.Is.
}

// todo: use some io.Writer to hash the body and the header.

// Sign returns line(s) with DKIM-Signature headers, generated according to the
// configuration: one header per selector in c.Sign. Body hashes are computed
// once per (canonicalization, hash algorithm) pair and reused across selectors.
func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c config.DKIM, smtputf8 bool, msg io.ReaderAt) (headers string, rerr error) {
	log := xlog.WithContext(ctx)
	start := timeNow()
	defer func() {
		log.Debugx("dkim sign result", rerr, mlog.Field("localpart", localpart), mlog.Field("domain", domain), mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start)))
	}()

	hdrs, bodyOffset, err := parseHeaders(bufio.NewReader(&moxio.AtReader{R: msg}))
	if err != nil {
		return "", fmt.Errorf("%w: %s", ErrHeaderMalformed, err)
	}
	// Exactly one From header is required before we sign anything.
	nfrom := 0
	for _, h := range hdrs {
		if h.lkey == "from" {
			nfrom++
		}
	}
	if nfrom != 1 {
		return "", fmt.Errorf("%w: message has %d from headers, need exactly 1", ErrFrom, nfrom)
	}

	type hashKey struct {
		simple bool   // Canonicalization.
		hash   string // lower-case hash.
	}

	// Cache of body hashes, so identical canonicalization+hash combinations are
	// computed only once even when signing with multiple selectors.
	var bodyHashes = map[hashKey][]byte{}

	for _, sign := range c.Sign {
		sel := c.Selectors[sign]
		sig := newSigWithDefaults()
		sig.Version = 1
		switch sel.Key.(type) {
		case *rsa.PrivateKey:
			sig.AlgorithmSign = "rsa"
			metricDKIMSign.WithLabelValues("rsa").Inc()
		case ed25519.PrivateKey:
			sig.AlgorithmSign = "ed25519"
			metricDKIMSign.WithLabelValues("ed25519").Inc()
		default:
			return "", fmt.Errorf("internal error, unknown private key %T", sel.Key)
		}
		sig.AlgorithmHash = sel.HashEffective
		sig.Domain = domain
		sig.Selector = sel.Domain
		sig.Identity = &Identity{&localpart, domain}
		sig.SignedHeaders = append([]string{}, sel.HeadersEffective...)
		if !sel.DontSealHeaders {
			// ../rfc/6376:2156
			// Each time a header name is added to the signature, the next unused value is
			// signed (in reverse order as they occur in the message). So we can add each
			// header name as often as it occurs. But now we'll add the header names one
			// additional time, preventing someone from adding one more header later on.
			counts := map[string]int{}
			for _, h := range hdrs {
				counts[h.lkey]++
			}
			for _, h := range sel.HeadersEffective {
				for j := counts[strings.ToLower(h)]; j > 0; j-- {
					sig.SignedHeaders = append(sig.SignedHeaders, h)
				}
			}
		}
		sig.SignTime = timeNow().Unix()
		if sel.ExpirationSeconds > 0 {
			sig.ExpireTime = sig.SignTime + int64(sel.ExpirationSeconds)
		}

		sig.Canonicalization = "simple"
		if sel.Canonicalization.HeaderRelaxed {
			sig.Canonicalization = "relaxed"
		}
		sig.Canonicalization += "/"
		if sel.Canonicalization.BodyRelaxed {
			sig.Canonicalization += "relaxed"
		} else {
			sig.Canonicalization += "simple"
		}

		h, hok := algHash(sig.AlgorithmHash)
		if !hok {
			return "", fmt.Errorf("unrecognized hash algorithm %q", sig.AlgorithmHash)
		}

		// We must now first calculate the hash over the body. Then include that hash in a
		// new DKIM-Signature header. Then hash that and the signed headers into a data
		// hash. Then that hash is finally signed and the signature included in the new
		// DKIM-Signature header.
		// ../rfc/6376:1700

		hk := hashKey{!sel.Canonicalization.BodyRelaxed, strings.ToLower(sig.AlgorithmHash)}
		if bh, ok := bodyHashes[hk]; ok {
			sig.BodyHash = bh
		} else {
			br := bufio.NewReader(&moxio.AtReader{R: msg, Offset: int64(bodyOffset)})
			bh, err = bodyHash(h.New(), !sel.Canonicalization.BodyRelaxed, br)
			if err != nil {
				return "", err
			}
			sig.BodyHash = bh
			bodyHashes[hk] = bh
		}

		sigh, err := sig.Header()
		if err != nil {
			return "", err
		}
		verifySig := []byte(strings.TrimSuffix(sigh, "\r\n"))

		dh, err := dataHash(h.New(), !sel.Canonicalization.HeaderRelaxed, sig, hdrs, verifySig)
		if err != nil {
			return "", err
		}

		switch key := sel.Key.(type) {
		case *rsa.PrivateKey:
			sig.Signature, err = key.Sign(cryptorand.Reader, dh, h)
			if err != nil {
				return "", fmt.Errorf("signing data: %v", err)
			}
		case ed25519.PrivateKey:
			// crypto.Hash(0) indicates data isn't prehashed (ed25519ph). We are using
			// PureEdDSA to sign the sha256 hash. ../rfc/8463:123 ../rfc/8032:427
			sig.Signature, err = key.Sign(cryptorand.Reader, dh, crypto.Hash(0))
			if err != nil {
				return "", fmt.Errorf("signing data: %v", err)
			}
		default:
			// Fix: previously formatted err (nil at this point) instead of the key type.
			return "", fmt.Errorf("unsupported private key type: %T", key)
		}

		sigh, err = sig.Header()
		if err != nil {
			return "", err
		}
		headers += sigh
	}

	return headers, nil
}

// Lookup looks up the DKIM TXT record and parses it.
//
// A requested record is <selector>._domainkey.<domain>. Exactly one valid DKIM
// record should be present.
+func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Domain) (rstatus Status, rrecord *Record, rtxt string, rerr error) { + log := xlog.WithContext(ctx) + start := timeNow() + defer func() { + log.Debugx("dkim lookup result", rerr, mlog.Field("selector", selector), mlog.Field("domain", domain), mlog.Field("status", rstatus), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start))) + }() + + name := selector.ASCII + "._domainkey." + domain.ASCII + "." + records, err := dns.WithPackage(resolver, "dkim").LookupTXT(ctx, name) + if dns.IsNotFound(err) { + // ../rfc/6376:2608 + // We must return StatusPermerror. We may want to return StatusTemperror because in + // practice someone will start using a new key before DNS changes have propagated. + return StatusPermerror, nil, "", fmt.Errorf("%w: dns name %q", ErrNoRecord, name) + } else if err != nil { + return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q: %s", ErrDNS, name, err) + } + + // ../rfc/6376:2612 + var status = StatusTemperror + var record *Record + var txt string + err = nil + for _, s := range records { + // We interpret ../rfc/6376:2621 to mean that a record that claims to be v=DKIM1, + // but isn't actually valid, results in a StatusPermFail. But a record that isn't + // claiming to be DKIM1 is ignored. + var r *Record + var isdkim bool + r, isdkim, err = ParseRecord(s) + if err != nil && isdkim { + return StatusPermerror, nil, txt, fmt.Errorf("%w: %s", ErrSyntax, err) + } else if err != nil { + // Hopefully the remote MTA admin discovers the configuration error and fix it for + // an upcoming delivery attempt, in case we rejected with temporary status. + status = StatusTemperror + err = fmt.Errorf("%w: not a dkim record: %s", ErrSyntax, err) + continue + } + // If there are multiple valid records, return a temporary error. Perhaps the error is fixed soon. 
+ // ../rfc/6376:1609 + // ../rfc/6376:2584 + if record != nil { + return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q", ErrMultipleRecords, name) + } + record = r + txt = s + err = nil + } + + if record == nil { + return status, nil, "", err + } + return StatusNeutral, record, txt, nil +} + +// Verify parses the DKIM-Signature headers in a message and verifies each of them. +// +// If the headers of the message cannot be found, an error is returned. +// Otherwise, each DKIM-Signature header is reflected in the returned results. +// +// NOTE: Verify does not check if the domain (d=) that signed the message is +// the domain of the sender. The caller, e.g. through DMARC, should do this. +// +// If ignoreTestMode is true and the DKIM record is in test mode (t=y), a +// verification failure is treated as actual failure. With ignoreTestMode +// false, such verification failures are treated as if there is no signature by +// returning StatusNone. +func Verify(ctx context.Context, resolver dns.Resolver, smtputf8 bool, policy func(*Sig) error, r io.ReaderAt, ignoreTestMode bool) (results []Result, rerr error) { + log := xlog.WithContext(ctx) + start := timeNow() + defer func() { + duration := float64(time.Since(start)) / float64(time.Second) + for _, r := range results { + var alg string + if r.Sig != nil { + alg = r.Sig.Algorithm() + } + status := string(r.Status) + metricDKIMVerify.WithLabelValues(alg, status).Observe(duration) + } + + if len(results) == 0 { + log.Debugx("dkim verify result", rerr, mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start))) + } + for _, result := range results { + log.Debugx("dkim verify result", result.Err, mlog.Field("smtputf8", smtputf8), mlog.Field("status", result.Status), mlog.Field("sig", result.Sig), mlog.Field("record", result.Record), mlog.Field("duration", time.Since(start))) + } + }() + + hdrs, bodyOffset, err := parseHeaders(bufio.NewReader(&moxio.AtReader{R: r})) + if err != nil { + return nil, 
fmt.Errorf("%w: %s", ErrHeaderMalformed, err) + } + + // todo: reuse body hashes and possibly verify signatures in parallel. and start the dns lookup immediately. ../rfc/6376:2697 + + for _, h := range hdrs { + if h.lkey != "dkim-signature" { + continue + } + + sig, verifySig, err := parseSignature(h.raw, smtputf8) + if err != nil { + // ../rfc/6376:2503 + err := fmt.Errorf("parsing DKIM-Signature header: %w", err) + results = append(results, Result{StatusPermerror, nil, nil, err}) + continue + } + + h, canonHeaderSimple, canonDataSimple, err := checkSignatureParams(ctx, sig) + if err != nil { + results = append(results, Result{StatusPermerror, nil, nil, err}) + continue + } + + // ../rfc/6376:2560 + if err := policy(sig); err != nil { + err := fmt.Errorf("%w: %s", ErrPolicy, err) + results = append(results, Result{StatusPolicy, nil, nil, err}) + continue + } + + br := bufio.NewReader(&moxio.AtReader{R: r, Offset: int64(bodyOffset)}) + status, txt, err := verifySignature(ctx, resolver, sig, h, canonHeaderSimple, canonDataSimple, hdrs, verifySig, br, ignoreTestMode) + results = append(results, Result{status, sig, txt, err}) + } + return results, nil +} + +// check if signature is acceptable. +// Only looks at the signature parameters, not at the DNS record. 
+func checkSignatureParams(ctx context.Context, sig *Sig) (hash crypto.Hash, canonHeaderSimple, canonBodySimple bool, rerr error) { + // "From" header is required, ../rfc/6376:2122 ../rfc/6376:2546 + var from bool + for _, h := range sig.SignedHeaders { + if strings.EqualFold(h, "from") { + from = true + break + } + } + if !from { + return 0, false, false, fmt.Errorf(`%w: required "from" header not signed`, ErrFrom) + } + + // ../rfc/6376:2550 + if sig.ExpireTime >= 0 && sig.ExpireTime < timeNow().Unix() { + return 0, false, false, fmt.Errorf("%w: expiration time %q", ErrSigExpired, time.Unix(sig.ExpireTime, 0).Format(time.RFC3339)) + } + + // ../rfc/6376:2554 + // ../rfc/6376:3284 + // Refuse signatures that reach beyond declared scope. We use the existing + // publicsuffix.Lookup to lookup a fake subdomain of the signing domain. If this + // supposed subdomain is actually an organizational domain, the signing domain + // shouldn't be signing for its organizational domain. + subdom := sig.Domain + subdom.ASCII = "x." + subdom.ASCII + if subdom.Unicode != "" { + subdom.Unicode = "x." 
+ subdom.Unicode + } + if orgDom := publicsuffix.Lookup(ctx, subdom); subdom.ASCII == orgDom.ASCII { + return 0, false, false, fmt.Errorf("%w: %s", ErrTLD, sig.Domain) + } + + h, hok := algHash(sig.AlgorithmHash) + if !hok { + return 0, false, false, fmt.Errorf("%w: %q", ErrHashAlgorithmUnknown, sig.AlgorithmHash) + } + + t := strings.SplitN(sig.Canonicalization, "/", 2) + + switch strings.ToLower(t[0]) { + case "simple": + canonHeaderSimple = true + case "relaxed": + default: + return 0, false, false, fmt.Errorf("%w: header canonicalization %q", ErrCanonicalizationUnknown, sig.Canonicalization) + } + + canon := "simple" + if len(t) == 2 { + canon = t[1] + } + switch strings.ToLower(canon) { + case "simple": + canonBodySimple = true + case "relaxed": + default: + return 0, false, false, fmt.Errorf("%w: body canonicalization %q", ErrCanonicalizationUnknown, sig.Canonicalization) + } + + // We only recognize query method dns/txt, which is the default. ../rfc/6376:1268 + if len(sig.QueryMethods) > 0 { + var dnstxt bool + for _, m := range sig.QueryMethods { + if strings.EqualFold(m, "dns/txt") { + dnstxt = true + break + } + } + if !dnstxt { + return 0, false, false, fmt.Errorf("%w: need dns/txt", ErrQueryMethod) + } + } + + return h, canonHeaderSimple, canonBodySimple, nil +} + +// lookup the public key in the DNS and verify the signature. 
+func verifySignature(ctx context.Context, resolver dns.Resolver, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (Status, *Record, error) { + // ../rfc/6376:2604 + status, record, _, err := Lookup(ctx, resolver, sig.Selector, sig.Domain) + if err != nil { + // todo: for temporary errors, we could pass on information so caller returns a 4.7.5 ecode, ../rfc/6376:2777 + return status, nil, err + } + status, err = verifySignatureRecord(record, sig, hash, canonHeaderSimple, canonDataSimple, hdrs, verifySig, body, ignoreTestMode) + return status, record, err +} + +// verify a DKIM signature given the record from dns and signature from the email message. +func verifySignatureRecord(r *Record, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (rstatus Status, rerr error) { + if !ignoreTestMode { + // ../rfc/6376:1558 + y := false + for _, f := range r.Flags { + if strings.EqualFold(f, "y") { + y = true + break + } + } + if y { + defer func() { + if rstatus != StatusPass { + rstatus = StatusNone + } + }() + } + } + + // ../rfc/6376:2639 + if len(r.Hashes) > 0 { + ok := false + for _, h := range r.Hashes { + if strings.EqualFold(h, sig.AlgorithmHash) { + ok = true + break + } + } + if !ok { + return StatusPermerror, fmt.Errorf("%w: dkim dns record expects one of %q, message uses %q", ErrHashAlgNotAllowed, strings.Join(r.Hashes, ","), sig.AlgorithmHash) + } + } + + // ../rfc/6376:2651 + if !strings.EqualFold(r.Key, sig.AlgorithmSign) { + return StatusPermerror, fmt.Errorf("%w: dkim dns record requires algorithm %q, message has %q", ErrSigAlgMismatch, r.Key, sig.AlgorithmSign) + } + + // ../rfc/6376:2645 + if r.PublicKey == nil { + return StatusPermerror, ErrKeyRevoked + } else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 { + // todo: find a reference that 
supports this. + return StatusPermerror, ErrWeakKey + } + + // ../rfc/6376:1541 + if !r.ServiceAllowed("email") { + return StatusPermerror, ErrKeyNotForEmail + } + for _, t := range r.Flags { + // ../rfc/6376:1575 + // ../rfc/6376:1805 + if strings.EqualFold(t, "s") && sig.Identity != nil { + if sig.Identity.Domain.ASCII != sig.Domain.ASCII { + return StatusPermerror, fmt.Errorf("%w: i= identity domain %q must match d= domain %q", ErrDomainIdentityMismatch, sig.Domain.ASCII, sig.Identity.Domain.ASCII) + } + } + } + + if sig.Length >= 0 { + // todo future: implement l= parameter in signatures. we don't currently allow this through policy check. + return StatusPermerror, fmt.Errorf("l= (length) parameter in signature not yet implemented") + } + + // We first check the signature is with the claimed body hash is valid. Then we + // verify the body hash. In case of invalid signatures, we won't read the entire + // body. + // ../rfc/6376:1700 + // ../rfc/6376:2656 + + dh, err := dataHash(hash.New(), canonHeaderSimple, sig, hdrs, verifySig) + if err != nil { + // Any error is likely an invalid header field in the message, hence permanent error. + return StatusPermerror, fmt.Errorf("calculating data hash: %w", err) + } + + switch k := r.PublicKey.(type) { + case *rsa.PublicKey: + if err := rsa.VerifyPKCS1v15(k, hash, dh, sig.Signature); err != nil { + return StatusFail, fmt.Errorf("%w: rsa verification: %s", ErrSigVerify, err) + } + case ed25519.PublicKey: + if ok := ed25519.Verify(k, dh, sig.Signature); !ok { + return StatusFail, fmt.Errorf("%w: ed25519 verification", ErrSigVerify) + } + default: + return StatusPermerror, fmt.Errorf("%w: unrecognized signature algorithm %q", ErrSigAlgorithmUnknown, r.Key) + } + + bh, err := bodyHash(hash.New(), canonDataSimple, body) + if err != nil { + // Any error is likely some internal error, hence temporary error. 
+ return StatusTemperror, fmt.Errorf("calculating body hash: %w", err) + } + if !bytes.Equal(sig.BodyHash, bh) { + return StatusFail, fmt.Errorf("%w: signature bodyhash %x != calculated bodyhash %x", ErrBodyhashMismatch, sig.BodyHash, bh) + } + + return StatusPass, nil +} + +func algHash(s string) (crypto.Hash, bool) { + if strings.EqualFold(s, "sha1") { + return crypto.SHA1, true + } else if strings.EqualFold(s, "sha256") { + return crypto.SHA256, true + } + return 0, false +} + +// bodyHash calculates the hash over the body. +func bodyHash(h hash.Hash, canonSimple bool, body *bufio.Reader) ([]byte, error) { + // todo: take l= into account. we don't currently allow it for policy reasons. + + var crlf = []byte("\r\n") + + if canonSimple { + // ../rfc/6376:864, ensure body ends with exactly one trailing crlf. + ncrlf := 0 + for { + buf, err := body.ReadBytes('\n') + if len(buf) == 0 && err == io.EOF { + break + } + if err != nil && err != io.EOF { + return nil, err + } + hascrlf := bytes.HasSuffix(buf, crlf) + if hascrlf { + buf = buf[:len(buf)-2] + } + if len(buf) > 0 { + for ; ncrlf > 0; ncrlf-- { + h.Write(crlf) + } + h.Write(buf) + } + if hascrlf { + ncrlf++ + } + } + h.Write(crlf) + } else { + hb := bufio.NewWriter(h) + + // We go through the body line by line, replacing WSP with a single space and removing whitespace at the end of lines. + // We stash "empty" lines. If they turn out to be at the end of the file, we must drop them. + stash := &bytes.Buffer{} + var line bool // Whether buffer read is for continuation of line. + var prev byte // Previous byte read for line. + linesEmpty := true // Whether stash contains only empty lines and may need to be dropped. + var bodynonempty bool // Whether body is non-empty, for adding missing crlf. + var hascrlf bool // Whether current/last line ends with crlf, for adding missing crlf. + for { + // todo: should not read line at a time, count empty lines. reduces max memory usage. 
a message with lots of empty lines can cause high memory use. + buf, err := body.ReadBytes('\n') + if len(buf) == 0 && err == io.EOF { + break + } + if err != nil && err != io.EOF { + return nil, err + } + bodynonempty = true + + hascrlf = bytes.HasSuffix(buf, crlf) + if hascrlf { + buf = buf[:len(buf)-2] + + // ../rfc/6376:893, "ignore all whitespace at the end of lines". + // todo: what is "whitespace"? it isn't WSP (space and tab), the next line mentions WSP explicitly for another rule. should we drop trailing \r, \n, \v, more? + buf = bytes.TrimRight(buf, " \t") + } + + // Replace one or more WSP to a single SP. + for i, c := range buf { + wsp := c == ' ' || c == '\t' + if (i >= 0 || line) && wsp { + if prev == ' ' { + continue + } + prev = ' ' + c = ' ' + } else { + prev = c + } + if !wsp { + linesEmpty = false + } + stash.WriteByte(c) + } + if hascrlf { + stash.Write(crlf) + } + line = !hascrlf + if !linesEmpty { + hb.Write(stash.Bytes()) + stash.Reset() + linesEmpty = true + } + } + // ../rfc/6376:886 + // Only for non-empty bodies without trailing crlf do we add the missing crlf. + if bodynonempty && !hascrlf { + hb.Write(crlf) + } + + hb.Flush() + } + return h.Sum(nil), nil +} + +func dataHash(h hash.Hash, canonSimple bool, sig *Sig, hdrs []header, verifySig []byte) ([]byte, error) { + headers := "" + revHdrs := map[string][]header{} + for _, h := range hdrs { + revHdrs[h.lkey] = append([]header{h}, revHdrs[h.lkey]...) + } + + for _, key := range sig.SignedHeaders { + lkey := strings.ToLower(key) + h := revHdrs[lkey] + if len(h) == 0 { + continue + } + revHdrs[lkey] = h[1:] + s := string(h[0].raw) + if canonSimple { + // ../rfc/6376:823 + // Add unmodified. + headers += s + } else { + ch, err := relaxedCanonicalHeaderWithoutCRLF(s) + if err != nil { + return nil, fmt.Errorf("canonicalizing header: %w", err) + } + headers += ch + "\r\n" + } + } + // ../rfc/6376:2377, canonicalization does not apply to the dkim-signature header. 
+ h.Write([]byte(headers)) + dkimSig := verifySig + if !canonSimple { + ch, err := relaxedCanonicalHeaderWithoutCRLF(string(verifySig)) + if err != nil { + return nil, fmt.Errorf("canonicalizing DKIM-Signature header: %w", err) + } + dkimSig = []byte(ch) + } + h.Write(dkimSig) + return h.Sum(nil), nil +} + +// a single header, can be multiline. +func relaxedCanonicalHeaderWithoutCRLF(s string) (string, error) { + // ../rfc/6376:831 + t := strings.SplitN(s, ":", 2) + if len(t) != 2 { + return "", fmt.Errorf("%w: invalid header %q", ErrHeaderMalformed, s) + } + + // Unfold, we keep the leading WSP on continuation lines and fix it up below. + v := strings.ReplaceAll(t[1], "\r\n", "") + + // Replace one or more WSP to a single SP. + var nv []byte + var prev byte + for i, c := range []byte(v) { + if i >= 0 && c == ' ' || c == '\t' { + if prev == ' ' { + continue + } + prev = ' ' + c = ' ' + } else { + prev = c + } + nv = append(nv, c) + } + + ch := strings.ToLower(strings.TrimRight(t[0], " \t")) + ":" + strings.Trim(string(nv), " \t") + return ch, nil +} + +type header struct { + key string // Key in original case. + lkey string // Key in lower-case, for canonical case. + value []byte // Literal header value, possibly spanning multiple lines, not modified in any way, including crlf, excluding leading key and colon. + raw []byte // Like value, but including original leading key and colon. Ready for use as simple header canonicalized use. +} + +func parseHeaders(br *bufio.Reader) ([]header, int, error) { + var o int + var l []header + var key, lkey string + var value []byte + var raw []byte + for { + line, err := readline(br) + if err != nil { + return nil, 0, err + } + o += len(line) + if bytes.Equal(line, []byte("\r\n")) { + break + } + if line[0] == ' ' || line[0] == '\t' { + if len(l) == 0 && key == "" { + return nil, 0, fmt.Errorf("malformed message, starts with space/tab") + } + value = append(value, line...) + raw = append(raw, line...) 
+ continue + } + if key != "" { + l = append(l, header{key, lkey, value, raw}) + } + t := bytes.SplitN(line, []byte(":"), 2) + if len(t) != 2 { + return nil, 0, fmt.Errorf("malformed message, header without colon") + } + + key = strings.TrimRight(string(t[0]), " \t") // todo: where is this specified? + // Check for valid characters. ../rfc/5322:1689 ../rfc/6532:193 + for _, c := range key { + if c <= ' ' || c >= 0x7f { + return nil, 0, fmt.Errorf("invalid header field name") + } + } + if key == "" { + return nil, 0, fmt.Errorf("empty header key") + } + lkey = strings.ToLower(key) + value = append([]byte{}, t[1]...) + raw = append([]byte{}, line...) + } + if key != "" { + l = append(l, header{key, lkey, value, raw}) + } + return l, o, nil +} + +func readline(r *bufio.Reader) ([]byte, error) { + var buf []byte + for { + line, err := r.ReadBytes('\n') + if err != nil { + return nil, err + } + if bytes.HasSuffix(line, []byte("\r\n")) { + if len(buf) == 0 { + return line, nil + } + return append(buf, line...), nil + } + buf = append(buf, line...) 
+ } +} diff --git a/dkim/dkim_test.go b/dkim/dkim_test.go new file mode 100644 index 0000000..f16e479 --- /dev/null +++ b/dkim/dkim_test.go @@ -0,0 +1,702 @@ +package dkim + +import ( + "bufio" + "bytes" + "context" + "crypto" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "strings" + "testing" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" +) + +func policyOK(sig *Sig) error { + return nil +} + +func parseRSAKey(t *testing.T, rsaText string) *rsa.PrivateKey { + rsab, _ := pem.Decode([]byte(rsaText)) + if rsab == nil { + t.Fatalf("no pem in privKey") + } + + key, err := x509.ParsePKCS8PrivateKey(rsab.Bytes) + if err != nil { + t.Fatalf("parsing private key: %s", err) + } + return key.(*rsa.PrivateKey) +} + +func getRSAKey(t *testing.T) *rsa.PrivateKey { + // Generated with: + // openssl genrsa -out pkcs1.pem 2048 + // openssl pkcs8 -topk8 -inform pem -in pkcs1.pem -outform pem -nocrypt -out pkcs8.pem + const rsaText = `-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCu7iTF/AAvJQ3U +WRlcXd+n6HXOSYvmDlqjLsuCKn6/T+Ma0ZtobCRfzyXh5pFQBCHffW6fpEzJs/2o ++e896zb1QKjD8Xxsjarjdw1iXzgMj/lhDGWyNyUHC34+k77UfpQBZgPLvZHyYyQG +sVMzzmvURE+GMFmXYUiGI581PdCx4bNba/4gYQnc/eqQ8oX0T//2RdRqdhdDM2d7 +CYALtkxKetH1F+Rz7XDjFmI3GjPs1KwVdh+Cl8kejThi0SVxXpqnoqB2WGsr/lGG +GxsxcpLb/+KWFjI0go3OJjMaxFCmhB0pGdW8I7kNwNrZsCdSvmjMDojNuegx6WMg +/T7go3CvAgMBAAECggEAQA3AlmSDtr+lNDvZ7voKwwN6W6qPmRJpevZQG54u4iPA +/5mAA/kRSqnh77mLPRb+RkU6RCeX3IXVXNIEGhKugZiHE5Sx4FfxmrAFzR8buXHg +uXoeJOdPXiiFtilIh6u/y1FNE4YbUnud/fthgYdU8Zl/2x2KOMWtFj0l94tmhzOI +b2y8/U8r85anI5XGYuzRCqKS1WskXhkXH8LZUB+9yAxX7V5ysgxjofM4FW8ns7yj +K4cBS8KY2v3t7TZ4FgwkAhPcTfBc/E2UWT1Ztmr+18LFV5bqI8g2YlN+BgCxU7U/ +1tawxqFhs+xowEpzNwAvjAIPpptIRiY1rz7sBB9g5QKBgQDLo/5rTUwNOPR9dYvA ++DYUSCfxvNamI4GI66AgwOeN8O+W+dRDF/Ewbk/SJsBPSLIYzEiQ2uYKcNEmIjo+ +7WwSCJZjKujovw77s9JAHexhpd8uLD2w9l3KeTg41LEYm2uVwoXWEHYSYJ9Ynz0M 
+PWxvi2Hm0IoQ7gJIfxng/wIw3QKBgQDb6GFvPH/OTs40+dopwtm3irmkBAmT8N0b +3TpehONCOiL4GPxmn2DN6ELhHFV27Jj/1CfpGVbcBlaS1xYUGUGsB9gYukhdaBST +KGHRoeZDcf0gaQLKG15EEfFOvcKI9aGljV8FdFfG+Z4fW3LA8khvpvjLLkv1A1jM +MrEBthco+wKBgD45EM9GohtUMNh450gCT7voxFPICKphJP5qSNZZOyeS3BJ8qdAK +a8cJndgvwQk4xDpxiSbBzBKaoD2Prc52i1QDTbhlbx9W6cQdEPxIaGb54PThzcPZ +s5Tfbz9mNeq36qqq8mwTQZCh926D0YqA5jY7F6IITHeZ0hbGx2iJYuj9AoGARIyK +ms8kE95y3wanX+8ySMmAlsT/a1NgyUfL4xzPbpyKvAWl4CN8XJMzDdL0PS8BfnXW +vw28CrgbEojjg/5ff02uqf6fgiZoi3rCC0PJcGq++fRh/zhKyTNCokX6txDCg8Wu +wheDKS40gRfTjJu5wrwsv8E9wjF546VFkf/99jMCgYEAm/x+kEfWKuzx8pQT66TY +pxnC41upJOO1htTHNIN24J7XrrFI5+OZq90G+t/VgWX08Z8RlhejX+ukBf+SRu3u +5VMGcAs4px+iECX/FHo21YQFnrmArN1zdFxPU3rBWoBueqmGO6FT0HBbKzTuS7N0 +7fIv3GQqImz3+ZbYWlXfkPI= +-----END PRIVATE KEY-----` + return parseRSAKey(t, rsaText) +} + +func getWeakRSAKey(t *testing.T) *rsa.PrivateKey { + const rsaText = `-----BEGIN PRIVATE KEY----- +MIIBUwIBADANBgkqhkiG9w0BAQEFAASCAT0wggE5AgEAAkEAsQo3ATJAZ4aAZz+l +ndXl27ODOY+49DjYxwhgtg+OU8A1WEYCfWaZ7ozYtpsqH8GNFvlKtK38eKbdDuLw +gsFYMQIDAQABAkBwstb2/P1Aqb9deoe8JOiw5eJYJySO2w0sDio6W0a4Cqi7XQ7r +/yZ1gOp+ZnShX/sJq0Pd16UkJUUEtEPoZyptAiEA4KLP8pz/9R0t7Envqph1oVjQ +CVDIL/UKRmdnMiwwDosCIQDJwiu08UgNNeliAygbkC2cdszjf4a3laGmYbfWrtAn +swIgUBfc+w0degDgadpm2LWpY1DuRBQIfIjrE/U0Z0A4FkcCIHxEuoLycjygziTu +aM/BWDac/cnKDIIbCbvfSEpU1iT9AiBsbkAcYCQ8mR77BX6gZKEc74nSce29gmR7 +mtrKWknTDQ== +-----END PRIVATE KEY-----` + return parseRSAKey(t, rsaText) +} + +func TestParseSignature(t *testing.T) { + // Domain name must always be A-labels, not U-labels. We do allow localpart with non-ascii. 
+ hdr := `DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=xn--yr2021-pua; + i=møx@xn--h-bga.mox.example; t=1643719203; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=g3zLYH4xKxcPrHOD18z9YfpQcnk/GaJedfustWU5uGs=; b=dtgAOl71h/dNPQrmZTi3SBVkm+ + EjMnF7sWGT123fa5g+m6nGpPue+I+067wwtkWQhsedbDkqT7gZb5WaG5baZsr9e/XpJ/iX4g6YXpr + 07aLY8eF9jazcGcRCVCqLtyq0UJQ2Oz/ML74aYu1beh3jXsoI+k3fJ+0/gKSVC7enCFpNe1HhbXVS + 4HRy/Rw261OEIy2e20lyPT4iDk2oODabzYa28HnXIciIMELjbc/sSawG68SAnhwdkWBrRzBDMCCHm + wvkmgDsVJWtdzjJqjxK2mYVxBMJT0lvsutXgYQ+rr6BLtjHsOb8GMSbQGzY5SJ3N8TP02pw5OykBu + B/aHff1A== +` + smtputf8 := true + _, _, err := parseSignature([]byte(strings.ReplaceAll(hdr, "\n", "\r\n")), smtputf8) + if err != nil { + t.Fatalf("parsing signature: %s", err) + } +} + +func TestVerifyRSA(t *testing.T) { + message := strings.ReplaceAll(`Return-Path: +X-Original-To: mechiel@ueber.net +Delivered-To: mechiel@ueber.net +Received: from [IPV6:2a02:a210:4a3:b80:ca31:30ee:74a7:56e0] (unknown [IPv6:2a02:a210:4a3:b80:ca31:30ee:74a7:56e0]) + by koriander.ueber.net (Postfix) with ESMTPSA id E119EDEB0B + for ; Fri, 10 Dec 2021 20:09:08 +0100 (CET) +DKIM-Signature: v=1; a=rsa-sha256; c=simple/simple; d=ueber.net; + s=koriander; t=1639163348; + bh=g3zLYH4xKxcPrHOD18z9YfpQcnk/GaJedfustWU5uGs=; + h=Date:To:From:Subject:From; + b=rpWruWprs2TB7/MnulA2n2WtfUIfrrnAvRoSrip1ruX5ORN4AOYPPMmk/gGBDdc6O + grRpSsNzR9BrWcooYfbNfSbl04nPKMp0acsZGfpvkj0+mqk5b8lqZs3vncG1fHlQc7 + 0KXfnAHyEs7bjyKGbrw2XG1p/EDoBjIjUsdpdCAtamMGv3A3irof81oSqvwvi2KQks + 17aB1YAL9Xzkq9ipo1aWvDf2W6h6qH94YyNocyZSVJ+SlVm3InNaF8APkV85wOm19U + 9OW81eeuQbvSPcQZJVOmrWzp7XKHaXH0MYE3+hdH/2VtpCnPbh5Zj9SaIgVbaN6NPG + Ua0E07rwC86sg== +Message-ID: <427999f6-114f-e59c-631e-ab2a5f6bfe4c@ueber.net> +Date: Fri, 10 Dec 2021 20:09:08 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 
+Content-Language: nl +To: mechiel@ueber.net +From: Mechiel Lukkien +Subject: test +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test +`, "\n", "\r\n") + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "koriander._domainkey.ueber.net.": {"v=DKIM1; k=rsa; s=email; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy3Z9ffZe8gUTJrdGuKj6IwEembmKYpp0jMa8uhudErcI4gFVUaFiiRWxc4jP/XR9NAEv3XwHm+CVcHu+L/n6VWt6g59U7vHXQicMfKGmEp2VplsgojNy/Y5X9HdVYM0azsI47NcJCDW9UVfeOHdOSgFME4F8dNtUKC4KTB2d1pqj/yixz+V8Sv8xkEyPfSRHcNXIw0LvelqJ1MRfN3hO/3uQSVrPYYk4SyV0b6wfnkQs28fpiIpGQvzlGI5WkrdOQT5k4YHaEvZDLNdwiMeVZOEL7dDoFs2mQsovm+tH0StUAZTnr61NLVFfD5V6Ip1V9zVtspPHvYSuOWwyArFZ9QIDAQAB"}, + }, + } + + results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false) + if err != nil { + t.Fatalf("dkim verify: %v", err) + } + if len(results) != 1 || results[0].Status != StatusPass { + t.Fatalf("verify: unexpected results %v", results) + } +} + +func TestVerifyEd25519(t *testing.T) { + // ../rfc/8463:287 + message := strings.ReplaceAll(`DKIM-Signature: v=1; a=ed25519-sha256; c=relaxed/relaxed; + d=football.example.com; i=@football.example.com; + q=dns/txt; s=brisbane; t=1528637909; h=from : to : + subject : date : message-id : from : subject : date; + bh=2jUSOH9NhtVGCQWNr9BrIAPreKQjO6Sn7XIkfJVOzv8=; + b=/gCrinpcQOoIfuHNQIbq4pgh9kyIK3AQUdt9OdqQehSwhEIug4D11Bus + Fa3bT3FY5OsU7ZbnKELq+eXdp1Q1Dw== +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=football.example.com; i=@football.example.com; + q=dns/txt; s=test; t=1528637909; h=from : to : subject : + date : message-id : from : subject : date; + bh=2jUSOH9NhtVGCQWNr9BrIAPreKQjO6Sn7XIkfJVOzv8=; + b=F45dVWDfMbQDGHJFlXUNB2HKfbCeLRyhDXgFpEL8GwpsRe0IeIixNTe3 + DhCVlUrSjV4BwcVcOF6+FF3Zo9Rpo1tFOeS9mPYQTnGdaSGsgeefOsk2Jz + dA+L10TeYt9BgDfQNZtKdN1WO//KgIqXP7OdEFE4LjFYNcUxZQ4FADY+8= +From: Joe SixPack +To: Suzie Q +Subject: Is dinner ready? 
+Date: Fri, 11 Jul 2003 21:00:37 -0700 (PDT) +Message-ID: <20030712040037.46341.5F8J@football.example.com> + +Hi. + +We lost the game. Are you hungry yet? + +Joe. + +`, "\n", "\r\n") + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "brisbane._domainkey.football.example.com.": {"v=DKIM1; k=ed25519; p=11qYAYKxCrfVS/7TyWQHOg7hcvPapiMlrwIaaPcHURo="}, + "test._domainkey.football.example.com.": {"v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDkHlOQoBTzWRiGs5V6NpP3idY6Wk08a5qhdR6wy5bdOKb2jLQiY/J16JYi0Qvx/byYzCNb3W91y3FutACDfzwQ/BC/e/8uBsCR+yz1Lxj+PL6lHvqMKrM3rG4hstT5QjvHO9PzoxZyVYLzBfO2EeC3Ip3G+2kryOTIKT+l/K4w3QIDAQAB"}, + }, + } + + results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false) + if err != nil { + t.Fatalf("dkim verify: %v", err) + } + if len(results) != 2 || results[0].Status != StatusPass || results[1].Status != StatusPass { + t.Fatalf("verify: unexpected results %#v", results) + } +} + +func TestSign(t *testing.T) { + message := strings.ReplaceAll(`Message-ID: <427999f6-114f-e59c-631e-ab2a5f6bfe4c@ueber.net> +Date: Fri, 10 Dec 2021 20:09:08 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mechiel@ueber.net +From: Mechiel Lukkien +Subject: test + test +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test +`, "\n", "\r\n") + + rsaKey := getRSAKey(t) + ed25519Key := ed25519.NewKeyFromSeed(make([]byte, 32)) + + selrsa := config.Selector{ + HashEffective: "sha256", + Key: rsaKey, + HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), + Domain: dns.Domain{ASCII: "testrsa"}, + } + + // Now with sha1 and relaxed canonicalization. 
+ selrsa2 := config.Selector{ + HashEffective: "sha1", + Key: rsaKey, + HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), + Domain: dns.Domain{ASCII: "testrsa2"}, + } + selrsa2.Canonicalization.HeaderRelaxed = true + selrsa2.Canonicalization.BodyRelaxed = true + + // Ed25519 key. + seled25519 := config.Selector{ + HashEffective: "sha256", + Key: ed25519Key, + HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), + Domain: dns.Domain{ASCII: "tested25519"}, + } + // Again ed25519, but without sealing headers. Use sha256 again, for reusing the body hash from the previous dkim-signature. + seled25519b := config.Selector{ + HashEffective: "sha256", + Key: ed25519Key, + HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,Subject,Date", ","), + DontSealHeaders: true, + Domain: dns.Domain{ASCII: "tested25519b"}, + } + dkimConf := config.DKIM{ + Selectors: map[string]config.Selector{ + "testrsa": selrsa, + "testrsa2": selrsa2, + "tested25519": seled25519, + "tested25519b": seled25519b, + }, + Sign: []string{"testrsa", "testrsa2", "tested25519", "tested25519b"}, + } + + ctx := context.Background() + headers, err := Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(message)) + if err != nil { + t.Fatalf("sign: %v", err) + } + + makeRecord := func(k string, publicKey any) string { + tr := &Record{ + Version: "DKIM1", + Key: k, + PublicKey: publicKey, + Flags: []string{"s"}, + } + txt, err := tr.Record() + if err != nil { + t.Fatalf("making dns txt record: %s", err) + } + //log.Infof("txt record: %s", txt) + return txt + } + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "testrsa._domainkey.mox.example.": {makeRecord("rsa", rsaKey.Public())}, + "testrsa2._domainkey.mox.example.": {makeRecord("rsa", rsaKey.Public())}, + "tested25519._domainkey.mox.example.": {makeRecord("ed25519", 
ed25519Key.Public())}, + "tested25519b._domainkey.mox.example.": {makeRecord("ed25519", ed25519Key.Public())}, + }, + } + + nmsg := headers + message + + results, err := Verify(ctx, resolver, false, policyOK, strings.NewReader(nmsg), false) + if err != nil { + t.Fatalf("verify: %s", err) + } + if len(results) != 4 || results[0].Status != StatusPass || results[1].Status != StatusPass || results[2].Status != StatusPass || results[3].Status != StatusPass { + t.Fatalf("verify: unexpected results %v\nheaders:\n%s", results, headers) + } + //log.Infof("headers:%s", headers) + //log.Infof("nmsg\n%s", nmsg) + + // Multiple From headers. + _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From: \r\nFrom: \r\n\r\ntest")) + if !errors.Is(err, ErrFrom) { + t.Fatalf("sign, got err %v, expected ErrFrom", err) + } + + // No From header. + _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Brom: \r\n\r\ntest")) + if !errors.Is(err, ErrFrom) { + t.Fatalf("sign, got err %v, expected ErrFrom", err) + } + + // Malformed headers. 
+ _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(":\r\n\r\ntest")) + if !errors.Is(err, ErrHeaderMalformed) { + t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) + } + _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(" From:\r\n\r\ntest")) + if !errors.Is(err, ErrHeaderMalformed) { + t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) + } + _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Frøm:\r\n\r\ntest")) + if !errors.Is(err, ErrHeaderMalformed) { + t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) + } + _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From:")) + if !errors.Is(err, ErrHeaderMalformed) { + t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) + } +} + +func TestVerify(t *testing.T) { + // We do many Verify calls, each time starting out with a valid configuration, then + // we modify one thing to trigger an error, which we check for. 
+ + const message = `From: +To: +Subject: test +Date: Fri, 10 Dec 2021 20:09:08 +0100 +Message-ID: +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test +` + + key := ed25519.NewKeyFromSeed(make([]byte, 32)) + var resolver dns.MockResolver + var record *Record + var recordTxt string + var msg string + var sel config.Selector + var dkimConf config.DKIM + var policy func(*Sig) error + var signed bool + var signDomain dns.Domain + + prepare := func() { + t.Helper() + + policy = DefaultPolicy + signDomain = dns.Domain{ASCII: "mox.example"} + + record = &Record{ + Version: "DKIM1", + Key: "ed25519", + PublicKey: key.Public(), + Flags: []string{"s"}, + } + + txt, err := record.Record() + if err != nil { + t.Fatalf("making dns txt record: %s", err) + } + recordTxt = txt + + resolver = dns.MockResolver{ + TXT: map[string][]string{ + "test._domainkey.mox.example.": {txt}, + }, + } + + sel = config.Selector{ + HashEffective: "sha256", + Key: key, + HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), + Domain: dns.Domain{ASCII: "test"}, + } + dkimConf = config.DKIM{ + Selectors: map[string]config.Selector{ + "test": sel, + }, + Sign: []string{"test"}, + } + + msg = message + signed = false + } + + sign := func() { + t.Helper() + + msg = strings.ReplaceAll(msg, "\n", "\r\n") + + headers, err := Sign(context.Background(), "mjl", signDomain, dkimConf, false, strings.NewReader(msg)) + if err != nil { + t.Fatalf("sign: %v", err) + } + msg = headers + msg + signed = true + } + + test := func(expErr error, expStatus Status, expResultErr error, mod func()) { + t.Helper() + + prepare() + mod() + if !signed { + sign() + } + + results, err := Verify(context.Background(), resolver, true, policy, strings.NewReader(msg), false) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got verify error %v, expected %v", err, 
expErr) + } + if expStatus != "" && (len(results) == 0 || results[0].Status != expStatus) { + var status Status + if len(results) > 0 { + status = results[0].Status + } + t.Fatalf("got status %q, expected %q", status, expStatus) + } + var resultErr error + if len(results) > 0 { + resultErr = results[0].Err + } + if (resultErr == nil) != (expResultErr == nil) || resultErr != nil && !errors.Is(resultErr, expResultErr) { + t.Fatalf("got result error %v, expected %v", resultErr, expResultErr) + } + } + + test(nil, StatusPass, nil, func() {}) + + // Cannot parse message, so not much more to do. + test(ErrHeaderMalformed, "", nil, func() { + sign() + msg = ":\r\n\r\n" // Empty header key. + }) + + // From Lookup. + // No DKIM record. ../rfc/6376:2608 + test(nil, StatusPermerror, ErrNoRecord, func() { + resolver.TXT = nil + }) + // DNS request is failing temporarily. + test(nil, StatusTemperror, ErrDNS, func() { + resolver.Fail = map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "test._domainkey.mox.example."}: {}, + } + }) + // Claims to be DKIM through v=, but cannot be parsed. ../rfc/6376:2621 + test(nil, StatusPermerror, ErrSyntax, func() { + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {"v=DKIM1; bogus"}, + } + }) + // Not a DKIM record. ../rfc/6376:2621 + test(nil, StatusTemperror, ErrSyntax, func() { + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {"bogus"}, + } + }) + // Multiple dkim records. ../rfc/6376:1609 + test(nil, StatusTemperror, ErrMultipleRecords, func() { + resolver.TXT["test._domainkey.mox.example."] = []string{recordTxt, recordTxt} + }) + + // Invalid DKIM-Signature header. ../rfc/6376:2503 + test(nil, StatusPermerror, errSigMissingTag, func() { + msg = strings.ReplaceAll("DKIM-Signature: v=1\n"+msg, "\n", "\r\n") + signed = true + }) + + // Signature has valid syntax, but parameters aren't acceptable. + // "From" not signed. 
../rfc/6376:2546 + test(nil, StatusPermerror, ErrFrom, func() { + sign() + // Remove "from" from signed headers (h=). + msg = strings.ReplaceAll(msg, ":From:", ":") + msg = strings.ReplaceAll(msg, "=From:", "=") + }) + // todo: check expired signatures with StatusPermerror and ErrSigExpired. ../rfc/6376:2550 + // Domain in signature is higher-level than organizational domain. ../rfc/6376:2554 + test(nil, StatusPermerror, ErrTLD, func() { + // Pretend to sign as .com + msg = strings.ReplaceAll(msg, "From: \n", "From: \n") + signDomain = dns.Domain{ASCII: "com"} + resolver.TXT = map[string][]string{ + "test._domainkey.com.": {recordTxt}, + } + }) + // Unknown hash algorithm. + test(nil, StatusPermerror, ErrHashAlgorithmUnknown, func() { + sign() + msg = strings.ReplaceAll(msg, "sha256", "sha257") + }) + // Unknown canonicalization. + test(nil, StatusPermerror, ErrCanonicalizationUnknown, func() { + sel.Canonicalization.HeaderRelaxed = true + sel.Canonicalization.BodyRelaxed = true + dkimConf.Selectors = map[string]config.Selector{ + "test": sel, + } + + sign() + msg = strings.ReplaceAll(msg, "relaxed/relaxed", "bogus/bogus") + }) + // Query methods without dns/txt. ../rfc/6376:1268 + test(nil, StatusPermerror, ErrQueryMethod, func() { + sign() + msg = strings.ReplaceAll(msg, "DKIM-Signature: ", "DKIM-Signature: q=other;") + }) + + // Unacceptable through policy. ../rfc/6376:2560 + test(nil, StatusPolicy, ErrPolicy, func() { + sign() + msg = strings.ReplaceAll(msg, "DKIM-Signature: ", "DKIM-Signature: l=1;") + }) + // Hash algorithm not allowed by DNS record. ../rfc/6376:2639 + test(nil, StatusPermerror, ErrHashAlgNotAllowed, func() { + recordTxt += ";h=sha1" + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {recordTxt}, + } + }) + // Signature algorithm mismatch. 
../rfc/6376:2651 + test(nil, StatusPermerror, ErrSigAlgMismatch, func() { + record.PublicKey = getRSAKey(t).Public() + record.Key = "rsa" + txt, err := record.Record() + if err != nil { + t.Fatalf("making dns txt record: %s", err) + } + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {txt}, + } + }) + // Empty public key means revoked key. ../rfc/6376:2645 + test(nil, StatusPermerror, ErrKeyRevoked, func() { + record.PublicKey = nil + txt, err := record.Record() + if err != nil { + t.Fatalf("making dns txt record: %s", err) + } + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {txt}, + } + }) + // We refuse rsa keys smaller than 1024 bits. + test(nil, StatusPermerror, ErrWeakKey, func() { + key := getWeakRSAKey(t) + record.Key = "rsa" + record.PublicKey = key.Public() + txt, err := record.Record() + if err != nil { + t.Fatalf("making dns txt record: %s", err) + } + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {txt}, + } + sel.Key = key + dkimConf.Selectors = map[string]config.Selector{ + "test": sel, + } + }) + // Key not allowed for email by DNS record. ../rfc/6376:1541 + test(nil, StatusPermerror, ErrKeyNotForEmail, func() { + recordTxt += ";s=other" + resolver.TXT = map[string][]string{ + "test._domainkey.mox.example.": {recordTxt}, + } + }) + // todo: Record has flag "s" but identity does not have exact domain match. Cannot currently easily implement this test because Sign() always uses the same domain. ../rfc/6376:1575 + // Wrong signature, different datahash, and thus signature. + test(nil, StatusFail, ErrSigVerify, func() { + sign() + msg = strings.ReplaceAll(msg, "Subject: test\r\n", "Subject: modified header\r\n") + }) + // Signature is correct for bodyhash, but the body has changed. + test(nil, StatusFail, ErrBodyhashMismatch, func() { + sign() + msg = strings.ReplaceAll(msg, "\r\ntest\r\n", "\r\nmodified body\r\n") + }) + + // Check that last-occurring header field is used. 
+ test(nil, StatusFail, ErrSigVerify, func() { + sel.DontSealHeaders = true + dkimConf.Selectors = map[string]config.Selector{ + "test": sel, + } + sign() + msg = strings.ReplaceAll(msg, "\r\n\r\n", "\r\nsubject: another\r\n\r\n") + }) + test(nil, StatusPass, nil, func() { + sel.DontSealHeaders = true + dkimConf.Selectors = map[string]config.Selector{ + "test": sel, + } + sign() + msg = "subject: another\r\n" + msg + }) +} + +func TestBodyHash(t *testing.T) { + simpleGot, err := bodyHash(crypto.SHA256.New(), true, bufio.NewReader(strings.NewReader(""))) + if err != nil { + t.Fatalf("body hash, simple, empty string: %s", err) + } + simpleWant := base64Decode("frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN/XKdLCPjaYaY=") + if !bytes.Equal(simpleGot, simpleWant) { + t.Fatalf("simple body hash for empty string, got %s, expected %s", base64Encode(simpleGot), base64Encode(simpleWant)) + } + + relaxedGot, err := bodyHash(crypto.SHA256.New(), false, bufio.NewReader(strings.NewReader(""))) + if err != nil { + t.Fatalf("body hash, relaxed, empty string: %s", err) + } + relaxedWant := base64Decode("47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=") + if !bytes.Equal(relaxedGot, relaxedWant) { + t.Fatalf("relaxed body hash for empty string, got %s, expected %s", base64Encode(relaxedGot), base64Encode(relaxedWant)) + } + + compare := func(a, b []byte) { + t.Helper() + if !bytes.Equal(a, b) { + t.Fatalf("hash not equal") + } + } + + // NOTE: the trailing space in the strings below are part of the test for canonicalization. 
// base64Decode decodes s as standard (padded) base64. It panics on invalid
// input: it is a test helper used only with fixed, known-good strings.
func base64Decode(s string) []byte {
	buf := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
	n, err := base64.StdEncoding.Decode(buf, []byte(s))
	if err != nil {
		panic(err)
	}
	return buf[:n]
}

// base64Encode returns the standard base64 encoding of buf.
func base64Encode(buf []byte) string {
	return base64.StdEncoding.EncodeToString(buf)
}
+ if err == nil { + if _, err := r.Record(); err != nil { + t.Errorf("r.Record() for parsed record %s, %#v: %s", s, r, err) + } + } + }) +} diff --git a/dkim/parser.go b/dkim/parser.go new file mode 100644 index 0000000..bc8a334 --- /dev/null +++ b/dkim/parser.go @@ -0,0 +1,474 @@ +package dkim + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +type parseErr string + +func (e parseErr) Error() string { + return string(e) +} + +var _ error = parseErr("") + +type parser struct { + s string + o int // Offset into s. + tracked string // All data consumed, except when "drop" is true. To be set by caller when parsing the value for "b=". + drop bool + smtputf8 bool // If set, allow characters > 0x7f. +} + +func (p *parser) xerrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + if p.o < len(p.s) { + msg = fmt.Sprintf("%s (leftover %q)", msg, p.s[p.o:]) + } + panic(parseErr(msg)) +} + +func (p *parser) track(s string) { + if !p.drop { + p.tracked += s + } +} + +func (p *parser) hasPrefix(s string) bool { + return strings.HasPrefix(p.s[p.o:], s) +} + +func (p *parser) xtaken(n int) string { + r := p.s[p.o : p.o+n] + p.o += n + p.track(r) + return r +} + +func (p *parser) xtakefn(fn func(c rune, i int) bool) string { + for i, c := range p.s[p.o:] { + if !fn(c, i) { + return p.xtaken(i) + } + } + return p.xtaken(len(p.s) - p.o) +} + +func (p *parser) empty() bool { + return p.o >= len(p.s) +} + +func (p *parser) xnonempty() { + if p.o >= len(p.s) { + p.xerrorf("expected at least 1 more char") + } +} + +func (p *parser) xtakefn1(fn func(c rune, i int) bool) string { + p.xnonempty() + for i, c := range p.s[p.o:] { + if !fn(c, i) { + if i == 0 { + p.xerrorf("expected at least 1 char") + } + return p.xtaken(i) + } + } + return p.xtaken(len(p.s) - p.o) +} + +func (p *parser) wsp() { + p.xtakefn(func(c rune, i int) bool { + return c == ' ' || c == '\t' + }) +} + +func (p *parser) 
fws() { + p.wsp() + if p.hasPrefix("\r\n ") || p.hasPrefix("\r\n\t") { + p.xtaken(3) + p.wsp() + } +} + +// peekfws returns whether remaining text starts with s, optionally prefix with fws. +func (p *parser) peekfws(s string) bool { + o := p.o + p.fws() + r := p.hasPrefix(s) + p.o = o + return r +} + +func (p *parser) xtake(s string) string { + if !strings.HasPrefix(p.s[p.o:], s) { + p.xerrorf("expected %q", s) + } + return p.xtaken(len(s)) +} + +func (p *parser) take(s string) bool { + if strings.HasPrefix(p.s[p.o:], s) { + p.o += len(s) + p.track(s) + return true + } + return false +} + +// ../rfc/6376:657 +func (p *parser) xtagName() string { + return p.xtakefn1(func(c rune, i int) bool { + return isalpha(c) || i > 0 && (isdigit(c) || c == '_') + }) +} + +func (p *parser) xalgorithm() (string, string) { + // ../rfc/6376:1046 + xtagx := func(c rune, i int) bool { + return isalpha(c) || i > 0 && isdigit(c) + } + algk := p.xtakefn1(xtagx) + p.xtake("-") + algv := p.xtakefn1(xtagx) + return algk, algv +} + +// fws in value is ignored. empty/no base64 characters is valid. +// ../rfc/6376:1021 +// ../rfc/6376:1076 +func (p *parser) xbase64() []byte { + s := "" + p.xtakefn(func(c rune, i int) bool { + if isalphadigit(c) || c == '+' || c == '/' || c == '=' { + s += string(c) + return true + } + if c == ' ' || c == '\t' { + return true + } + rem := p.s[p.o+i:] + if strings.HasPrefix(rem, "\r\n ") || strings.HasPrefix(rem, "\r\n\t") { + return true + } + if (strings.HasPrefix(rem, "\n ") || strings.HasPrefix(rem, "\n\t")) && p.o+i-1 > 0 && p.s[p.o+i-1] == '\r' { + return true + } + return false + }) + buf, err := base64.StdEncoding.DecodeString(s) + if err != nil { + p.xerrorf("decoding base64: %v", err) + } + return buf +} + +// parses canonicalization in original case. 
+func (p *parser) xcanonical() string { + // ../rfc/6376:1100 + s := p.xhyphenatedWord() + if p.take("/") { + return s + "/" + p.xhyphenatedWord() + } + return s +} + +func (p *parser) xdomain() dns.Domain { + subdomain := func(c rune, i int) bool { + // domain names must always be a-labels, ../rfc/6376:1115 ../rfc/6376:1187 ../rfc/6376:1303 + // todo: add a "lax" mode where underscore is allowed if this is a selector? seen in the wild, but invalid: ../rfc/6376:581 ../rfc/5321:2303 + return isalphadigit(c) || (i > 0 && c == '-' && p.o+1 < len(p.s)) + } + s := p.xtakefn1(subdomain) + for p.hasPrefix(".") { + s += p.xtake(".") + p.xtakefn1(subdomain) + } + d, err := dns.ParseDomain(s) + if err != nil { + p.xerrorf("parsing domain %q: %s", s, err) + } + return d +} + +func (p *parser) xhdrName() string { + // ../rfc/6376:473 + // ../rfc/5322:1689 + // BNF for hdr-name (field-name) allows ";", but DKIM disallows unencoded semicolons. ../rfc/6376:643 + return p.xtakefn1(func(c rune, i int) bool { + return c > ' ' && c < 0x7f && c != ':' && c != ';' + }) +} + +func (p *parser) xsignedHeaderFields() []string { + // ../rfc/6376:1157 + l := []string{p.xhdrName()} + for p.peekfws(":") { + p.fws() + p.xtake(":") + p.fws() + l = append(l, p.xhdrName()) + } + return l +} + +func (p *parser) xauid() Identity { + // ../rfc/6376:1192 + // Localpart is optional. + if p.take("@") { + return Identity{Domain: p.xdomain()} + } + lp := p.xlocalpart() + p.xtake("@") + dom := p.xdomain() + return Identity{&lp, dom} +} + +// todo: reduce duplication between implementations: ../smtp/address.go:/xlocalpart ../dkim/parser.go:/xlocalpart ../smtpserver/parse.go:/xlocalpart +func (p *parser) xlocalpart() smtp.Localpart { + // ../rfc/6376:434 + // ../rfc/5321:2316 + var s string + if p.hasPrefix(`"`) { + s = p.xquotedString() + } else { + s = p.xatom() + for p.take(".") { + s += "." + p.xatom() + } + } + // todo: have a strict parser that only allows the actual max of 64 bytes. 
some services have large localparts because of generated (bounce) addresses. + if len(s) > 128 { + // ../rfc/5321:3486 + p.xerrorf("localpart longer than 64 octets") + } + return smtp.Localpart(s) +} + +func (p *parser) xquotedString() string { + p.xtake(`"`) + var s string + var esc bool + for { + c := p.xchar() + if esc { + if c >= ' ' && c < 0x7f { + s += string(c) + esc = false + continue + } + p.xerrorf("invalid localpart, bad escaped char %c", c) + } + if c == '\\' { + esc = true + continue + } + if c == '"' { + return s + } + if c >= ' ' && c < 0x7f && c != '\\' && c != '"' || (c > 0x7f && p.smtputf8) { + s += string(c) + continue + } + p.xerrorf("invalid localpart, invalid character %c", c) + } +} + +func (p *parser) xchar() rune { + // We are careful to track invalid utf-8 properly. + if p.empty() { + p.xerrorf("need another character") + } + var r rune + var o int + for i, c := range p.s[p.o:] { + if i > 0 { + o = i + break + } + r = c + } + if o == 0 { + p.track(p.s[p.o:]) + p.o = len(p.s) + } else { + p.track(p.s[p.o : p.o+o]) + p.o += o + } + return r +} + +func (p *parser) xatom() string { + return p.xtakefn1(func(c rune, i int) bool { + switch c { + case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '/', '=', '?', '^', '_', '`', '{', '|', '}', '~': + return true + } + return isalphadigit(c) || (c > 0x7f && p.smtputf8) + }) +} + +func (p *parser) xbodyLength() int64 { + // ../rfc/6376:1265 + return p.xnumber(76) +} + +func (p *parser) xnumber(maxdigits int) int64 { + o := -1 + for i, c := range p.s[p.o:] { + if c >= '0' && c <= '9' { + o = i + } else { + break + } + } + if o == -1 { + p.xerrorf("expected digits") + } + if o+1 > maxdigits { + p.xerrorf("too many digits") + } + v, err := strconv.ParseInt(p.xtaken(o+1), 10, 64) + if err != nil { + p.xerrorf("parsing digits: %s", err) + } + return v +} + +func (p *parser) xqueryMethods() []string { + // ../rfc/6376:1285 + l := []string{p.xqtagmethod()} + for p.peekfws(":") { + p.fws() + p.xtake(":") + l = 
append(l, p.xqtagmethod()) + } + return l +} + +func (p *parser) xqtagmethod() string { + // ../rfc/6376:1295 ../rfc/6376-eid4810 + s := p.xhyphenatedWord() + // ABNF production "x-sig-q-tag-args" should probably just have been + // "hyphenated-word". As qp-hdr-value, it will consume ":". A similar problem does + // not occur for "z" because it is also "|"-delimited. We work around the potential + // issue by parsing "dns/txt" explicitly. + rem := p.s[p.o:] + if strings.EqualFold(s, "dns") && len(rem) >= len("/txt") && strings.EqualFold(rem[:len("/txt")], "/txt") { + s += p.xtaken(4) + } else if p.take("/") { + s += "/" + p.xqp(true, true) + } + return s +} + +func isalpha(c rune) bool { + return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' +} + +func isdigit(c rune) bool { + return c >= '0' && c <= '9' +} + +func isalphadigit(c rune) bool { + return isalpha(c) || isdigit(c) +} + +// ../rfc/6376:469 +func (p *parser) xhyphenatedWord() string { + return p.xtakefn1(func(c rune, i int) bool { + return isalpha(c) || i > 0 && isdigit(c) || i > 0 && c == '-' && p.o+i+1 < len(p.s) && isalphadigit(rune(p.s[p.o+i+1])) + }) +} + +// ../rfc/6376:474 +func (p *parser) xqphdrvalue() string { + return p.xqp(true, false) +} + +func (p *parser) xqpSection() string { + return p.xqp(false, false) +} + +// dkim-quoted-printable (pipeEncoded true) or qp-section. +// +// It is described in terms of (lots of) modifications to MIME quoted-printable, +// but it may be simpler to just ignore that reference. +func (p *parser) xqp(pipeEncoded, colonEncoded bool) string { + // ../rfc/6376:494 ../rfc/2045:1260 + + hex := func(c byte) rune { + if c >= '0' && c <= '9' { + return rune(c - '0') + } + return rune(10 + c - 'A') + } + + s := "" + for !p.empty() { + p.fws() + if pipeEncoded && p.hasPrefix("|") { + break + } + if colonEncoded && p.hasPrefix(":") { + break + } + if p.hasPrefix("=") { + p.xtake("=") + // note: \r\n before the full hex-octet has been encountered in the wild. 
Could be + // a sender just wrapping their headers after escaping, or not escaping an "=". We + // currently don't compensate for it. + h := p.xtakefn(func(c rune, i int) bool { + return i < 2 && (c >= '0' && c <= '9' || c >= 'A' && c <= 'Z') + }) + if len(h) != 2 { + p.xerrorf("expected qp-hdr-value") + } + c := (hex(h[0]) << 4) | hex(h[1]) + s += string(c) + continue + } + x := p.xtakefn(func(c rune, i int) bool { + return c > ' ' && c < 0x7f && c != ';' && c != '=' && !(pipeEncoded && c == '|') + }) + if x == "" { + break + } + s += x + } + return s +} + +func (p *parser) xselector() dns.Domain { + return p.xdomain() +} + +func (p *parser) xtimestamp() int64 { + // ../rfc/6376:1325 ../rfc/6376:1358 + return p.xnumber(12) +} + +func (p *parser) xcopiedHeaderFields() []string { + // ../rfc/6376:1384 + l := []string{p.xztagcopy()} + for p.hasPrefix("|") { + p.xtake("|") + p.fws() + l = append(l, p.xztagcopy()) + } + return l +} + +func (p *parser) xztagcopy() string { + // ../rfc/6376:1386 + f := p.xhdrName() + p.fws() + p.xtake(":") + v := p.xqphdrvalue() + return f + ":" + v +} diff --git a/dkim/policy.go b/dkim/policy.go new file mode 100644 index 0000000..540cb39 --- /dev/null +++ b/dkim/policy.go @@ -0,0 +1,49 @@ +package dkim + +import ( + "fmt" + "strings" +) + +// DefaultPolicy is the default DKIM policy. +// +// Signatures with a length restriction are rejected because it is hard to decide +// how many signed bytes should be required (none? at least half? all except +// max N bytes?). Also, it isn't likely email applications (MUAs) will be +// displaying the signed vs unsigned (partial) content differently, mostly +// because the encoded data is signed. E.g. half a base64 image could be +// signed, and the rest unsigned. +// +// Signatures without Subject field are rejected. The From header field is +// always required and does not need to be checked in the policy. +// Other signatures are accepted. 
+func DefaultPolicy(sig *Sig) error { + // ../rfc/6376:2088 + // ../rfc/6376:2307 + // ../rfc/6376:2706 + // ../rfc/6376:1558 + if sig.Length >= 0 { + return fmt.Errorf("l= for length not acceptable") + } + + // ../rfc/6376:2139 + // We require at least the following headers: From, Subject. + // You would expect To, Cc and Message-ID to also always be present. + // Microsoft appears to leave out To. + // Yahoo appears to leave out Message-ID. + // Multiple leave out Cc and other address headers. + // At least one newsletter did not sign Date. + var subject bool + for _, h := range sig.SignedHeaders { + subject = subject || strings.EqualFold(h, "subject") + } + var missing []string + if !subject { + missing = append(missing, "subject") + } + if len(missing) > 0 { + return fmt.Errorf("required header fields missing from signature: %s", strings.Join(missing, ", ")) + } + + return nil +} diff --git a/dkim/sig.go b/dkim/sig.go new file mode 100644 index 0000000..dc2be42 --- /dev/null +++ b/dkim/sig.go @@ -0,0 +1,353 @@ +package dkim + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "strings" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/smtp" +) + +// Sig is a DKIM-Signature header. +// +// String values must be compared case insensitively. +type Sig struct { + // Required fields. + Version int // Version, 1. Field "v". Always the first field. + AlgorithmSign string // "rsa" or "ed25519". Field "a". + AlgorithmHash string // "sha256" or the deprecated "sha1" (deprecated). Field "a". + Signature []byte // Field "b". + BodyHash []byte // Field "bh". + Domain dns.Domain // Field "d". + SignedHeaders []string // Duplicates are meaningful. Field "h". + Selector dns.Domain // Selector, for looking DNS TXT record at ._domainkey.. Field "s". + + // Optional fields. + // Canonicalization is the transformation of header and/or body before hashing. The + // value is in original case, but must be compared case-insensitively. 
Normally two + // slash-separated values: header canonicalization and body canonicalization. But + // the "simple" means "simple/simple" and "relaxed" means "relaxed/simple". Field + // "c". + Canonicalization string + Length int64 // Body length to verify, default -1 for whole body. Field "l". + Identity *Identity // AUID (agent/user id). If nil and an identity is needed, should be treated as an Identity without localpart and Domain from d= field. Field "i". + QueryMethods []string // For public key, currently known value is "dns/txt" (should be compared case-insensitively). If empty, dns/txt must be assumed. Field "q". + SignTime int64 // Unix epoch. -1 if unset. Field "t". + ExpireTime int64 // Unix epoch. -1 if unset. Field "x". + CopiedHeaders []string // Copied header fields. Field "z". +} + +// Identity is used for the optional i= field in a DKIM-Signature header. It uses +// the syntax of an email address, but does not necessarily represent one. +type Identity struct { + Localpart *smtp.Localpart // Optional. + Domain dns.Domain +} + +// String returns a value for use in the i= DKIM-Signature field. +func (i Identity) String() string { + s := "@" + i.Domain.ASCII + // We need localpart as pointer to indicate it is missing because localparts can be + // "" which we store (decoded) as empty string and we need to differentiate. + if i.Localpart != nil { + s = i.Localpart.String() + s + } + return s +} + +func newSigWithDefaults() *Sig { + return &Sig{ + Canonicalization: "simple/simple", + Length: -1, + SignTime: -1, + ExpireTime: -1, + } +} + +// Algorithm returns an algorithm string for use in the "a" field. E.g. +// "ed25519-sha256". +func (s Sig) Algorithm() string { + return s.AlgorithmSign + "-" + s.AlgorithmHash +} + +// Header returns the DKIM-Signature header in string form, to be prepended to a +// message, including DKIM-Signature field name and trailing \r\n. 
+func (s *Sig) Header() (string, error) { + // ../rfc/6376:1021 + // todo: make a higher-level writer that accepts pairs, and only folds to next line when needed. + w := &message.HeaderWriter{} + w.Addf("", "DKIM-Signature: v=%d;", s.Version) + // Domain names must always be in ASCII. ../rfc/6376:1115 ../rfc/6376:1187 ../rfc/6376:1303 + w.Addf(" ", "d=%s;", s.Domain.ASCII) + w.Addf(" ", "s=%s;", s.Selector.ASCII) + if s.Identity != nil { + w.Addf(" ", "i=%s;", s.Identity.String()) // todo: Is utf-8 ok here? + } + w.Addf(" ", "a=%s;", s.Algorithm()) + + if s.Canonicalization != "" && !strings.EqualFold(s.Canonicalization, "simple") && !strings.EqualFold(s.Canonicalization, "simple/simple") { + w.Addf(" ", "c=%s;", s.Canonicalization) + } + if s.Length >= 0 { + w.Addf(" ", "l=%d;", s.Length) + } + if len(s.QueryMethods) > 0 && !(len(s.QueryMethods) == 1 && strings.EqualFold(s.QueryMethods[0], "dns/txt")) { + w.Addf(" ", "q=%s;", strings.Join(s.QueryMethods, ":")) + } + if s.SignTime >= 0 { + w.Addf(" ", "t=%d;", s.SignTime) + } + if s.ExpireTime >= 0 { + w.Addf(" ", "x=%d;", s.ExpireTime) + } + + if len(s.SignedHeaders) > 0 { + for i, v := range s.SignedHeaders { + sep := "" + if i == 0 { + v = "h=" + v + sep = " " + } + if i < len(s.SignedHeaders)-1 { + v += ":" + } else if i == len(s.SignedHeaders)-1 { + v += ";" + } + w.Addf(sep, v) + } + } + if len(s.CopiedHeaders) > 0 { + // todo: wrap long headers? 
// packQpHdrValue encodes s like quoted printable, but also encodes "|".
// ":" is encoded as well: it is used as a separator in DKIM headers and can
// cause trouble for "q", even though it is listed in dkim-safe-char,
// ../rfc/6376:497.
func packQpHdrValue(s string) string {
	// ../rfc/6376:474
	const hex = "0123456789ABCDEF"
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c > ' ' && c < 0x7f && c != ';' && c != '=' && c != '|' && c != ':':
			b.WriteByte(c)
		default:
			b.WriteByte('=')
			b.WriteByte(hex[c>>4])
			b.WriteByte(hex[c&0xf])
		}
	}
	return b.String()
}
+// +// The dkim signature with signature left empty ("b=") and without trailing +// crlf is returned, for use in verification. +func parseSignature(buf []byte, smtputf8 bool) (sig *Sig, verifySig []byte, err error) { + defer func() { + if x := recover(); x == nil { + return + } else if xerr, ok := x.(error); ok { + sig = nil + verifySig = nil + err = xerr + } else { + panic(x) + } + }() + + xerrorf := func(format string, args ...any) { + panic(fmt.Errorf(format, args...)) + } + + if !bytes.HasSuffix(buf, []byte("\r\n")) { + xerrorf("%w", errSigMissingCRLF) + } + buf = buf[:len(buf)-2] + + ds := newSigWithDefaults() + seen := map[string]struct{}{} + p := parser{s: string(buf), smtputf8: smtputf8} + name := p.xhdrName() + if !strings.EqualFold(name, "DKIM-Signature") { + xerrorf("%w", errSigHeader) + } + p.wsp() + p.xtake(":") + p.wsp() + // ../rfc/6376:655 + // ../rfc/6376:656 ../rfc/6376-eid5070 + // ../rfc/6376:658 ../rfc/6376-eid5070 + for { + p.fws() + k := p.xtagName() + p.fws() + p.xtake("=") + // Special case for "b", see below. + if k != "b" { + p.fws() + } + // Keys are case-sensitive: ../rfc/6376:679 + if _, ok := seen[k]; ok { + // Duplicates not allowed: ../rfc/6376:683 + xerrorf("%w: %q", errSigDuplicateTag, k) + break + } + seen[k] = struct{}{} + + // ../rfc/6376:1021 + switch k { + case "v": + // ../rfc/6376:1025 + ds.Version = int(p.xnumber(10)) + if ds.Version != 1 { + xerrorf("%w: version %d", errSigUnknownVersion, ds.Version) + } + case "a": + // ../rfc/6376:1038 + ds.AlgorithmSign, ds.AlgorithmHash = p.xalgorithm() + case "b": + // ../rfc/6376:1054 + // To calculate the hash, we have to feed the DKIM-Signature header to the hash + // function, but with the value for "b=" (the signature) left out. The parser + // tracks all data that is read, except when drop is true. + // ../rfc/6376:997 + // Surrounding whitespace must be cleared as well. 
../rfc/6376:1659 + // Note: The RFC says "surrounding" whitespace, but whitespace is only allowed + // before the value as part of the ABNF production for "b". Presumably the + // intention is to ignore the trailing "[FWS]" for the tag-spec production, + // ../rfc/6376:656 + // Another indication is the term "value portion", ../rfc/6376:1667. It appears to + // mean everything after the "b=" part, instead of the actual value (either encoded + // or decoded). + p.drop = true + p.fws() + ds.Signature = p.xbase64() + p.fws() + p.drop = false + case "bh": + // ../rfc/6376:1076 + ds.BodyHash = p.xbase64() + case "c": + // ../rfc/6376:1088 + ds.Canonicalization = p.xcanonical() + // ../rfc/6376:810 + case "d": + // ../rfc/6376:1105 + ds.Domain = p.xdomain() + case "h": + // ../rfc/6376:1134 + ds.SignedHeaders = p.xsignedHeaderFields() + case "i": + // ../rfc/6376:1171 + id := p.xauid() + ds.Identity = &id + case "l": + // ../rfc/6376:1244 + ds.Length = p.xbodyLength() + case "q": + // ../rfc/6376:1268 + ds.QueryMethods = p.xqueryMethods() + case "s": + // ../rfc/6376:1300 + ds.Selector = p.xselector() + case "t": + // ../rfc/6376:1310 + ds.SignTime = p.xtimestamp() + case "x": + // ../rfc/6376:1327 + ds.ExpireTime = p.xtimestamp() + case "z": + // ../rfc/6376:1361 + ds.CopiedHeaders = p.xcopiedHeaderFields() + default: + // We must ignore unknown fields. 
../rfc/6376:692 ../rfc/6376:1022 + p.xchar() // ../rfc/6376-eid5070 + for !p.empty() && !p.hasPrefix(";") { + p.xchar() + } + } + p.fws() + + if p.empty() { + break + } + p.xtake(";") + if p.empty() { + break + } + } + + // ../rfc/6376:2532 + required := []string{"v", "a", "b", "bh", "d", "h", "s"} + for _, req := range required { + if _, ok := seen[req]; !ok { + xerrorf("%w: %q", errSigMissingTag, req) + } + } + + if strings.EqualFold(ds.AlgorithmHash, "sha1") && len(ds.BodyHash) != 20 { + xerrorf("%w: got %d bytes, must be 20 for sha1", errSigBodyHash, len(ds.BodyHash)) + } else if strings.EqualFold(ds.AlgorithmHash, "sha256") && len(ds.BodyHash) != 32 { + xerrorf("%w: got %d bytes, must be 32 for sha256", errSigBodyHash, len(ds.BodyHash)) + } + + // ../rfc/6376:1337 + if ds.SignTime >= 0 && ds.ExpireTime >= 0 && ds.SignTime >= ds.ExpireTime { + xerrorf("%w", errSigExpired) + } + + // Default identity is "@" plus domain. We don't set this value because we want to + // keep the distinction between absent value. 
+ // ../rfc/6376:1172 ../rfc/6376:2537 ../rfc/6376:2541 + if ds.Identity != nil && ds.Identity.Domain.ASCII != ds.Domain.ASCII && !strings.HasSuffix(ds.Identity.Domain.ASCII, "."+ds.Domain.ASCII) { + xerrorf("%w: identity domain %q not under domain %q", errSigIdentityDomain, ds.Identity.Domain.ASCII, ds.Domain.ASCII) + } + + return ds, []byte(p.tracked), nil +} diff --git a/dkim/sig_test.go b/dkim/sig_test.go new file mode 100644 index 0000000..e69df0d --- /dev/null +++ b/dkim/sig_test.go @@ -0,0 +1,180 @@ +package dkim + +import ( + "encoding/base64" + "errors" + "reflect" + "strings" + "testing" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +func TestSig(t *testing.T) { + test := func(s string, smtputf8 bool, expSig *Sig, expErr error) { + t.Helper() + + isParseErr := func(err error) bool { + _, ok := err.(parseErr) + return ok + } + + sig, _, err := parseSignature([]byte(s), smtputf8) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) && !(isParseErr(err) && isParseErr(expErr)) { + t.Fatalf("got err %v, expected %v", err, expErr) + } + if !reflect.DeepEqual(sig, expSig) { + t.Fatalf("got sig %#v, expected %#v", sig, expSig) + } + + if sig == nil { + return + } + h, err := sig.Header() + if err != nil { + t.Fatalf("making signature header: %v", err) + } + nsig, _, err := parseSignature([]byte(h), smtputf8) + if err != nil { + t.Fatalf("parse signature again: %v", err) + } + if !reflect.DeepEqual(nsig, sig) { + t.Fatalf("parsed signature again, got %#v, expected %#v", nsig, sig) + } + } + + xbase64 := func(s string) []byte { + t.Helper() + buf, err := base64.StdEncoding.DecodeString(s) + if err != nil { + t.Fatalf("parsing base64: %v", err) + } + return buf + } + + xdomain := func(s string) dns.Domain { + t.Helper() + d, err := dns.ParseDomain(s) + if err != nil { + t.Fatalf("parsing domain: %v", err) + } + return d + } + + var empty smtp.Localpart + sig1 := &Sig{ + Version: 1, + AlgorithmSign: "ed25519", + 
AlgorithmHash: "sha256", + Signature: xbase64("dGVzdAo="), + BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="), + Domain: xdomain("mox.example"), + SignedHeaders: []string{"from", "to", "cc", "bcc", "date", "subject", "message-id"}, + Selector: xdomain("test"), + Canonicalization: "simple/relaxed", + Length: 10, + Identity: &Identity{&empty, xdomain("sub.mox.example")}, + QueryMethods: []string{"dns/txt", "other"}, + SignTime: 10, + ExpireTime: 100, + CopiedHeaders: []string{"From:", "Subject:test | with pipe"}, + } + test("dkim-signature: v = 1 ; a=ed25519-sha256; s=test; d=mox.example; h=from:to:cc:bcc:date:subject:message-id; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q= ; c=simple/relaxed; l=10; i=\"\"@sub.mox.example; q= dns/txt:other; t=10; x=100; z=From:|Subject:test=20=7C=20with=20pipe; unknown = must be ignored \r\n", true, sig1, nil) + + ulp := smtp.Localpart("møx") + sig2 := &Sig{ + Version: 1, + AlgorithmSign: "ed25519", + AlgorithmHash: "sha256", + Signature: xbase64("dGVzdAo="), + BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="), + Domain: xdomain("xn--mx-lka.example"), // møx.example + SignedHeaders: []string{"from"}, + Selector: xdomain("xn--tst-bma"), // tést + Identity: &Identity{&ulp, xdomain("xn--tst-bma.xn--mx-lka.example")}, // tést.møx.example + Canonicalization: "simple/simple", + Length: -1, + SignTime: -1, + ExpireTime: -1, + } + test("dkim-signature: v = 1 ; a=ed25519-sha256; s=xn--tst-bma; d=xn--mx-lka.example; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q= ; i=møx@xn--tst-bma.xn--mx-lka.example;\r\n", true, sig2, nil) + test("dkim-signature: v = 1 ; a=ed25519-sha256; s=xn--tst-bma; d=xn--mx-lka.example; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q= ; i=møx@xn--tst-bma.xn--mx-lka.example;\r\n", false, nil, parseErr("")) // No UTF-8 allowed. 
+ + multiatom := smtp.Localpart("a.b.c") + sig3 := &Sig{ + Version: 1, + AlgorithmSign: "ed25519", + AlgorithmHash: "sha256", + Signature: xbase64("dGVzdAo="), + BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="), + Domain: xdomain("mox.example"), + SignedHeaders: []string{"from"}, + Selector: xdomain("test"), + Identity: &Identity{&multiatom, xdomain("mox.example")}, + Canonicalization: "simple/simple", + Length: -1, + SignTime: -1, + ExpireTime: -1, + } + test("dkim-signature: v = 1 ; a=ed25519-sha256; s=test; d=mox.example; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q= ; i=a.b.c@mox.example\r\n", true, sig3, nil) + + quotedlp := smtp.Localpart(`test "\test`) + sig4 := &Sig{ + Version: 1, + AlgorithmSign: "ed25519", + AlgorithmHash: "sha256", + Signature: xbase64("dGVzdAo="), + BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="), + Domain: xdomain("mox.example"), + SignedHeaders: []string{"from"}, + Selector: xdomain("test"), + Identity: &Identity{&quotedlp, xdomain("mox.example")}, + Canonicalization: "simple/simple", + Length: -1, + SignTime: -1, + ExpireTime: -1, + } + test("dkim-signature: v = 1 ; a=ed25519-sha256; s=test; d=mox.example; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q= ; i=\"test \\\"\\\\test\"@mox.example\r\n", true, sig4, nil) + + test("", true, nil, errSigMissingCRLF) + test("other: ...\r\n", true, nil, errSigHeader) + test("dkim-signature: v=2\r\n", true, nil, errSigUnknownVersion) + test("dkim-signature: v=1\r\n", true, nil, errSigMissingTag) + test("dkim-signature: v=1;v=1\r\n", true, nil, errSigDuplicateTag) + test("dkim-signature: v=1; d=mox.example; i=@unrelated.example; s=test; a=ed25519-sha256; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q=\r\n", true, nil, errSigIdentityDomain) + test("dkim-signature: v=1; t=10; x=9; d=mox.example; s=test; a=ed25519-sha256; h=from; b=dGVzdAo=; bh=LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q=\r\n", true, nil, 
errSigExpired) + test("dkim-signature: v=1; d=møx.example\r\n", true, nil, parseErr("")) // Unicode domain not allowed. + test("dkim-signature: v=1; s=tést\r\n", true, nil, parseErr("")) // Unicode selector not allowed. + test("dkim-signature: v=1; ;\r\n", true, nil, parseErr("")) // Empty tag not allowed. + test("dkim-signature: v=1; \r\n", true, nil, parseErr("")) // Cannot have whitespace after last colon. + test("dkim-signature: v=1; d=mox.example; s=test; a=ed25519-sha256; h=from; b=dGVzdAo=; bh=dGVzdAo=\r\n", true, nil, errSigBodyHash) + test("dkim-signature: v=1; d=mox.example; s=test; a=rsa-sha1; h=from; b=dGVzdAo=; bh=dGVzdAo=\r\n", true, nil, errSigBodyHash) +} + +func TestCopiedHeadersSig(t *testing.T) { + // ../rfc/6376:1391 + sigHeader := strings.ReplaceAll(`DKIM-Signature: v=1; a=rsa-sha256; d=example.net; s=brisbane; + c=simple; q=dns/txt; i=@eng.example.net; + t=1117574938; x=1118006938; + h=from:to:subject:date; + z=From:foo@eng.example.net|To:joe@example.com| + Subject:demo=20run|Date:July=205,=202005=203:44:08=20PM=20-0700; + bh=MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=; + b=dzdVyOfAKCdLXdJOc9G2q8LoXSlEniSbav+yuU4zGeeruD00lszZVoG4ZHRNiYzR +`, "\n", "\r\n") + + sig, _, err := parseSignature([]byte(sigHeader), false) + if err != nil { + t.Fatalf("parsing dkim signature with copied headers: %v", err) + } + exp := []string{ + "From:foo@eng.example.net", + "To:joe@example.com", + "Subject:demo run", + "Date:July 5, 2005 3:44:08 PM -0700", + } + if !reflect.DeepEqual(sig.CopiedHeaders, exp) { + t.Fatalf("copied headers, got %v, expected %v", sig.CopiedHeaders, exp) + } +} diff --git a/dkim/txt.go b/dkim/txt.go new file mode 100644 index 0000000..9fc3690 --- /dev/null +++ b/dkim/txt.go @@ -0,0 +1,278 @@ +package dkim + +import ( + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "strings" +) + +// Record is a DKIM DNS record, served on ._domainkey. 
for a +// given selector and domain (s= and d= in the DKIM-Signature). +// +// The record is a semicolon-separated list of "="-separated field value pairs. +// Strings should be compared case-insensitively, e.g. k=ed25519 is equivalent to k=ED25519. +// +// Example: +// +// v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504= +type Record struct { + Version string // Version, fixed "DKIM1" (case sensitive). Field "v". + Hashes []string // Acceptable hash algorithms, e.g. "sha1", "sha256". Optional, defaults to all algorithms. Field "h". + Key string // Key type, "rsa" or "ed25519". Optional, default "rsa". Field "k". + Notes string // Debug notes. Field "n". + Pubkey []byte // Public key, as base64 in record. If empty, the key has been revoked. Field "p". + Services []string // Service types. Optional, default "*" for all services. Other values: "email". Field "s". + Flags []string // Flags, colon-separated. Optional, default is no flags. Other values: "y" for testing DKIM, "s" for "i=" must have same domain as "d" in signatures. Field "t". + + PublicKey any `json:"-"` // Parsed form of public key, an *rsa.PublicKey or ed25519.PublicKey. +} + +// ../rfc/6376:1438 + +// ServiceAllowed returns whether service s is allowed by this key. +// +// The optional field "s" can specify purposes for which the key can be used. If +// value was specified, both "*" and "email" are enough for use with DKIM. +func (r *Record) ServiceAllowed(s string) bool { + if len(r.Services) == 0 { + return true + } + for _, ss := range r.Services { + if ss == "*" || strings.EqualFold(s, ss) { + return true + } + } + return false +} + +// Record returns a DNS TXT record that should be served at +// ._domainkey.. +// +// Only values that are not the default values are included. 
+func (r *Record) Record() (string, error) { + var l []string + add := func(s string) { + l = append(l, s) + } + + if r.Version != "DKIM1" { + return "", fmt.Errorf("bad version, must be \"DKIM1\"") + } + add("v=DKIM1") + if len(r.Hashes) > 0 { + add("h=" + strings.Join(r.Hashes, ":")) + } + if r.Key != "" && !strings.EqualFold(r.Key, "rsa") { + add("k=" + r.Key) + } + if r.Notes != "" { + add("n=" + qpSection(r.Notes)) + } + if len(r.Services) > 0 && (len(r.Services) != 1 || r.Services[0] != "*") { + add("s=" + strings.Join(r.Services, ":")) + } + if len(r.Flags) > 0 { + add("t=" + strings.Join(r.Flags, ":")) + } + // A missing public key is valid, it means the key has been revoked. ../rfc/6376:1501 + pk := r.Pubkey + if len(pk) == 0 && r.PublicKey != nil { + switch k := r.PublicKey.(type) { + case *rsa.PublicKey: + var err error + pk, err = x509.MarshalPKIXPublicKey(k) + if err != nil { + return "", fmt.Errorf("marshal rsa public key: %v", err) + } + case ed25519.PublicKey: + pk = []byte(k) + default: + return "", fmt.Errorf("unknown public key type %T", r.PublicKey) + } + } + add("p=" + base64.StdEncoding.EncodeToString(pk)) + return strings.Join(l, ";"), nil +} + +func qpSection(s string) string { + const hex = "0123456789ABCDEF" + + // ../rfc/2045:1260 + var r string + for i, b := range []byte(s) { + if i > 0 && (b == ' ' || b == '\t') || b > ' ' && b < 0x7f && b != '=' { + r += string(rune(b)) + } else { + r += "=" + string(hex[b>>4]) + string(hex[(b>>0)&0xf]) + } + } + return r +} + +var ( + errRecordDuplicateTag = errors.New("duplicate tag") + errRecordMissingField = errors.New("missing field") + errRecordBadPublicKey = errors.New("bad public key") + errRecordUnknownAlgorithm = errors.New("unknown algorithm") + errRecordVersionFirst = errors.New("first field must be version") +) + +// ParseRecord parses a DKIM DNS TXT record. +// +// If the record is a dkim record, but an error occurred, isdkim will be true and +// err will be the error. 
Such errors must be treated differently from parse errors +// where the record does not appear to be DKIM, which can happen with misconfigured +// DNS (e.g. wildcard records). +func ParseRecord(s string) (record *Record, isdkim bool, err error) { + defer func() { + x := recover() + if x == nil { + return + } + if xerr, ok := x.(error); ok { + record = nil + err = xerr + return + } + panic(x) + }() + + xerrorf := func(format string, args ...any) { + panic(fmt.Errorf(format, args...)) + } + + record = &Record{ + Version: "DKIM1", + Key: "rsa", + Services: []string{"*"}, + } + + p := parser{s: s, drop: true} + seen := map[string]struct{}{} + // ../rfc/6376:655 + // ../rfc/6376:656 ../rfc/6376-eid5070 + // ../rfc/6376:658 ../rfc/6376-eid5070 + // ../rfc/6376:1438 + for { + p.fws() + k := p.xtagName() + p.fws() + p.xtake("=") + p.fws() + // Keys are case-sensitive: ../rfc/6376:679 + if _, ok := seen[k]; ok { + // Duplicates not allowed: ../rfc/6376:683 + xerrorf("%w: %q", errRecordDuplicateTag, k) + break + } + seen[k] = struct{}{} + // Version must be the first. + switch k { + case "v": + // ../rfc/6376:1443 + v := p.xtake("DKIM1") + // Version being set is a signal this appears to be a valid record. We must not + // treat e.g. DKIM1.1 as valid, so we explicitly check there is no more data before + // we decide this record is DKIM. + p.fws() + if !p.empty() { + p.xtake(";") + } + record.Version = v + if len(seen) != 1 { + // If version is present, it must be the first. 
+ xerrorf("%w", errRecordVersionFirst) + } + isdkim = true + if p.empty() { + break + } + continue + + case "h": + // ../rfc/6376:1463 + record.Hashes = []string{p.xhyphenatedWord()} + for p.peekfws(":") { + p.fws() + p.xtake(":") + p.fws() + record.Hashes = append(record.Hashes, p.xhyphenatedWord()) + } + case "k": + // ../rfc/6376:1478 + record.Key = p.xhyphenatedWord() + case "n": + // ../rfc/6376:1491 + record.Notes = p.xqpSection() + case "p": + // ../rfc/6376:1501 + record.Pubkey = p.xbase64() + case "s": + // ../rfc/6376:1533 + record.Services = []string{p.xhyphenatedWord()} + for p.peekfws(":") { + p.fws() + p.xtake(":") + p.fws() + record.Services = append(record.Services, p.xhyphenatedWord()) + } + case "t": + // ../rfc/6376:1554 + record.Flags = []string{p.xhyphenatedWord()} + for p.peekfws(":") { + p.fws() + p.xtake(":") + p.fws() + record.Flags = append(record.Flags, p.xhyphenatedWord()) + } + default: + // We must ignore unknown fields. ../rfc/6376:692 ../rfc/6376:1439 + for !p.empty() && !p.hasPrefix(";") { + p.xchar() + } + } + + isdkim = true + p.fws() + if p.empty() { + break + } + p.xtake(";") + if p.empty() { + break + } + } + + if _, ok := seen["p"]; !ok { + xerrorf("%w: public key", errRecordMissingField) + } + + switch strings.ToLower(record.Key) { + case "", "rsa": + if len(record.Pubkey) == 0 { + // Revoked key, nothing to do. + } else if pk, err := x509.ParsePKIXPublicKey(record.Pubkey); err != nil { + xerrorf("%w: %s", errRecordBadPublicKey, err) + } else if _, ok := pk.(*rsa.PublicKey); !ok { + xerrorf("%w: got %T, need an RSA key", errRecordBadPublicKey, record.PublicKey) + } else { + record.PublicKey = pk + } + case "ed25519": + if len(record.Pubkey) == 0 { + // Revoked key, nothing to do. 
+ } else if len(record.Pubkey) != ed25519.PublicKeySize { + xerrorf("%w: got %d bytes, need %d", errRecordBadPublicKey, len(record.Pubkey), ed25519.PublicKeySize) + } else { + record.PublicKey = ed25519.PublicKey(record.Pubkey) + } + default: + xerrorf("%w: %q", errRecordUnknownAlgorithm, record.Key) + } + + return record, true, nil +} diff --git a/dkim/txt_test.go b/dkim/txt_test.go new file mode 100644 index 0000000..97d358b --- /dev/null +++ b/dkim/txt_test.go @@ -0,0 +1,133 @@ +package dkim + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "reflect" + "testing" +) + +func TestParseRecord(t *testing.T) { + test := func(txt string, expRec *Record, expIsDKIM bool, expErr error) { + t.Helper() + + isParseErr := func(err error) bool { + _, ok := err.(parseErr) + return ok + } + + r, isdkim, err := ParseRecord(txt) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) && !(isParseErr(err) && isParseErr(expErr)) { + t.Fatalf("parsing record: got error %v %#v, expected %#v, txt %q", err, err, expErr, txt) + } + if isdkim != expIsDKIM { + t.Fatalf("got isdkim %v, expected %v", isdkim, expIsDKIM) + } + if r != nil && expRec != nil { + expRec.PublicKey = r.PublicKey + } + if !reflect.DeepEqual(r, expRec) { + t.Fatalf("got record %#v, expected %#v, for txt %q", r, expRec, txt) + } + if r != nil { + pk := r.Pubkey + for i := 0; i < 2; i++ { + ntxt, err := r.Record() + if err != nil { + t.Fatalf("making record: %v", err) + } + nr, _, _ := ParseRecord(ntxt) + r.Pubkey = pk + if !reflect.DeepEqual(r, nr) { + t.Fatalf("after packing and parsing, got %#v, expected %#v", nr, r) + } + + // Generate again, now based on parsed public key. 
+ pk = r.Pubkey + r.Pubkey = nil + } + } + } + + xbase64 := func(s string) []byte { + t.Helper() + buf, err := base64.StdEncoding.DecodeString(s) + if err != nil { + t.Fatalf("parsing base64: %v", err) + } + return buf + } + + test("", nil, false, parseErr("")) + test("v=DKIM1", nil, true, errRecordMissingField) // Missing p=. + test("p=; v=DKIM1", nil, true, errRecordVersionFirst) + test("v=DKIM1; p=; ", nil, true, parseErr("")) // Whitespace after last ; is not allowed. + test("v=dkim1; p=; ", nil, false, parseErr("")) // dkim1-value is case-sensitive. + test("v=DKIM1; p=JDcbZ0Hpba5NKXI4UAW3G0IDhhFOxhJTDybZEwe1FeA=", nil, true, errRecordBadPublicKey) // Not an rsa key. + test("v=DKIM1; p=; p=", nil, true, errRecordDuplicateTag) // Duplicate tag. + test("v=DKIM1; k=ed25519; p=HbawiMnQXTCopHTkR0jlKQ==", nil, true, errRecordBadPublicKey) // Short key. + test("v=DKIM1; k=unknown; p=", nil, true, errRecordUnknownAlgorithm) + + empty := &Record{ + Version: "DKIM1", + Key: "rsa", + Services: []string{"*"}, + Pubkey: []uint8{}, + } + test("V=DKIM2; p=;", empty, true, nil) // Tag names are case-sensitive. 
+ + record := &Record{ + Version: "DKIM1", + Hashes: []string{"sha1", "SHA256", "unknown"}, + Key: "ed25519", + Notes: "notes...", + Pubkey: xbase64("JDcbZ0Hpba5NKXI4UAW3G0IDhhFOxhJTDybZEwe1FeA="), + Services: []string{"email", "tlsrpt"}, + Flags: []string{"y", "t"}, + } + test("v = DKIM1 ; h\t=\tsha1 \t:\t SHA256:unknown\t;k=ed25519; n = notes...; p = JDc bZ0Hpb a5NK\tXI4UAW3G0IDhhFOxhJTDybZEwe1FeA= ;s = email : tlsrpt; t = y\t: t; unknown = bogus;", record, true, nil) + + edpkix, err := x509.MarshalPKIXPublicKey(record.PublicKey) + if err != nil { + t.Fatalf("marshal ed25519 public key") + } + recordx := &Record{ + Version: "DKIM1", + Key: "rsa", + Pubkey: edpkix, + } + txtx, err := recordx.Record() + if err != nil { + t.Fatalf("making record: %v", err) + } + test(txtx, nil, true, errRecordBadPublicKey) + + record2 := &Record{ + Version: "DKIM1", + Key: "rsa", + Services: []string{"*"}, + Pubkey: xbase64("MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy3Z9ffZe8gUTJrdGuKj6IwEembmKYpp0jMa8uhudErcI4gFVUaFiiRWxc4jP/XR9NAEv3XwHm+CVcHu+L/n6VWt6g59U7vHXQicMfKGmEp2VplsgojNy/Y5X9HdVYM0azsI47NcJCDW9UVfeOHdOSgFME4F8dNtUKC4KTB2d1pqj/yixz+V8Sv8xkEyPfSRHcNXIw0LvelqJ1MRfN3hO/3uQSVrPYYk4SyV0b6wfnkQs28fpiIpGQvzlGI5WkrdOQT5k4YHaEvZDLNdwiMeVZOEL7dDoFs2mQsovm+tH0StUAZTnr61NLVFfD5V6Ip1V9zVtspPHvYSuOWwyArFZ9QIDAQAB"), + } + test("v=DKIM1;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy3Z9ffZe8gUTJrdGuKj6IwEembmKYpp0jMa8uhudErcI4gFVUaFiiRWxc4jP/XR9NAEv3XwHm+CVcHu+L/n6VWt6g59U7vHXQicMfKGmEp2VplsgojNy/Y5X9HdVYM0azsI47NcJCDW9UVfeOHdOSgFME4F8dNtUKC4KTB2d1pqj/yixz+V8Sv8xkEyPfSRHcNXIw0LvelqJ1MRfN3hO/3uQSVrPYYk4SyV0b6wfnkQs28fpiIpGQvzlGI5WkrdOQT5k4YHaEvZDLNdwiMeVZOEL7dDoFs2mQsovm+tH0StUAZTnr61NLVFfD5V6Ip1V9zVtspPHvYSuOWwyArFZ9QIDAQAB", record2, true, nil) + +} + +func TestQPSection(t *testing.T) { + var tests = []struct { + input string + expect string + }{ + {"test", "test"}, + {"hi=", "hi=3D"}, + {"hi there", "hi there"}, + {" hi", "=20hi"}, + {"t\x7f", "t=7F"}, + } + for _, v := range tests { 
+ r := qpSection(v.input) + if r != v.expect { + t.Fatalf("qpSection: input %q, expected %q, got %q", v.input, v.expect, r) + } + } +} diff --git a/dmarc/dmarc.go b/dmarc/dmarc.go new file mode 100644 index 0000000..81dddbf --- /dev/null +++ b/dmarc/dmarc.go @@ -0,0 +1,239 @@ +// Package dmarc implements DMARC (Domain-based Message Authentication, +// Reporting, and Conformance; RFC 7489) verification. +// +// DMARC is a mechanism for verifying ("authenticating") the address in the "From" +// message header, since users will look at that header to identify the sender of a +// message. DMARC compares the "From"-(sub)domain against the SPF and/or +// DKIM-validated domains, based on the DMARC policy that a domain has published in +// DNS as TXT record under "_dmarc.". A DMARC policy can also ask for +// feedback about evaluations by other email servers, for monitoring/debugging +// problems with email delivery. +package dmarc + +import ( + "context" + "errors" + "fmt" + mathrand "math/rand" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/publicsuffix" + "github.com/mjl-/mox/spf" +) + +var xlog = mlog.New("dmarc") + +var ( + metricDMARCVerify = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_dmarc_verify_duration_seconds", + Help: "DMARC verify, including lookup, duration and result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20}, + }, + []string{ + "status", + "reject", // yes/no + "use", // yes/no, if policy is used after random selection + }, + ) +) + +// link errata: +// ../rfc/7489-eid5440 ../rfc/7489:1585 + +// Lookup errors. +var ( + ErrNoRecord = errors.New("dmarc: no dmarc dns record") + ErrMultipleRecords = errors.New("dmarc: multiple dmarc dns records") // Must also be treated as if domain does not implement DMARC. 
+ ErrDNS = errors.New("dmarc: dns lookup") + ErrSyntax = errors.New("dmarc: malformed dmarc dns record") +) + +// Status is the result of DMARC policy evaluation, for use in an Authentication-Results header. +type Status string + +// ../rfc/7489:2339 + +const ( + StatusNone Status = "none" // No DMARC TXT DNS record found. + StatusPass Status = "pass" // SPF and/or DKIM pass with identifier alignment. + StatusFail Status = "fail" // Either both SPF and DKIM failed or identifier did not align with a pass. + StatusTemperror Status = "temperror" // Typically a DNS lookup. A later attempt may results in a conclusion. + StatusPermerror Status = "permerror" // Typically a malformed DMARC DNS record. +) + +// Result is a DMARC policy evaluation. +type Result struct { + // Whether to reject the message based on policies. If false, the message should + // not necessarily be accepted, e.g. due to reputation or content-based analysis. + Reject bool + // Result of DMARC validation. A message can fail validation, but still + // not be rejected, e.g. if the policy is "none". + Status Status + // Domain with the DMARC DNS record. May be the organizational domain instead of + // the domain in the From-header. + Domain dns.Domain + // Parsed DMARC record. + Record *Record + // Details about possible error condition, e.g. when parsing the DMARC record failed. + Err error +} + +// Lookup looks up the DMARC TXT record at "_dmarc." for the domain in the +// "From"-header of a message. +// +// If no DMARC record is found for the "From"-domain, another lookup is done at +// the organizational domain of the domain (if different). The organizational +// domain is determined using the public suffix list. E.g. for +// "sub.example.com", the organizational domain is "example.com". The returned +// domain is the domain with the DMARC record. 
+func Lookup(ctx context.Context, resolver dns.Resolver, from dns.Domain) (status Status, domain dns.Domain, record *Record, txt string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("dmarc lookup result", rerr, mlog.Field("fromdomain", from), mlog.Field("status", status), mlog.Field("domain", domain), mlog.Field("record", record), mlog.Field("duration", time.Since(start))) + }() + + // ../rfc/7489:859 ../rfc/7489:1370 + domain = from + status, record, txt, err := lookupRecord(ctx, resolver, domain) + if status != StatusNone { + return status, domain, record, txt, err + } + if record == nil { + // ../rfc/7489:761 ../rfc/7489:1377 + domain = publicsuffix.Lookup(ctx, from) + if domain == from { + return StatusNone, domain, nil, txt, err + } + + status, record, txt, err = lookupRecord(ctx, resolver, domain) + } + return status, domain, record, txt, err +} + +func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (Status, *Record, string, error) { + name := "_dmarc." + domain.ASCII + "." + txts, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name) + if err != nil && !dns.IsNotFound(err) { + return StatusTemperror, nil, "", fmt.Errorf("%w: %s", ErrDNS, err) + } + var record *Record + var text string + var rerr error = ErrNoRecord + for _, txt := range txts { + r, isdmarc, err := ParseRecord(txt) + if !isdmarc { + // ../rfc/7489:1374 + continue + } else if err != nil { + return StatusPermerror, nil, text, fmt.Errorf("%w: %s", ErrSyntax, err) + } + if record != nil { + // ../ ../rfc/7489:1388 + return StatusNone, nil, "", ErrMultipleRecords + } + text = txt + record = r + rerr = nil + } + return StatusNone, record, text, rerr +} + +// Verify evaluates the DMARC policy for the domain in the From-header of a +// message given the DKIM and SPF evaluation results. +// +// applyRandomPercentage determines whether the records "pct" is honored. 
This +// field specifies the percentage of messages the DMARC policy is applied to. It +// is used for slow rollout of DMARC policies and should be honored during normal +// email processing +// +// Verify always returns the result of verifying the DMARC policy +// against the message (for inclusion in Authentication-Result headers). +// +// useResult indicates if the result should be applied in a policy decision. +func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, applyRandomPercentage bool) (useResult bool, result Result) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + use := "no" + if useResult { + use = "yes" + } + reject := "no" + if result.Reject { + reject = "yes" + } + metricDMARCVerify.WithLabelValues(string(result.Status), reject, use).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("dmarc verify result", result.Err, mlog.Field("fromdomain", from), mlog.Field("dkimresults", dkimResults), mlog.Field("spfresult", spfResult), mlog.Field("status", result.Status), mlog.Field("reject", result.Reject), mlog.Field("use", useResult), mlog.Field("duration", time.Since(start))) + }() + + status, recordDomain, record, _, err := Lookup(ctx, resolver, from) + if record == nil { + return false, Result{false, status, recordDomain, record, err} + } + result.Domain = recordDomain + result.Record = record + + // Record can request sampling of messages to apply policy. + // See ../rfc/7489:1432 + useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand.Intn(100) < record.Percentage + + // We reject treat "quarantine" and "reject" the same. Thus, we also don't + // "downgrade" from reject to quarantine if this message was sampled out. 
+ // ../rfc/7489:1446 ../rfc/7489:1024 + if recordDomain != from && record.SubdomainPolicy != PolicyEmpty { + result.Reject = record.SubdomainPolicy != PolicyNone + } else { + result.Reject = record.Policy != PolicyNone + } + + // ../rfc/7489:1338 + result.Status = StatusFail + if spfResult == spf.StatusTemperror { + result.Status = StatusTemperror + result.Reject = false + } + + // Below we can do a bunch of publicsuffix lookups. Cache the results, mostly to + // reduce log polution. + pubsuffixes := map[dns.Domain]dns.Domain{} + pubsuffix := func(name dns.Domain) dns.Domain { + if r, ok := pubsuffixes[name]; ok { + return r + } + r := publicsuffix.Lookup(ctx, name) + pubsuffixes[name] = r + return r + } + + // ../rfc/7489:1319 + // ../rfc/7489:544 + if spfResult == spf.StatusPass && spfIdentity != nil && (*spfIdentity == from || result.Record.ASPF == "r" && pubsuffix(from) == pubsuffix(*spfIdentity)) { + result.Reject = false + result.Status = StatusPass + return + } + + for _, dkimResult := range dkimResults { + if dkimResult.Status == dkim.StatusTemperror { + result.Reject = false + result.Status = StatusTemperror + continue + } + // ../rfc/7489:511 + if dkimResult.Status == dkim.StatusPass && dkimResult.Sig != nil && (dkimResult.Sig.Domain == from || result.Record.ADKIM == "r" && pubsuffix(from) == pubsuffix(dkimResult.Sig.Domain)) { + // ../rfc/7489:535 + result.Reject = false + result.Status = StatusPass + return + } + } + return +} diff --git a/dmarc/dmarc_test.go b/dmarc/dmarc_test.go new file mode 100644 index 0000000..067c3a8 --- /dev/null +++ b/dmarc/dmarc_test.go @@ -0,0 +1,275 @@ +package dmarc + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/spf" +) + +func TestLookup(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_dmarc.simple.example.": {"v=DMARC1; p=none;"}, + "_dmarc.one.example.": {"v=DMARC1; p=none;", "other"}, + 
"_dmarc.temperror.example.": {"v=DMARC1; p=none;"}, + "_dmarc.multiple.example.": {"v=DMARC1; p=none;", "v=DMARC1; p=none;"}, + "_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"}, + "_dmarc.example.com.": {"v=DMARC1; p=none;"}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "_dmarc.temperror.example."}: {}, + }, + } + + test := func(d string, expStatus Status, expDomain string, expRecord *Record, expErr error) { + t.Helper() + + status, dom, record, _, err := Lookup(context.Background(), resolver, dns.Domain{ASCII: d}) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %#v, expected %#v", err, expErr) + } + expd := dns.Domain{ASCII: expDomain} + if status != expStatus || dom != expd || !reflect.DeepEqual(record, expRecord) { + t.Fatalf("got status %v, dom %v, record %#v, expected %v %v %#v", status, dom, record, expStatus, expDomain, expRecord) + } + } + + r := DefaultRecord + r.Policy = PolicyNone + test("simple.example", StatusNone, "simple.example", &r, nil) + test("one.example", StatusNone, "one.example", &r, nil) + test("absent.example", StatusNone, "absent.example", nil, ErrNoRecord) + test("multiple.example", StatusNone, "multiple.example", nil, ErrMultipleRecords) + test("malformed.example", StatusPermerror, "malformed.example", nil, ErrSyntax) + test("temperror.example", StatusTemperror, "temperror.example", nil, ErrDNS) + test("sub.example.com", StatusNone, "example.com", &r, nil) // Policy published at organizational domain, public suffix. 
+} + +func TestVerify(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_dmarc.reject.example.": {"v=DMARC1; p=reject"}, + "_dmarc.strict.example.": {"v=DMARC1; p=reject; adkim=s; aspf=s"}, + "_dmarc.test.example.": {"v=DMARC1; p=reject; pct=0"}, + "_dmarc.subnone.example.": {"v=DMARC1; p=reject; sp=none"}, + "_dmarc.none.example.": {"v=DMARC1; p=none"}, + "_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus"}, + "_dmarc.example.com.": {"v=DMARC1; p=reject"}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "_dmarc.temperror.example."}: {}, + }, + } + + equalResult := func(got, exp Result) bool { + if reflect.DeepEqual(got, exp) { + return true + } + if got.Err != nil && exp.Err != nil && (got.Err == exp.Err || errors.Is(got.Err, exp.Err)) { + got.Err = nil + exp.Err = nil + return reflect.DeepEqual(got, exp) + } + return false + } + + test := func(fromDom string, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, expUseResult bool, expResult Result) { + t.Helper() + + from, err := dns.ParseDomain(fromDom) + if err != nil { + t.Fatalf("parsing domain: %v", err) + } + useResult, result := Verify(context.Background(), resolver, from, dkimResults, spfResult, spfIdentity, true) + if useResult != expUseResult || !equalResult(result, expResult) { + t.Fatalf("verify: got useResult %v, result %#v, expected %v %#v", useResult, result, expUseResult, expResult) + } + } + + // Basic case, reject policy and no dkim or spf results. + reject := DefaultRecord + reject.Policy = PolicyReject + test("reject.example", + []dkim.Result{}, + spf.StatusNone, + nil, + true, Result{true, StatusFail, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // Accept with spf pass. + test("reject.example", + []dkim.Result{}, + spf.StatusPass, + &dns.Domain{ASCII: "sub.reject.example"}, + true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // Accept with dkim pass. 
+ test("reject.example", + []dkim.Result{ + { + Status: dkim.StatusPass, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "sub.reject.example"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusFail, + &dns.Domain{ASCII: "reject.example"}, + true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // Reject due to spf and dkim "strict". + strict := DefaultRecord + strict.Policy = PolicyReject + strict.ADKIM = AlignStrict + strict.ASPF = AlignStrict + test("strict.example", + []dkim.Result{ + { + Status: dkim.StatusPass, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "sub.strict.example"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusPass, + &dns.Domain{ASCII: "sub.strict.example"}, + true, Result{true, StatusFail, dns.Domain{ASCII: "strict.example"}, &strict, nil}, + ) + + // No dmarc policy, nothing to say. + test("absent.example", + []dkim.Result{}, + spf.StatusNone, + nil, + false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord}, + ) + + // No dmarc policy, spf pass does nothing. + test("absent.example", + []dkim.Result{}, + spf.StatusPass, + &dns.Domain{ASCII: "absent.example"}, + false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord}, + ) + + none := DefaultRecord + none.Policy = PolicyNone + // Policy none results in no reject. + test("none.example", + []dkim.Result{}, + spf.StatusPass, + &dns.Domain{ASCII: "none.example"}, + true, Result{false, StatusPass, dns.Domain{ASCII: "none.example"}, &none, nil}, + ) + + // No actual reject due to pct=0. + testr := DefaultRecord + testr.Policy = PolicyReject + testr.Percentage = 0 + test("test.example", + []dkim.Result{}, + spf.StatusNone, + nil, + false, Result{true, StatusFail, dns.Domain{ASCII: "test.example"}, &testr, nil}, + ) + + // No reject if subdomain has "none" policy. 
+ sub := DefaultRecord + sub.Policy = PolicyReject + sub.SubdomainPolicy = PolicyNone + test("sub.subnone.example", + []dkim.Result{}, + spf.StatusFail, + &dns.Domain{ASCII: "sub.subnone.example"}, + true, Result{false, StatusFail, dns.Domain{ASCII: "subnone.example"}, &sub, nil}, + ) + + // No reject if spf temperror and no other pass. + test("reject.example", + []dkim.Result{}, + spf.StatusTemperror, + &dns.Domain{ASCII: "mail.reject.example"}, + true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // No reject if dkim temperror and no other pass. + test("reject.example", + []dkim.Result{ + { + Status: dkim.StatusTemperror, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "sub.reject.example"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusNone, + nil, + true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // No reject if spf temperror but still dkim pass. + test("reject.example", + []dkim.Result{ + { + Status: dkim.StatusPass, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "sub.reject.example"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusTemperror, + &dns.Domain{ASCII: "mail.reject.example"}, + true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // No reject if dkim temperror but still spf pass. + test("reject.example", + []dkim.Result{ + { + Status: dkim.StatusTemperror, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "sub.reject.example"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusPass, + &dns.Domain{ASCII: "mail.reject.example"}, + true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil}, + ) + + // Bad DMARC record results in permerror without reject. 
+ test("malformed.example", + []dkim.Result{}, + spf.StatusNone, + nil, + false, Result{false, StatusPermerror, dns.Domain{ASCII: "malformed.example"}, nil, ErrSyntax}, + ) + + // DKIM domain that is higher-level than organizational can not result in a pass. ../rfc/7489:525 + test("example.com", + []dkim.Result{ + { + Status: dkim.StatusPass, + Sig: &dkim.Sig{ // Just the minimum fields needed. + Domain: dns.Domain{ASCII: "com"}, + }, + Record: &dkim.Record{}, + }, + }, + spf.StatusNone, + nil, + true, Result{true, StatusFail, dns.Domain{ASCII: "example.com"}, &reject, nil}, + ) +} diff --git a/dmarc/fuzz_test.go b/dmarc/fuzz_test.go new file mode 100644 index 0000000..70be8ed --- /dev/null +++ b/dmarc/fuzz_test.go @@ -0,0 +1,17 @@ +package dmarc + +import ( + "testing" +) + +func FuzzParseRecord(f *testing.F) { + f.Add("") + f.Add("V = DMARC1; P = reject ;\tSP=none; unknown \t=\t ignored-future-value \t ; adkim=s; aspf=s; rua=mailto:dmarc-feedback@example.com ,\t\tmailto:tld-test@thirdparty.example.net!10m; RUF=mailto:auth-reports@example.com ,\t\tmailto:tld-test@thirdparty.example.net!0G; RI = 123; FO = 0:1:d:s ; RF= afrf : other; Pct = 0") + f.Add("v=DMARC1; rua=mailto:dmarc-feedback@example.com!99999999999999999999999999999999999999999999999") + f.Fuzz(func(t *testing.T, s string) { + r, _, err := ParseRecord(s) + if err == nil { + _ = r.String() + } + }) +} diff --git a/dmarc/parse.go b/dmarc/parse.go new file mode 100644 index 0000000..05c2a8e --- /dev/null +++ b/dmarc/parse.go @@ -0,0 +1,343 @@ +package dmarc + +import ( + "fmt" + "net/url" + "strconv" + "strings" +) + +type parseErr string + +func (e parseErr) Error() string { + return string(e) +} + +// ParseRecord parses a DMARC TXT record. +// +// Fields and values are are case-insensitive in DMARC are returned in lower case +// for easy comparison. +// +// DefaultRecord provides default values for tags not present in s. 
+func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) { + defer func() { + x := recover() + if x == nil { + return + } + if err, ok := x.(parseErr); ok { + rerr = err + return + } + panic(x) + }() + + r := DefaultRecord + p := newParser(s) + + // v= is required and must be first. ../rfc/7489:1099 + p.xtake("v") + p.wsp() + p.xtake("=") + p.wsp() + r.Version = p.xtakecase("DMARC1") + p.wsp() + p.xtake(";") + isdmarc = true + seen := map[string]bool{} + for { + p.wsp() + if p.empty() { + break + } + W := p.xword() + w := strings.ToLower(W) + if seen[w] { + // RFC does not say anything about duplicate tags. They can only confuse, so we + // don't allow them. + p.xerrorf("duplicate tag %q", W) + } + seen[w] = true + p.wsp() + p.xtake("=") + p.wsp() + switch w { + default: + // ../rfc/7489:924 implies that we should know how to parse unknown tags. + // The formal definition at ../rfc/7489:1127 does not allow for unknown tags. + // We just parse until the next semicolon or end. + for !p.empty() { + if p.peek(';') { + break + } + p.xtaken(1) + } + case "p": + if len(seen) != 1 { + // ../rfc/7489:1105 + p.xerrorf("p= (policy) must be first tag") + } + r.Policy = DMARCPolicy(p.xtakelist("none", "quarantine", "reject")) + case "sp": + r.SubdomainPolicy = DMARCPolicy(p.xkeyword()) + // note: we check if the value is valid before returning. 
+ case "rua": + r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri()) + p.wsp() + for p.take(",") { + p.wsp() + r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri()) + p.wsp() + } + case "ruf": + r.FailureReportAddresses = append(r.FailureReportAddresses, p.xuri()) + p.wsp() + for p.take(",") { + p.wsp() + r.FailureReportAddresses = append(r.FailureReportAddresses, p.xuri()) + p.wsp() + } + case "adkim": + r.ADKIM = Align(p.xtakelist("r", "s")) + case "aspf": + r.ASPF = Align(p.xtakelist("r", "s")) + case "ri": + r.AggregateReportingInterval = p.xnumber() + case "fo": + r.FailureReportingOptions = []string{p.xtakelist("0", "1", "d", "s")} + p.wsp() + for p.take(":") { + p.wsp() + r.FailureReportingOptions = append(r.FailureReportingOptions, p.xtakelist("0", "1", "d", "s")) + p.wsp() + } + case "rf": + r.ReportingFormat = []string{p.xkeyword()} + p.wsp() + for p.take(":") { + p.wsp() + r.ReportingFormat = append(r.ReportingFormat, p.xkeyword()) + p.wsp() + } + case "pct": + r.Percentage = p.xnumber() + if r.Percentage > 100 { + p.xerrorf("bad percentage %d", r.Percentage) + } + } + p.wsp() + if !p.take(";") && !p.empty() { + p.xerrorf("expected ;") + } + } + + // ../rfc/7489:1106 says "p" is required, but ../rfc/7489:1407 implies we must be + // able to parse a record without a "p" or with invalid "sp" tag. + sp := r.SubdomainPolicy + if !seen["p"] || sp != PolicyEmpty && sp != PolicyNone && sp != PolicyQuarantine && sp != PolicyReject { + if len(r.AggregateReportAddresses) > 0 { + r.Policy = PolicyNone + r.SubdomainPolicy = PolicyEmpty + } else { + p.xerrorf("invalid (subdomain)policy and no valid aggregate reporting address") + } + } + + return &r, true, nil +} + +type parser struct { + s string + lower string + o int +} + +// toLower lower cases bytes that are A-Z. strings.ToLower does too much. 
and +// would replace invalid bytes with unicode replacement characters, which would +// break our requirement that offsets into the original and upper case strings +// point to the same character. +func toLower(s string) string { + r := []byte(s) + for i, c := range r { + if c >= 'A' && c <= 'Z' { + r[i] = c + 0x20 + } + } + return string(r) +} + +func newParser(s string) *parser { + return &parser{ + s: s, + lower: toLower(s), + } +} + +func (p *parser) xerrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + if p.o < len(p.s) { + msg += fmt.Sprintf(" (remain %q)", p.s[p.o:]) + } + panic(parseErr(msg)) +} + +func (p *parser) empty() bool { + return p.o >= len(p.s) +} + +func (p *parser) peek(b byte) bool { + return p.o < len(p.s) && p.s[p.o] == b +} + +// case insensitive prefix +func (p *parser) prefix(s string) bool { + return strings.HasPrefix(p.lower[p.o:], s) +} + +func (p *parser) take(s string) bool { + if p.prefix(s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtaken(n int) string { + r := p.lower[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) xtake(s string) string { + if !p.prefix(s) { + p.xerrorf("expected %q", s) + } + return p.xtaken(len(s)) +} + +func (p *parser) xtakecase(s string) string { + if !strings.HasPrefix(p.s[p.o:], s) { + p.xerrorf("expected %q", s) + } + r := p.s[p.o : p.o+len(s)] + p.o += len(s) + return r +} + +// *WSP +func (p *parser) wsp() { + for !p.empty() && (p.s[p.o] == ' ' || p.s[p.o] == '\t') { + p.o++ + } +} + +// take one of the strings in l. 
+// take one of the strings in l, case-insensitively.
+func (p *parser) xtakelist(l ...string) string {
+	for _, s := range l {
+		if p.prefix(s) {
+			return p.xtaken(len(s))
+		}
+	}
+	p.xerrorf("expected one of %v", l)
+	panic("not reached")
+}
+
+// xtakefn1case consumes as many leading bytes as fn accepts (at least one),
+// returning them lower-cased, per the contract documented on ParseRecord.
+func (p *parser) xtakefn1case(fn func(byte, int) bool) string {
+	for i, b := range []byte(p.lower[p.o:]) {
+		if !fn(b, i) {
+			if i == 0 {
+				p.xerrorf("expected at least one char")
+			}
+			return p.xtaken(i)
+		}
+	}
+	if p.empty() {
+		p.xerrorf("expected at least 1 char")
+	}
+	// The entire remainder matched. Take it through xtaken so it is returned
+	// lower-cased like the early-return path above. Previously this returned
+	// p.s[p.o:] with original casing, so a record ending in e.g. "sp=NONE" was
+	// not recognized as a valid subdomain policy by ParseRecord's comparisons
+	// against the lowercase policy constants.
+	return p.xtaken(len(p.s) - p.o)
+}
+
+// used for the tag keys.
+func (p *parser) xword() string {
+	return p.xtakefn1case(func(c byte, i int) bool {
+		return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9'
+	})
+}
+
+// one or more digits.
+func (p *parser) xdigits() string {
+	return p.xtakefn1case(func(b byte, i int) bool {
+		return isdigit(b)
+	})
+}
+
+// ../rfc/7489:883
+// Syntax: ../rfc/7489:1132
+func (p *parser) xuri() URI {
+	// Ideally, we would simply parse an URI here. But a URI can contain a semicolon so
+	// could consume the rest of the DMARC record. Instead, we'll assume no one uses
+	// semicolons in URIs in DMARC records and first collect
+	// space/comma/semicolon/end-separated characters, then parse.
+ // ../rfc/3986:684 + v := p.xtakefn1case(func(b byte, i int) bool { + return b != ',' && b != ' ' && b != '\t' && b != ';' + }) + t := strings.SplitN(v, "!", 2) + u, err := url.Parse(t[0]) + if err != nil { + p.xerrorf("parsing uri %q: %s", t[0], err) + } + if u.Scheme == "" { + p.xerrorf("missing scheme in uri") + } + uri := URI{ + Address: t[0], + } + if len(t) == 2 { + o := t[1] + if o != "" { + c := o[len(o)-1] + switch c { + case 'k', 'K', 'm', 'M', 'g', 'G', 't', 'T': + uri.Unit = strings.ToLower(o[len(o)-1:]) + o = o[:len(o)-1] + } + } + uri.MaxSize, err = strconv.ParseUint(o, 10, 64) + if err != nil { + p.xerrorf("parsing max size for uri: %s", err) + } + } + return uri +} + +func (p *parser) xnumber() int { + digits := p.xdigits() + v, err := strconv.Atoi(digits) + if err != nil { + p.xerrorf("parsing %q: %s", digits, err) + } + return v +} + +func (p *parser) xkeyword() string { + // ../rfc/7489:1195, keyword is imported from smtp. + // ../rfc/5321:2287 + n := len(p.s) - p.o + return p.xtakefn1case(func(b byte, i int) bool { + return isalphadigit(b) || (b == '-' && i < n-1 && isalphadigit(p.s[p.o+i+1])) + }) +} + +func isdigit(b byte) bool { + return b >= '0' && b <= '9' +} + +func isalpha(b byte) bool { + return b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' +} + +func isalphadigit(b byte) bool { + return isdigit(b) || isalpha(b) +} diff --git a/dmarc/parse_test.go b/dmarc/parse_test.go new file mode 100644 index 0000000..6460c60 --- /dev/null +++ b/dmarc/parse_test.go @@ -0,0 +1,142 @@ +package dmarc + +import ( + "reflect" + "testing" +) + +func TestParse(t *testing.T) { + // ../rfc/7489:3224 + + // bad cases + bad := func(s string) { + t.Helper() + _, _, err := ParseRecord(s) + if err == nil { + t.Fatalf("got parse success, expected error") + } + } + bad("") + bad("v=") + bad("v=DMARC12") // "2" leftover + bad("v=DMARC1") // semicolon required + bad("v=dmarc1; p=none") // dmarc1 is case-sensitive + bad("v=DMARC1 p=none") // missing ; + 
bad("v=DMARC1;") // missing p, no rua + bad("v=DMARC1; sp=invalid") // invalid sp, no rua + bad("v=DMARC1; sp=reject; p=reject") // p must be directly after v + bad("v=DMARC1; p=none; p=none") // dup + bad("v=DMARC1; p=none; p=reject") // dup + bad("v=DMARC1;;") // missing tag + bad("v=DMARC1; adkim=x") // bad value + bad("v=DMARC1; aspf=123") // bad value + bad("v=DMARC1; ri=") // missing value + bad("v=DMARC1; ri=-1") // invalid, must be >= 0 + bad("v=DMARC1; ri=99999999999999999999999999999999999999") // does not fit in int + bad("v=DMARC1; ri=123bad") // leftover data + bad("v=DMARC1; ri=bad") // not a number + bad("v=DMARC1; fo=") + bad("v=DMARC1; fo=01") + bad("v=DMARC1; fo=bad") + bad("v=DMARC1; rf=bad-trailing-dash-") + bad("v=DMARC1; rf=") + bad("v=DMARC1; rf=bad.non-alphadigitdash") + bad("v=DMARC1; p=badvalue") + bad("v=DMARC1; sp=bad") + bad("v=DMARC1; pct=110") + bad("v=DMARC1; pct=bogus") + bad("v=DMARC1; pct=") + bad("v=DMARC1; rua=") + bad("v=DMARC1; rua=bogus") + bad("v=DMARC1; rua=mailto:dmarc-feedback@example.com!") + bad("v=DMARC1; rua=mailto:dmarc-feedback@example.com!99999999999999999999999999999999999999999999999") + bad("v=DMARC1; rua=mailto:dmarc-feedback@example.com!10p") + + valid := func(s string, exp Record) { + t.Helper() + + r, _, err := ParseRecord(s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !reflect.DeepEqual(r, &exp) { + t.Fatalf("got:\n%#v\nexpected:\n%#v", r, &exp) + } + } + + // Return a record with default values, and overrides from r. Only for the fields used below. 
+ record := func(r Record) Record { + rr := DefaultRecord + if r.Policy != "" { + rr.Policy = r.Policy + } + if r.AggregateReportAddresses != nil { + rr.AggregateReportAddresses = r.AggregateReportAddresses + } + if r.FailureReportAddresses != nil { + rr.FailureReportAddresses = r.FailureReportAddresses + } + if r.Percentage != 0 { + rr.Percentage = r.Percentage + } + return rr + } + + valid("v=DMARC1; rua=mailto:mjl@mox.example", record(Record{ + Policy: "none", + AggregateReportAddresses: []URI{ + {Address: "mailto:mjl@mox.example"}, + }, + })) // ../rfc/7489:1407 + valid("v=DMARC1; p=reject; sp=invalid; rua=mailto:mjl@mox.example", record(Record{ + Policy: "none", + AggregateReportAddresses: []URI{ + {Address: "mailto:mjl@mox.example"}, + }, + })) // ../rfc/7489:1407 + valid("v=DMARC1; p=none; rua=mailto:dmarc-feedback@example.com", record(Record{ + Policy: "none", + AggregateReportAddresses: []URI{ + {Address: "mailto:dmarc-feedback@example.com"}, + }, + })) + valid("v=DMARC1; p=none; rua=mailto:dmarc-feedback@example.com;ruf=mailto:auth-reports@example.com", record(Record{ + Policy: "none", + AggregateReportAddresses: []URI{ + {Address: "mailto:dmarc-feedback@example.com"}, + }, + FailureReportAddresses: []URI{ + {Address: "mailto:auth-reports@example.com"}, + }, + })) + valid("v=DMARC1; p=quarantine; rua=mailto:dmarc-feedback@example.com,mailto:tld-test@thirdparty.example.net!10m; pct=25", record(Record{ + Policy: "quarantine", + AggregateReportAddresses: []URI{ + {Address: "mailto:dmarc-feedback@example.com"}, + {Address: "mailto:tld-test@thirdparty.example.net", MaxSize: 10, Unit: "m"}, + }, + Percentage: 25, + })) + + valid("V = DMARC1 ; P = reject ;\tSP=none; unknown \t=\t ignored-future-value \t ; adkim=s; aspf=s; rua=mailto:dmarc-feedback@example.com ,\t\tmailto:tld-test@thirdparty.example.net!10m; RUF=mailto:auth-reports@example.com ,\t\tmailto:tld-test@thirdparty.example.net!0G; RI = 123; FO = 0:1:d:s ; RF= afrf : other; Pct = 0", + Record{ + Version: 
"DMARC1",
+			Policy:                     "reject",
+			SubdomainPolicy:            "none",
+			ADKIM:                      "s",
+			ASPF:                       "s",
+			AggregateReportAddresses: []URI{
+				{Address: "mailto:dmarc-feedback@example.com"},
+				{Address: "mailto:tld-test@thirdparty.example.net", MaxSize: 10, Unit: "m"},
+			},
+			FailureReportAddresses: []URI{
+				{Address: "mailto:auth-reports@example.com"},
+				{Address: "mailto:tld-test@thirdparty.example.net", MaxSize: 0, Unit: "g"},
+			},
+			AggregateReportingInterval: 123,
+			FailureReportingOptions:    []string{"0", "1", "d", "s"},
+			ReportingFormat:            []string{"afrf", "other"},
+			Percentage:                 0,
+		},
+	)
+}
diff --git a/dmarc/txt.go b/dmarc/txt.go
new file mode 100644
index 0000000..710baac
--- /dev/null
+++ b/dmarc/txt.go
@@ -0,0 +1,127 @@
+package dmarc
+
+import (
+	"fmt"
+	"strings"
+)
+
+// todo: DMARCPolicy should be named just Policy, but this is causing conflicting types in sherpadoc output. should somehow get the dmarc-prefix only in the sherpadoc.
+
+// Policy as used in DMARC DNS record for "p=" or "sp=".
+type DMARCPolicy string
+
+// ../rfc/7489:1157
+
+const (
+	PolicyEmpty      DMARCPolicy = "" // Only for the optional Record.SubdomainPolicy.
+	PolicyNone       DMARCPolicy = "none"
+	PolicyQuarantine DMARCPolicy = "quarantine"
+	PolicyReject     DMARCPolicy = "reject"
+)
+
+// URI is a destination address for reporting.
+type URI struct {
+	Address string // Should start with "mailto:".
+	MaxSize uint64 // Optional maximum message size, subject to Unit.
+	Unit    string // "" (b), "k", "m", "g", "t" (case insensitive), unit size, where k is 2^10 etc.
+}
+
+// String returns a string representation of the URI for inclusion in a DMARC
+// record.
+func (u URI) String() string {
+	s := u.Address
+	s = strings.ReplaceAll(s, ",", "%2C")
+	s = strings.ReplaceAll(s, "!", "%21")
+	// The size limit is separated from the address by "!", ../rfc/7489:1132.
+	// Write the separator whenever there is a limit (including an explicit
+	// "!0g"-style zero with a unit), so a parsed URI round-trips: without the
+	// "!", "mailto:a@b!10m" would render as the unparseable "mailto:a@b10m".
+	if u.MaxSize > 0 || u.Unit != "" {
+		s += fmt.Sprintf("!%d", u.MaxSize)
+	}
+	s += u.Unit
+	return s
+}
+
+// ../rfc/7489:1127
+
+// Align specifies the required alignment of a domain name.
+type Align string
+
+const (
+	AlignStrict  Align = "s" // Strict requires an exact domain name match.
+	AlignRelaxed Align = "r" // Relaxed requires either an exact or subdomain name match.
+)
+
+// Record is a DNS policy or reporting record.
+//
+// Example:
+//
+//	v=DMARC1; p=reject; rua=mailto:postmaster@mox.example
+type Record struct {
+	Version                    string      // "v=DMARC1", fixed.
+	Policy                     DMARCPolicy // Required, for "p=".
+	SubdomainPolicy            DMARCPolicy // Like policy but for subdomains. Optional, for "sp=".
+	AggregateReportAddresses   []URI       // Optional, for "rua=".
+	FailureReportAddresses     []URI       // Optional, for "ruf=".
+	ADKIM                      Align       // "r" (default) for relaxed or "s" for strict. For "adkim=".
+	ASPF                       Align       // "r" (default) for relaxed or "s" for strict. For "aspf=".
+	AggregateReportingInterval int         // Default 86400. For "ri=".
+	FailureReportingOptions    []string    // "0" (default), "1", "d", "s". For "fo=".
+	ReportingFormat            []string    // "afrf" (default). For "rf=".
+	Percentage                 int         // Between 0 and 100, default 100. For "pct=".
+}
+
+// DefaultRecord holds the defaults for a DMARC record.
+var DefaultRecord = Record{
+	Version:                    "DMARC1",
+	ADKIM:                      "r",
+	ASPF:                       "r",
+	AggregateReportingInterval: 86400,
+	FailureReportingOptions:    []string{"0"},
+	ReportingFormat:            []string{"afrf"},
+	Percentage:                 100,
+}
+
+// String returns the DMARC record for use as DNS TXT record.
+// String returns the DMARC record for use as DNS TXT record. Tags with default
+// values are omitted; if nothing but the version was written, a lone ";" is
+// appended so the record is still recognizable.
+func (r Record) String() string {
+	b := &strings.Builder{}
+	b.WriteString("v=" + r.Version)
+
+	wrote := false
+	write := func(do bool, tag, value string) {
+		if do {
+			fmt.Fprintf(b, ";%s=%s", tag, value)
+			wrote = true
+		}
+	}
+	write(r.Policy != "", "p", string(r.Policy))
+	write(r.SubdomainPolicy != "", "sp", string(r.SubdomainPolicy))
+	if len(r.AggregateReportAddresses) > 0 {
+		l := make([]string, len(r.AggregateReportAddresses))
+		for i, a := range r.AggregateReportAddresses {
+			l[i] = a.String()
+		}
+		s := strings.Join(l, ",")
+		write(true, "rua", s)
+	}
+	if len(r.FailureReportAddresses) > 0 {
+		l := make([]string, len(r.FailureReportAddresses))
+		for i, a := range r.FailureReportAddresses {
+			l[i] = a.String()
+		}
+		s := strings.Join(l, ",")
+		write(true, "ruf", s)
+	}
+	write(r.ADKIM != "", "adkim", string(r.ADKIM))
+	write(r.ASPF != "", "aspf", string(r.ASPF))
+	write(r.AggregateReportingInterval != DefaultRecord.AggregateReportingInterval, "ri", fmt.Sprintf("%d", r.AggregateReportingInterval))
+	if len(r.FailureReportingOptions) > 1 || (len(r.FailureReportingOptions) == 1 && r.FailureReportingOptions[0] != "0") {
+		write(true, "fo", strings.Join(r.FailureReportingOptions, ":"))
+	}
+	// Only write "rf" when it deviates from the default single "afrf", and join
+	// the reporting formats themselves. Previously the condition lacked the "!"
+	// (so "rf" was written exactly when it WAS the default) and the value joined
+	// r.FailureReportingOptions instead of r.ReportingFormat.
+	if len(r.ReportingFormat) > 1 || (len(r.ReportingFormat) == 1 && !strings.EqualFold(r.ReportingFormat[0], "afrf")) {
+		write(true, "rf", strings.Join(r.ReportingFormat, ":"))
+	}
+	write(r.Percentage != 100, "pct", fmt.Sprintf("%d", r.Percentage))
+
+	if !wrote {
+		b.WriteString(";")
+	}
+	return b.String()
+}
diff --git a/dmarcdb/db.go b/dmarcdb/db.go
new file mode 100644
index 0000000..a616722
--- /dev/null
+++ b/dmarcdb/db.go
@@ -0,0 +1,186 @@
+// Package dmarcdb stores incoming DMARC reports.
+//
+// With DMARC, a domain can request emails with DMARC verification results by
+// remote mail servers to be sent to a specified address. Mox parses such
+// reports, stores them in its database and makes them available through its
+// admin web interface.
+package dmarcdb + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dmarcrpt" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" +) + +var xlog = mlog.New("dmarcdb") + +var ( + dmarcDB *bstore.DB + mutex sync.Mutex +) + +var ( + metricEvaluated = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_dmarcdb_policy_evaluated_total", + Help: "Number of policy evaluations.", + }, + // We only register validated domains for which we have a config. + []string{"domain", "disposition", "dkim", "spf"}, + ) + metricDKIM = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_dmarcdb_dkim_result_total", + Help: "Number of DKIM results.", + }, + []string{"result"}, + ) + metricSPF = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_dmarcdb_spf_result_total", + Help: "Number of SPF results.", + }, + []string{"result"}, + ) +) + +// DomainFeedback is a single report stored in the database. +type DomainFeedback struct { + ID int64 + // Domain where DMARC DNS record was found, could be organizational domain. + Domain string `bstore:"index"` + // Domain in From-header. + FromDomain string `bstore:"index"` + dmarcrpt.Feedback +} + +func database() (rdb *bstore.DB, rerr error) { + mutex.Lock() + defer mutex.Unlock() + if dmarcDB == nil { + p := mox.DataDirPath("dmarcrpt.db") + os.MkdirAll(filepath.Dir(p), 0770) + db, err := bstore.Open(p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, DomainFeedback{}) + if err != nil { + return nil, err + } + dmarcDB = db + } + return dmarcDB, nil +} + +// Init opens the database. +func Init() error { + _, err := database() + return err +} + +// AddReport adds a DMARC aggregate feedback report from an email to the database, +// and updates prometheus metrics. 
+// +// fromDomain is the domain in the report message From header. +func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error { + db, err := database() + if err != nil { + return err + } + + d, err := dns.ParseDomain(f.PolicyPublished.Domain) + if err != nil { + return fmt.Errorf("parsing domain in report: %v", err) + } + + df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f} + if err := db.Insert(&df); err != nil { + return err + } + + for _, r := range f.Records { + for _, dkim := range r.AuthResults.DKIM { + count := r.Row.Count + if count > 0 { + metricDKIM.With(prometheus.Labels{ + "result": string(dkim.Result), + }).Add(float64(count)) + } + } + + for _, spf := range r.AuthResults.SPF { + count := r.Row.Count + if count > 0 { + metricSPF.With(prometheus.Labels{ + "result": string(spf.Result), + }).Add(float64(count)) + } + } + + count := r.Row.Count + if count > 0 { + pe := r.Row.PolicyEvaluated + metricEvaluated.With(prometheus.Labels{ + "domain": f.PolicyPublished.Domain, + "disposition": string(pe.Disposition), + "dkim": string(pe.DKIM), + "spf": string(pe.SPF), + }).Add(float64(count)) + } + } + return nil +} + +// Records returns all reports in the database. +func Records(ctx context.Context) ([]DomainFeedback, error) { + db, err := database() + if err != nil { + return nil, err + } + + return bstore.QueryDB[DomainFeedback](db).List() +} + +// RecordID returns the report for the ID. +func RecordID(ctx context.Context, id int64) (DomainFeedback, error) { + db, err := database() + if err != nil { + return DomainFeedback{}, err + } + + e := DomainFeedback{ID: id} + err = db.Get(&e) + return e, err +} + +// RecordsPeriodDomain returns the reports overlapping start and end, for the given +// domain. If domain is empty, all records match for domain. 
+func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) { + db, err := database() + if err != nil { + return nil, err + } + + s := start.Unix() + e := end.Unix() + + q := bstore.QueryDB[DomainFeedback](db) + if domain != "" { + q.FilterNonzero(DomainFeedback{Domain: domain}) + } + q.FilterFn(func(d DomainFeedback) bool { + m := d.Feedback.ReportMetadata.DateRange + return m.Begin >= s && m.Begin < e || m.End > s && m.End <= e + }) + return q.List() +} diff --git a/dmarcdb/db_test.go b/dmarcdb/db_test.go new file mode 100644 index 0000000..0365684 --- /dev/null +++ b/dmarcdb/db_test.go @@ -0,0 +1,108 @@ +package dmarcdb + +import ( + "context" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/mjl-/mox/dmarcrpt" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" +) + +func TestDMARCDB(t *testing.T) { + mox.ConfigStaticPath = "../testdata/dmarcdb/fake.conf" + mox.Conf.Static.DataDir = "." + + dbpath := mox.DataDirPath("dmarcrpt.db") + os.MkdirAll(filepath.Dir(dbpath), 0770) + defer os.Remove(dbpath) + + if err := Init(); err != nil { + t.Fatalf("init database: %s", err) + } + + feedback := &dmarcrpt.Feedback{ + ReportMetadata: dmarcrpt.ReportMetadata{ + OrgName: "google.com", + Email: "noreply-dmarc-support@google.com", + ExtraContactInfo: "https://support.google.com/a/answer/2466580", + ReportID: "10051505501689795560", + DateRange: dmarcrpt.DateRange{ + Begin: 1596412800, + End: 1596499199, + }, + }, + PolicyPublished: dmarcrpt.PolicyPublished{ + Domain: "example.org", + ADKIM: "r", + ASPF: "r", + Policy: "reject", + SubdomainPolicy: "reject", + Percentage: 100, + }, + Records: []dmarcrpt.ReportRecord{ + { + Row: dmarcrpt.Row{ + SourceIP: "127.0.0.1", + Count: 1, + PolicyEvaluated: dmarcrpt.PolicyEvaluated{ + Disposition: dmarcrpt.DispositionNone, + DKIM: dmarcrpt.DMARCPass, + SPF: dmarcrpt.DMARCPass, + }, + }, + Identifiers: dmarcrpt.Identifiers{ + HeaderFrom: "example.org", + }, + 
AuthResults: dmarcrpt.AuthResults{ + DKIM: []dmarcrpt.DKIMAuthResult{ + { + Domain: "example.org", + Result: dmarcrpt.DKIMPass, + Selector: "example", + }, + }, + SPF: []dmarcrpt.SPFAuthResult{ + { + Domain: "example.org", + Result: dmarcrpt.SPFPass, + }, + }, + }, + }, + }, + } + if err := AddReport(context.Background(), feedback, dns.Domain{ASCII: "google.com"}); err != nil { + t.Fatalf("adding report: %s", err) + } + + records, err := Records(context.Background()) + if err != nil || len(records) != 1 || !reflect.DeepEqual(&records[0].Feedback, feedback) { + t.Fatalf("records: got err %v, records %#v, expected no error, single record with feedback %#v", err, records, feedback) + } + + record, err := RecordID(context.Background(), records[0].ID) + if err != nil || !reflect.DeepEqual(&record.Feedback, feedback) { + t.Fatalf("record id: got err %v, record %#v, expected feedback %#v", err, record, feedback) + } + + start := time.Unix(1596412800, 0) + end := time.Unix(1596499199, 0) + records, err = RecordsPeriodDomain(context.Background(), start, end, "example.org") + if err != nil || len(records) != 1 || !reflect.DeepEqual(&records[0].Feedback, feedback) { + t.Fatalf("records: got err %v, records %#v, expected no error, single record with feedback %#v", err, records, feedback) + } + + records, err = RecordsPeriodDomain(context.Background(), end, end, "example.org") + if err != nil || len(records) != 0 { + t.Fatalf("records: got err %v, records %#v, expected no error and no records", err, records) + } + records, err = RecordsPeriodDomain(context.Background(), start, end, "other.example") + if err != nil || len(records) != 0 { + t.Fatalf("records: got err %v, records %#v, expected no error and no records", err, records) + } +} diff --git a/dmarcrpt/feedback.go b/dmarcrpt/feedback.go new file mode 100644 index 0000000..dc9358e --- /dev/null +++ b/dmarcrpt/feedback.go @@ -0,0 +1,157 @@ +package dmarcrpt + +// Initially generated by xsdgen, then modified. 
+ +// Feedback is the top-level XML field returned. +type Feedback struct { + Version string `xml:"version"` + ReportMetadata ReportMetadata `xml:"report_metadata"` + PolicyPublished PolicyPublished `xml:"policy_published"` + Records []ReportRecord `xml:"record"` +} + +type ReportMetadata struct { + OrgName string `xml:"org_name"` + Email string `xml:"email"` + ExtraContactInfo string `xml:"extra_contact_info,omitempty"` + ReportID string `xml:"report_id"` + DateRange DateRange `xml:"date_range"` + Errors []string `xml:"error,omitempty"` +} + +type DateRange struct { + Begin int64 `xml:"begin"` + End int64 `xml:"end"` +} + +// PolicyPublished is the policy as found in DNS for the domain. +type PolicyPublished struct { + Domain string `xml:"domain"` + ADKIM Alignment `xml:"adkim,omitempty"` + ASPF Alignment `xml:"aspf,omitempty"` + Policy Disposition `xml:"p"` + SubdomainPolicy Disposition `xml:"sp"` + Percentage int `xml:"pct"` + ReportingOptions string `xml:"fo"` +} + +// Alignment is the identifier alignment. +type Alignment string + +const ( + AlignmentRelaxed Alignment = "r" // Subdomains match the DMARC from-domain. + AlignmentStrict Alignment = "s" // Only exact from-domain match. +) + +// Disposition is the requested action for a DMARC fail as specified in the +// DMARC policy in DNS. 
+type Disposition string + +const ( + DispositionNone Disposition = "none" + DispositionQuarantine Disposition = "quarantine" + DispositionReject Disposition = "reject" +) + +type ReportRecord struct { + Row Row `xml:"row"` + Identifiers Identifiers `xml:"identifiers"` + AuthResults AuthResults `xml:"auth_results"` +} + +type Row struct { + // SourceIP must match the pattern ((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5]).){3} + // (1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])| + // ([A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4} + SourceIP string `xml:"source_ip"` + Count int `xml:"count"` + PolicyEvaluated PolicyEvaluated `xml:"policy_evaluated"` +} + +type PolicyEvaluated struct { + Disposition Disposition `xml:"disposition"` + DKIM DMARCResult `xml:"dkim"` + SPF DMARCResult `xml:"spf"` + Reasons []PolicyOverrideReason `xml:"reason,omitempty"` +} + +// DMARCResult is the final validation and alignment verdict for SPF and DKIM. +type DMARCResult string + +const ( + DMARCPass DMARCResult = "pass" + DMARCFail DMARCResult = "fail" +) + +type PolicyOverrideReason struct { + Type PolicyOverride `xml:"type"` + Comment string `xml:"comment,omitempty"` +} + +// PolicyOverride is a reason the requested DMARC policy from the DNS record +// was not applied. 
+type PolicyOverride string + +const ( + PolicyOverrideForwarded PolicyOverride = "forwarded" + PolicyOverrideSampledOut PolicyOverride = "sampled_out" + PolicyOverrideTrustedForwarder PolicyOverride = "trusted_forwarder" + PolicyOverrideMailingList PolicyOverride = "mailing_list" + PolicyOverrideLocalPolicy PolicyOverride = "local_policy" + PolicyOverrideOther PolicyOverride = "other" +) + +type Identifiers struct { + EnvelopeTo string `xml:"envelope_to,omitempty"` + EnvelopeFrom string `xml:"envelope_from"` + HeaderFrom string `xml:"header_from"` +} + +type AuthResults struct { + DKIM []DKIMAuthResult `xml:"dkim,omitempty"` + SPF []SPFAuthResult `xml:"spf"` +} + +type DKIMAuthResult struct { + Domain string `xml:"domain"` + Selector string `xml:"selector,omitempty"` + Result DKIMResult `xml:"result"` + HumanResult string `xml:"human_result,omitempty"` +} + +type DKIMResult string + +const ( + DKIMNone DKIMResult = "none" + DKIMPass DKIMResult = "pass" + DKIMFail DKIMResult = "fail" + DKIMPolicy DKIMResult = "policy" + DKIMNeutral DKIMResult = "neutral" + DKIMTemperror DKIMResult = "temperror" + DKIMPermerror DKIMResult = "permerror" +) + +type SPFAuthResult struct { + Domain string `xml:"domain"` + Scope SPFDomainScope `xml:"scope"` + Result SPFResult `xml:"result"` +} + +type SPFDomainScope string + +const ( + SPFDomainScopeHelo SPFDomainScope = "helo" // SMTP EHLO + SPFDomainScopeMailFrom SPFDomainScope = "mfrom" // SMTP "MAIL FROM". +) + +type SPFResult string + +const ( + SPFNone SPFResult = "none" + SPFNeutral SPFResult = "neutral" + SPFPass SPFResult = "pass" + SPFFail SPFResult = "fail" + SPFSoftfail SPFResult = "softfail" + SPFTemperror SPFResult = "temperror" + SPFPermerror SPFResult = "permerror" +) diff --git a/dmarcrpt/parse.go b/dmarcrpt/parse.go new file mode 100644 index 0000000..5106065 --- /dev/null +++ b/dmarcrpt/parse.go @@ -0,0 +1,124 @@ +// Package dmarcrpt parses DMARC aggregate feedback reports. 
+package dmarcrpt + +import ( + "archive/zip" + "bytes" + "compress/gzip" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/moxio" +) + +var ErrNoReport = errors.New("no dmarc report found in message") + +// ParseReport parses an XML aggregate feedback report. +// The maximum report size is 20MB. +func ParseReport(r io.Reader) (*Feedback, error) { + r = &moxio.LimitReader{R: r, Limit: 20 * 1024 * 1024} + var feedback Feedback + d := xml.NewDecoder(r) + if err := d.Decode(&feedback); err != nil { + return nil, err + } + return &feedback, nil +} + +// ParseMessageReport parses an aggregate feedback report from a mail message. The +// maximum message size is 15MB, the maximum report size after decompression is +// 20MB. +func ParseMessageReport(r io.ReaderAt) (*Feedback, error) { + // ../rfc/7489:1801 + p, err := message.Parse(&moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024}) + if err != nil { + return nil, fmt.Errorf("parsing mail message: %s", err) + } + + return parseMessageReport(p) +} + +func parseMessageReport(p message.Part) (*Feedback, error) { + // Pretty much any mime structure is allowed. ../rfc/7489:1861 + // In practice, some parties will send the report as the only (non-multipart) + // content of the message. + + if p.MediaType != "MULTIPART" { + return parseReport(p) + } + + for { + sp, err := p.ParseNextPart() + if err == io.EOF { + return nil, ErrNoReport + } + if err != nil { + return nil, err + } + report, err := parseMessageReport(*sp) + if err == ErrNoReport { + continue + } else if err != nil || report != nil { + return report, err + } + } +} + +func parseReport(p message.Part) (*Feedback, error) { + ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType) + r := p.Reader() + + // If no (useful) content-type is set, try to detect it. 
+	// Fix: this previously compared against the misspelling "application/octect-stream",
+	// so reports sent with the correct "application/octet-stream" type were never
+	// sniffed and fell through to ErrNoReport. Accept the misspelling too, since
+	// broken submitters may emit it.
+	if ct == "" || ct == "application/octet-stream" || ct == "application/octect-stream" {
+		data := make([]byte, 512)
+		n, err := io.ReadFull(r, data)
+		if err == io.EOF {
+			return nil, ErrNoReport
+		} else if err != nil && err != io.ErrUnexpectedEOF {
+			return nil, fmt.Errorf("reading application/octet-stream for content-type detection: %v", err)
+		}
+		data = data[:n]
+		ct = http.DetectContentType(data)
+		r = io.MultiReader(bytes.NewReader(data), r)
+	}
+
+	switch ct {
+	case "application/zip":
+		// Google sends messages with direct application/zip content-type.
+		return parseZip(r)
+	case "application/gzip":
+		gzr, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, fmt.Errorf("decoding gzip xml report: %s", err)
+		}
+		return ParseReport(gzr)
+	case "text/xml", "application/xml":
+		return ParseReport(r)
+	}
+	return nil, ErrNoReport
+}
+
+// parseZip parses an aggregate report from a zip file, which must contain
+// exactly one file: the XML report.
+func parseZip(r io.Reader) (*Feedback, error) {
+	buf, err := io.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("reading feedback: %s", err)
+	}
+	zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+	if err != nil {
+		return nil, fmt.Errorf("parsing zip file: %s", err)
+	}
+	if len(zr.File) != 1 {
+		return nil, fmt.Errorf("zip contains %d files, expected 1", len(zr.File))
+	}
+	f, err := zr.File[0].Open()
+	if err != nil {
+		return nil, fmt.Errorf("opening file in zip: %s", err)
+	}
+	defer f.Close()
+	return ParseReport(f)
+}
diff --git a/dmarcrpt/parse_test.go b/dmarcrpt/parse_test.go
new file mode 100644
index 0000000..96f1ed1
--- /dev/null
+++ b/dmarcrpt/parse_test.go
@@ -0,0 +1,179 @@
+package dmarcrpt
+
+import (
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+const reportExample = `<?xml version="1.0" encoding="UTF-8"?>
+<feedback>
+  <report_metadata>
+    <org_name>google.com</org_name>
+    <email>noreply-dmarc-support@google.com</email>
+    <extra_contact_info>https://support.google.com/a/answer/2466580</extra_contact_info>
+    <report_id>10051505501689795560</report_id>
+    <date_range>
+      <begin>1596412800</begin>
+      <end>1596499199</end>
+    </date_range>
+  </report_metadata>
+  <policy_published>
+    <domain>example.org</domain>
+    <adkim>r</adkim>
+    <aspf>r</aspf>
+    <p>reject</p>
+    <sp>reject</sp>
+    <pct>100</pct>
+  </policy_published>
+  <record>
+    <row>
+      <source_ip>127.0.0.1</source_ip>
+      <count>1</count>
+      <policy_evaluated>
+        <disposition>none</disposition>
+        <dkim>pass</dkim>
+        <spf>pass</spf>
+      </policy_evaluated>
+    </row>
+    <identifiers>
+      <header_from>example.org</header_from>
+    </identifiers>
+    <auth_results>
+      <dkim>
+        <domain>example.org</domain>
+        <result>pass</result>
+        <selector>example</selector>
+      </dkim>
+      <spf>
+        <domain>example.org</domain>
+        <result>pass</result>
+      </spf>
+    </auth_results>
+  </record>
+</feedback>
+`
+
+func TestParseReport(t *testing.T) {
+	var expect = &Feedback{
+		ReportMetadata: ReportMetadata{
+			OrgName:          "google.com",
+			Email:            "noreply-dmarc-support@google.com",
+			ExtraContactInfo: "https://support.google.com/a/answer/2466580",
+			ReportID:         "10051505501689795560",
+			DateRange: DateRange{
+				Begin: 1596412800,
+				End:   1596499199,
+			},
+		},
+		PolicyPublished: PolicyPublished{
+			Domain:          "example.org",
+			ADKIM:           "r",
+			ASPF:            "r",
+			Policy:          "reject",
+			SubdomainPolicy: "reject",
+			Percentage:      100,
+		},
+		Records: []ReportRecord{
+			{
+				Row: Row{
+					SourceIP: "127.0.0.1",
+					Count:    1,
+					PolicyEvaluated: PolicyEvaluated{
+						Disposition: DispositionNone,
+						DKIM:        DMARCPass,
+						SPF:         DMARCPass,
+					},
+				},
+				Identifiers: Identifiers{
+					HeaderFrom: "example.org",
+				},
+				AuthResults: AuthResults{
+					DKIM: []DKIMAuthResult{
+						{
+							Domain:   "example.org",
+							Result:   DKIMPass,
+							Selector: "example",
+						},
+					},
+					SPF: []SPFAuthResult{
+						{
+							Domain: "example.org",
+							Result: SPFPass,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	feedback, err := ParseReport(strings.NewReader(reportExample))
+	if err != nil {
+		t.Fatalf("parsing report: %s", err)
+	}
+	if !reflect.DeepEqual(expect, feedback) {
+		t.Fatalf("expected:\n%#v\ngot:\n%#v", expect, feedback)
+	}
+}
+
+func TestParseMessageReport(t *testing.T) {
+	const dir = "../testdata/dmarc-reports"
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatalf("listing dmarc report emails: %s", err)
+	}
+
+	for _, file := range files {
+		p := dir + "/" + file.Name()
+		f, err := os.Open(p)
+		if err != nil {
+			t.Fatalf("open %q: %s", p, err)
+		}
+		_, err = ParseMessageReport(f)
+		if err != nil {
+			t.Fatalf("ParseMessageReport: %q: %s", p, err)
+		}
+		f.Close()
+	}
+
+	// No report in a non-multipart message.
+	_, err = ParseMessageReport(strings.NewReader("From: \r\n\r\nNo report.\r\n"))
+	if err != ErrNoReport {
+		t.Fatalf("message without report, got err %#v, expected ErrNoReport", err)
+	}
+
+	// No report in a multipart message.
+ var multipartNoreport = strings.ReplaceAll(`From: +To: +Subject: Report Domain: mox.example Submitter: mail.mox.example +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary="===============5735553800636657282==" + +--===============5735553800636657282== +Content-Type: text/plain +MIME-Version: 1.0 + +test + +--===============5735553800636657282== +Content-Type: text/html +MIME-Version: 1.0 + + + +--===============5735553800636657282==-- +`, "\n", "\r\n") + _, err = ParseMessageReport(strings.NewReader(multipartNoreport)) + if err != ErrNoReport { + t.Fatalf("message without report, got err %#v, expected ErrNoreport", err) + } +} + +func FuzzParseReport(f *testing.F) { + f.Add("") + f.Add(reportExample) + f.Fuzz(func(t *testing.T, s string) { + ParseReport(strings.NewReader(s)) + }) +} diff --git a/dns/dns.go b/dns/dns.go new file mode 100644 index 0000000..f5d77af --- /dev/null +++ b/dns/dns.go @@ -0,0 +1,109 @@ +// Package dns helps parse internationalized domain names (IDNA), canonicalize +// names and provides a strict and metrics-keeping logging DNS resolver. +package dns + +import ( + "errors" + "fmt" + "net" + "strings" + + "golang.org/x/net/idna" +) + +var errTrailingDot = errors.New("dns name has trailing dot") + +// Domain is a domain name, with one or more labels, with at least an ASCII +// representation, and for IDNA non-ASCII domains a unicode representation. +// The ASCII string must be used for DNS lookups. +type Domain struct { + // A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved + // letters/digits/hyphens) labels. Always in lower case. + ASCII string + + // Name as U-labels. Empty if this is an ASCII-only domain. + Unicode string +} + +// Name returns the unicode name if set, otherwise the ASCII name. +func (d Domain) Name() string { + if d.Unicode != "" { + return d.Unicode + } + return d.ASCII +} + +// XName is like Name, but only returns a unicode name when utf8 is true. 
+func (d Domain) XName(utf8 bool) string { + if utf8 && d.Unicode != "" { + return d.Unicode + } + return d.ASCII +} + +// ASCIIExtra returns the ASCII version of the domain name if smtputf8 is true and +// this is a unicode domain name. Otherwise it returns an empty string. +// +// This function is used to add the punycode name in a comment to SMTP message +// headers, e.g. Received and Authentication-Results. +func (d Domain) ASCIIExtra(smtputf8 bool) string { + if smtputf8 && d.Unicode != "" { + return d.ASCII + } + return "" +} + +// Strings returns a human-readable string. +// For IDNA names, the string contains both the unicode and ASCII name. +func (d Domain) String() string { + if d.Unicode == "" { + return d.ASCII + } + return d.Unicode + "/" + d.ASCII +} + +// IsZero returns if this is an empty Domain. +func (d Domain) IsZero() bool { + return d == Domain{} +} + +// ParseDomain parses a domain name that can consist of ASCII-only labels or U +// labels (unicode). +// Names are IDN-canonicalized and lower-cased. +// Characters in unicode can be replaced by equivalents. E.g. "Ⓡ" to "r". This +// means you should only compare parsed domain names, never strings directly. +func ParseDomain(s string) (Domain, error) { + if strings.HasSuffix(s, ".") { + return Domain{}, errTrailingDot + } + ascii, err := idna.Lookup.ToASCII(s) + if err != nil { + return Domain{}, fmt.Errorf("to ascii: %w", err) + } + unicode, err := idna.Lookup.ToUnicode(s) + if err != nil { + return Domain{}, fmt.Errorf("to unicode: %w", err) + } + // todo: should we cause errors for unicode domains that were not in + // canonical form? we are now accepting all kinds of obscure spellings + // for even a basic ASCII domain name. + // Also see https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/ + if ascii == unicode { + return Domain{ascii, ""}, nil + } + return Domain{ascii, unicode}, nil +} + +// IsNotFound returns whether an error is a net.DNSError with IsNotFound set. 
+// IsNotFound means the requested type does not exist for the given domain (a +// nodata or nxdomain response). It doesn't not necessarily mean no other types +// for that name exist. +// +// A DNS server can respond to a lookup with an error "nxdomain" to indicate a +// name does not exist (at all), or with a success status with an empty list. +// The Go resolver returns an IsNotFound error for both cases, there is no need +// to explicitly check for zero entries. +func IsNotFound(err error) bool { + var dnsErr *net.DNSError + return err != nil && errors.As(err, &dnsErr) && dnsErr.IsNotFound +} diff --git a/dns/dns_test.go b/dns/dns_test.go new file mode 100644 index 0000000..9ad84df --- /dev/null +++ b/dns/dns_test.go @@ -0,0 +1,27 @@ +package dns + +import ( + "errors" + "testing" +) + +func TestParseDomain(t *testing.T) { + test := func(s string, exp Domain, expErr error) { + t.Helper() + dom, err := ParseDomain(s) + if (err == nil) != (expErr == nil) || expErr != nil && !errors.Is(err, expErr) { + t.Fatalf("parse domain %q: err %v, expected %v", s, err, expErr) + } + if expErr == nil && dom != exp { + t.Fatalf("parse domain %q: got %#v, epxected %#v", s, dom, exp) + } + } + + // We rely on normalization of names throughout the code base. + test("xmox.nl", Domain{"xmox.nl", ""}, nil) + test("XMOX.NL", Domain{"xmox.nl", ""}, nil) + test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil) + test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil) + test("ℂᵤⓇℒ。𝐒🄴", Domain{"curl.se", ""}, nil) // https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/ + test("xmox.nl.", Domain{}, errTrailingDot) +} diff --git a/dns/ipdomain.go b/dns/ipdomain.go new file mode 100644 index 0000000..9d1d50a --- /dev/null +++ b/dns/ipdomain.go @@ -0,0 +1,42 @@ +package dns + +import ( + "net" +) + +// IPDomain is an ip address, a domain, or empty. 
+type IPDomain struct { + IP net.IP + Domain Domain +} + +// IsZero returns if both IP and Domain are zero. +func (d IPDomain) IsZero() bool { + return d.IP == nil && d.Domain == Domain{} +} + +// String returns a string representation of either the IP or domain (with +// UTF-8). +func (d IPDomain) String() string { + if len(d.IP) > 0 { + return d.IP.String() + } + return d.Domain.Name() +} + +// XString is like String, but only returns UTF-8 domains if utf8 is true. +func (d IPDomain) XString(utf8 bool) string { + if d.IsIP() { + // todo: check callers if this is valid syntax for them. should we add [] for ipv6? perhaps also ipv4? probably depends on context. in smtp, the syntax is [] and [IPv6:]. + return d.IP.String() + } + return d.Domain.XName(utf8) +} + +func (d IPDomain) IsIP() bool { + return len(d.IP) > 0 +} + +func (d IPDomain) IsDomain() bool { + return !d.Domain.IsZero() +} diff --git a/dns/mock.go b/dns/mock.go new file mode 100644 index 0000000..2a8e84b --- /dev/null +++ b/dns/mock.go @@ -0,0 +1,156 @@ +package dns + +import ( + "context" + "fmt" + "net" +) + +// MockResolver is a Resolver used for testing. +// Set DNS records in the fields, which map FQDNs (with trailing dot) to values. +type MockResolver struct { + PTR map[string][]string + A map[string][]string + AAAA map[string][]string + TXT map[string][]string + MX map[string][]*net.MX + CNAME map[string]string + Fail map[Mockreq]struct{} +} + +type Mockreq struct { + Type string // E.g. "cname", "txt", "mx", "ptr", etc. 
+ Name string +} + +var _ Resolver = MockResolver{} + +func (r MockResolver) nxdomain(s string) *net.DNSError { + return &net.DNSError{ + Err: "no record", + Name: s, + Server: "localhost", + IsNotFound: true, + } +} + +func (r MockResolver) servfail(s string) *net.DNSError { + return &net.DNSError{ + Err: "temp error", + Name: s, + Server: "localhost", + IsTemporary: true, + } +} + +func (r MockResolver) LookupCNAME(ctx context.Context, name string) (string, error) { + if _, ok := r.Fail[Mockreq{"cname", name}]; ok { + return "", r.servfail(name) + } + if cname, ok := r.CNAME[name]; ok { + return cname, nil + } + return "", r.nxdomain(name) +} + +func (r MockResolver) LookupAddr(ctx context.Context, ip string) ([]string, error) { + if _, ok := r.Fail[Mockreq{"ptr", ip}]; ok { + return nil, r.servfail(ip) + } + l, ok := r.PTR[ip] + if !ok { + return nil, r.nxdomain(ip) + } + return l, nil +} + +func (r MockResolver) LookupNS(ctx context.Context, name string) ([]*net.NS, error) { + return nil, r.servfail("ns not implemented") +} + +func (r MockResolver) LookupPort(ctx context.Context, network, service string) (port int, err error) { + return 0, r.servfail("port not implemented") +} + +func (r MockResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return "", nil, r.servfail("srv not implemented") +} + +func (r MockResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) { + if _, ok := r.Fail[Mockreq{"ipaddr", host}]; ok { + return nil, r.servfail(host) + } + addrs, err := r.LookupHost(ctx, host) + if err != nil { + return nil, err + } + ips := make([]net.IPAddr, len(addrs)) + for i, a := range addrs { + ip := net.ParseIP(a) + if ip == nil { + return nil, fmt.Errorf("malformed ip %q", a) + } + ips[i] = net.IPAddr{IP: ip} + } + return ips, nil +} + +func (r MockResolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) { + if _, ok := r.Fail[Mockreq{"host", host}]; ok { + 
return nil, r.servfail(host) + } + addrs = append(addrs, r.A[host]...) + addrs = append(addrs, r.AAAA[host]...) + if len(addrs) > 0 { + return addrs, nil + } + if cname, ok := r.CNAME[host]; ok { + return []string{cname}, nil + } + return nil, r.nxdomain(host) +} + +func (r MockResolver) LookupIP(ctx context.Context, network, host string) ([]net.IP, error) { + if _, ok := r.Fail[Mockreq{"ip", host}]; ok { + return nil, r.servfail(host) + } + var ips []net.IP + switch network { + case "ip", "ip4": + for _, ip := range r.A[host] { + ips = append(ips, net.ParseIP(ip)) + } + } + switch network { + case "ip", "ip6": + for _, ip := range r.AAAA[host] { + ips = append(ips, net.ParseIP(ip)) + } + } + if len(ips) == 0 { + return nil, r.nxdomain(host) + } + return ips, nil +} + +func (r MockResolver) LookupMX(ctx context.Context, name string) ([]*net.MX, error) { + if _, ok := r.Fail[Mockreq{"mx", name}]; ok { + return nil, r.servfail(name) + } + l, ok := r.MX[name] + if !ok { + return nil, r.nxdomain(name) + } + return l, nil +} + +func (r MockResolver) LookupTXT(ctx context.Context, name string) ([]string, error) { + if _, ok := r.Fail[Mockreq{"txt", name}]; ok { + return nil, r.servfail(name) + } + l, ok := r.TXT[name] + if !ok { + return nil, r.nxdomain(name) + } + return l, nil +} diff --git a/dns/resolver.go b/dns/resolver.go new file mode 100644 index 0000000..ac04341 --- /dev/null +++ b/dns/resolver.go @@ -0,0 +1,248 @@ +package dns + +import ( + "context" + "errors" + "net" + "os" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/mlog" +) + +// todo future: replace with a dnssec capable resolver +// todo future: change to interface that is closer to DNS. 1. 
expose nxdomain vs success with zero entries: nxdomain means the name does not exist for any dns resource record type, success with zero records means the name exists for other types than the requested type; 2. add ability to not follow cname records when resolving. the net resolver automatically follows cnames for LookupHost, LookupIP, LookupIPAddr. when resolving names found in mx records, we explicitly must not follow cnames. that seems impossible at the moment. 3. when looking up a cname, actually lookup the record? "net" LookupCNAME will return the requested name with no error if there is no CNAME record. because it returns the canonical name. +// todo future: add option to not use anything in the cache, for the admin pages where you check the latest DNS settings, ignoring old cached info. + +var xlog = mlog.New("dns") + +var ( + metricLookup = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_dns_lookup_duration_seconds", + Help: "DNS lookups.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{ + "pkg", + "type", // Lower-case Resolver method name without leading Lookup. + "result", // ok, nxdomain, temporary, timeout, canceled, error + }, + ) +) + +// Resolver is the interface strict resolver implements. +type Resolver interface { + LookupAddr(ctx context.Context, addr string) ([]string, error) + LookupCNAME(ctx context.Context, host string) (string, error) // NOTE: returns an error if no CNAME record is present. 
+ LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupIP(ctx context.Context, network, host string) ([]net.IP, error) + LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) + LookupMX(ctx context.Context, name string) ([]*net.MX, error) + LookupNS(ctx context.Context, name string) ([]*net.NS, error) + LookupPort(ctx context.Context, network, service string) (port int, err error) + LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) + LookupTXT(ctx context.Context, name string) ([]string, error) +} + +// WithPackage sets Pkg on resolver if it is a StrictResolve and does not have a package set yet. +func WithPackage(resolver Resolver, name string) Resolver { + r, ok := resolver.(StrictResolver) + if ok && r.Pkg == "" { + nr := r + r.Pkg = name + return nr + } + return resolver +} + +// StrictResolver is a net.Resolver that enforces that DNS names end with a dot, +// preventing "search"-relative lookups. +type StrictResolver struct { + Pkg string // Name of subsystem that is making DNS requests, for metrics. + Resolver *net.Resolver // Where the actual lookups are done. If nil, net.DefaultResolver is used for lookups. 
+} + +var _ Resolver = StrictResolver{} + +var ErrRelativeDNSName = errors.New("dns: host to lookup must be absolute, ending with a dot") + +func metricLookupObserve(pkg, typ string, err error, start time.Time) { + var result string + var dnsErr *net.DNSError + switch { + case err == nil: + result = "ok" + case errors.As(err, &dnsErr) && dnsErr.IsNotFound: + result = "nxdomain" + case errors.As(err, &dnsErr) && dnsErr.IsTemporary: + result = "temporary" + case errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, context.DeadlineExceeded) || errors.As(err, &dnsErr) && dnsErr.IsTimeout: + result = "timeout" + case errors.Is(err, context.Canceled): + result = "canceled" + default: + result = "error" + } + metricLookup.WithLabelValues(pkg, typ, result).Observe(float64(time.Since(start)) / float64(time.Second)) +} + +func (r StrictResolver) WithPackage(name string) Resolver { + nr := r + nr.Pkg = name + return nr +} + +func (r StrictResolver) resolver() Resolver { + if r.Resolver == nil { + return net.DefaultResolver + } + return r.Resolver +} + +func (r StrictResolver) LookupAddr(ctx context.Context, addr string) (resp []string, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "addr", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "addr"), mlog.Field("addr", addr), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + resp, err = r.resolver().LookupAddr(ctx, addr) + return +} + +// LookupCNAME looks up a CNAME. Unlike "net" LookupCNAME, it returns a "not found" +// error if there is no CNAME record. 
+func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp string, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "cname", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "cname"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(host, ".") { + return "", ErrRelativeDNSName + } + resp, err = r.resolver().LookupCNAME(ctx, host) + if err == nil && resp == host { + return "", &net.DNSError{ + Err: "no cname record", + Name: host, + Server: "", + IsNotFound: true, + } + } + return +} +func (r StrictResolver) LookupHost(ctx context.Context, host string) (resp []string, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "host", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "host"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(host, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupHost(ctx, host) + return +} + +func (r StrictResolver) LookupIP(ctx context.Context, network, host string) (resp []net.IP, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "ip", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ip"), mlog.Field("network", network), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(host, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupIP(ctx, network, host) + return +} + +func (r StrictResolver) LookupIPAddr(ctx context.Context, host string) (resp []net.IPAddr, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "ipaddr", err, start) + 
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ipaddr"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(host, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupIPAddr(ctx, host) + return +} + +func (r StrictResolver) LookupMX(ctx context.Context, name string) (resp []*net.MX, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "mx", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "mx"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(name, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupMX(ctx, name) + return +} + +func (r StrictResolver) LookupNS(ctx context.Context, name string) (resp []*net.NS, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "ns", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ns"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(name, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupNS(ctx, name) + return +} + +func (r StrictResolver) LookupPort(ctx context.Context, network, service string) (resp int, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "port", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "port"), mlog.Field("network", network), mlog.Field("service", service), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + resp, err = r.resolver().LookupPort(ctx, network, service) + return +} + +func (r StrictResolver) LookupSRV(ctx context.Context, 
service, proto, name string) (resp0 string, resp1 []*net.SRV, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "srv", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "srv"), mlog.Field("service", service), mlog.Field("proto", proto), mlog.Field("name", name), mlog.Field("resp0", resp0), mlog.Field("resp1", resp1), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(name, ".") { + return "", nil, ErrRelativeDNSName + } + resp0, resp1, err = r.resolver().LookupSRV(ctx, service, proto, name) + return +} + +func (r StrictResolver) LookupTXT(ctx context.Context, name string) (resp []string, err error) { + start := time.Now() + defer func() { + metricLookupObserve(r.Pkg, "txt", err, start) + xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "txt"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start))) + }() + + if !strings.HasSuffix(name, ".") { + return nil, ErrRelativeDNSName + } + resp, err = r.resolver().LookupTXT(ctx, name) + return +} diff --git a/dnsbl/dnsbl.go b/dnsbl/dnsbl.go new file mode 100644 index 0000000..7eb708d --- /dev/null +++ b/dnsbl/dnsbl.go @@ -0,0 +1,130 @@ +// Package dnsbl implements DNS block lists (RFC 5782), for checking incoming messages from sources without reputation. 
+package dnsbl + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("dnsbl") + +var ( + metricLookup = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_dnsbl_lookup_duration_seconds", + Help: "DNSBL lookup", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20}, + }, + []string{ + "zone", + "status", + }, + ) +) + +var ErrDNS = errors.New("dnsbl: dns error") + +// Status is the result of a DNSBL lookup. +type Status string + +var ( + StatusTemperr Status = "temperror" // Temporary failure. + StatusPass Status = "pass" // Not present in block list. + StatusFail Status = "fail" // Present in block list. +) + +// Lookup checks if "ip" occurs in the DNS block list "zone" (e.g. dnsbl.example.org). +func Lookup(ctx context.Context, resolver dns.Resolver, zone dns.Domain, ip net.IP) (rstatus Status, rexplanation string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + metricLookup.WithLabelValues(zone.Name(), string(rstatus)).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("dnsbl lookup result", rerr, mlog.Field("zone", zone), mlog.Field("ip", ip), mlog.Field("status", rstatus), mlog.Field("explanation", rexplanation), mlog.Field("duration", time.Since(start))) + }() + + b := &strings.Builder{} + v4 := ip.To4() + if v4 != nil { + // ../rfc/5782:148 + s := len(v4) - 1 + for i := s; i >= 0; i-- { + if i < s { + b.WriteByte('.') + } + b.WriteString(strconv.Itoa(int(v4[i]))) + } + } else { + // ../rfc/5782:270 + s := len(ip) - 1 + const chars = "0123456789abcdef" + for i := s; i >= 0; i-- { + if i < s { + b.WriteByte('.') + } + v := ip[i] + b.WriteByte(chars[v>>0&0xf]) + b.WriteByte('.') + b.WriteByte(chars[v>>4&0xf]) + } + } + b.WriteString("." 
+ zone.ASCII + ".") + addr := b.String() + + // ../rfc/5782:175 + _, err := dns.WithPackage(resolver, "dnsbl").LookupIP(ctx, "ip4", addr) + if dns.IsNotFound(err) { + return StatusPass, "", nil + } else if err != nil { + return StatusTemperr, "", fmt.Errorf("%w: %s", ErrDNS, err) + } + + txts, err := dns.WithPackage(resolver, "dnsbl").LookupTXT(ctx, addr) + if dns.IsNotFound(err) { + return StatusFail, "", nil + } else if err != nil { + log.Debugx("looking up txt record from dnsbl", err, mlog.Field("addr", addr)) + return StatusFail, "", nil + } + return StatusFail, strings.Join(txts, "; "), nil +} + +// CheckHealth checks whether the DNSBL "zone" is operating correctly by +// querying for 127.0.0.2 (must be present) and 127.0.0.1 (must not be present). +// Users of a DNSBL should periodically check if the DNSBL is still operating +// properly. +// For temporary errors, ErrDNS is returned. +func CheckHealth(ctx context.Context, resolver dns.Resolver, zone dns.Domain) (rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("dnsbl healthcheck result", rerr, mlog.Field("zone", zone), mlog.Field("duration", time.Since(start))) + }() + + // ../rfc/5782:355 + status1, _, err1 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 1)) + status2, _, err2 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 2)) + if status1 == StatusPass && status2 == StatusFail { + return nil + } else if status1 == StatusFail { + return fmt.Errorf("dnsbl contains unwanted test address 127.0.0.1") + } else if status2 == StatusPass { + return fmt.Errorf("dnsbl does not contain required test address 127.0.0.2") + } + if err1 != nil { + return err1 + } else if err2 != nil { + return err2 + } + return ErrDNS +} diff --git a/dnsbl/dnsbl_test.go b/dnsbl/dnsbl_test.go new file mode 100644 index 0000000..1d9f0ee --- /dev/null +++ b/dnsbl/dnsbl_test.go @@ -0,0 +1,64 @@ +package dnsbl + +import ( + "context" + "net" + "testing" + + "github.com/mjl-/mox/dns" +) + 
+func TestDNSBL(t *testing.T) { + ctx := context.Background() + + resolver := dns.MockResolver{ + A: map[string][]string{ + "2.0.0.127.example.com.": {"127.0.0.2"}, // required for health + "1.0.0.10.example.com.": {"127.0.0.2"}, + "b.a.9.8.7.6.5.0.4.0.0.0.3.0.0.0.2.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.example.com.": {"127.0.0.2"}, + }, + TXT: map[string][]string{ + "1.0.0.10.example.com.": {"listed!"}, + "b.a.9.8.7.6.5.0.4.0.0.0.3.0.0.0.2.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.example.com.": {"listed!"}, + }, + } + + if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.1")); err != nil { + t.Fatalf("lookup: %v", err) + } else if status != StatusFail { + t.Fatalf("lookup, got status %v, expected fail", status) + } else if expl != "listed!" { + t.Fatalf("lookup, got explanation %q", expl) + } + + if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("2001:db8:1:2:3:4:567:89ab")); err != nil { + t.Fatalf("lookup: %v", err) + } else if status != StatusFail { + t.Fatalf("lookup, got status %v, expected fail", status) + } else if expl != "listed!" { + t.Fatalf("lookup, got explanation %q", expl) + } + + if status, _, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.2")); err != nil { + t.Fatalf("lookup: %v", err) + } else if status != StatusPass { + t.Fatalf("lookup, got status %v, expected pass", status) + } + + // ../rfc/5782:357 + if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.com"}); err != nil { + t.Fatalf("dnsbl not healthy: %v", err) + } + if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.org"}); err == nil { + t.Fatalf("bad dnsbl is healthy") + } + + unhealthyResolver := dns.MockResolver{ + A: map[string][]string{ + "1.0.0.127.example.com.": {"127.0.0.2"}, // Should not be present in healthy dnsbl. 
+ }, + } + if err := CheckHealth(ctx, unhealthyResolver, dns.Domain{ASCII: "example.com"}); err == nil { + t.Fatalf("bad dnsbl is healthy") + } +} diff --git a/doc.go b/doc.go new file mode 100644 index 0000000..1c5c545 --- /dev/null +++ b/doc.go @@ -0,0 +1,613 @@ +/* +Command mox is a modern full-featured open source secure mail server for +low-maintenance self-hosted email. + + - Quick and easy to set up with quickstart and automatic TLS with ACME and + Let's Encrypt. + - IMAP4 with extensions for accessing email. + - SMTP with SPF, DKIM, DMARC, DNSBL, MTA-STS, TLSRPT for exchanging email. + - Reputation-based and content-based spam filtering. + - Internationalized email. + - Admin web interface. + +# Commands + + mox [-config mox.conf] ... + mox serve + mox quickstart user@domain + mox restart + mox stop + mox setaccountpassword address + mox setadminpassword + mox loglevels [level [pkg]] + mox queue list + mox queue kick [-id id] [-todomain domain] [-recipient address] + mox queue drop [-id id] [-todomain domain] [-recipient address] + mox queue dump id + mox import maildir accountname mailboxname maildir + mox import mbox accountname mailboxname mbox + mox export maildir dst-path account-path [mailbox] + mox export mbox dst-path account-path [mailbox] + mox help [command ...] 
+ mox config test + mox config dnscheck domain + mox config dnsrecords domain + mox config describe-domains >domains.conf + mox config describe-static >mox.conf + mox config account add account address + mox config account rm account + mox config address add address account + mox config address rm address + mox config domain add domain account [localpart] + mox config domain rm domain + mox checkupdate + mox cid cid + mox clientconfig domain + mox dkim gened25519 >$selector._domainkey.$domain.ed25519key.pkcs8.pem + mox dkim genrsa >$selector._domainkey.$domain.rsakey.pkcs8.pem + mox dkim lookup selector domain + mox dkim txt <$selector._domainkey.$domain.key.pkcs8.pem + mox dkim verify message + mox dmarc lookup domain + mox dmarc parsereportmsg message ... + mox dmarc verify remoteip mailfromaddress helodomain < message + mox dnsbl check zone ip + mox dnsbl checkhealth zone + mox mtasts lookup domain + mox sendmail [-Fname] [ignoredflags] [-t] [. We escape all ">*From ", +otherwise reconstructing the original could lose a ">". + + usage: mox export mbox dst-path account-path [mailbox] + +# mox help + +Prints help about matching commands. + +If multiple commands match, they are listed along with the first line of their help text. +If a single command matches, its usage and full help text is printed. + + usage: mox help [command ...] + +# mox config test + +Parses and validates the configuration files. + +If valid, the command exits with status 0. If not valid, all errors encountered +are printed. + + usage: mox config test + +# mox config dnscheck + +Check the DNS records with the configuration for the domain, and print any errors/warnings. + + usage: mox config dnscheck domain + +# mox config dnsrecords + +Prints annotated DNS records as zone file that should be created for the domain. + +The zone file can be imported into existing DNS software. You should review the +DNS records, especially if your domain previously/currently has email +configured. 
+ + usage: mox config dnsrecords domain + +# mox config describe-domains + +Prints an annotated empty configuration for use as domains.conf. + +The domains configuration file contains the domains and their configuration, +and accounts and their configuration. This includes the configured email +addresses. The mox admin web interface, and the mox command line interface, can +make changes to this file. Mox automatically reloads this file when it changes. + +Like the static configuration, the example domains.conf printed by this command +needs modifications to make it valid. + + usage: mox config describe-domains >domains.conf + +# mox config describe-static + +Prints an annotated empty configuration for use as mox.conf. + +The static configuration file cannot be reloaded while mox is running. Mox has +to be restarted for changes to the static configuration file to take effect. + +This configuration file needs modifications to make it valid. For example, it +may contain unfinished list items. + + usage: mox config describe-static >mox.conf + +# mox config account add + +Add an account with an email address and reload the configuration. + +Email can be delivered to this address/account. A password has to be configured +explicitly, see the setaccountpassword command. + + usage: mox config account add account address + +# mox config account rm + +Remove an account and reload the configuration. + +Email addresses for this account will also be removed, and incoming email for +these addresses will be rejected. + + usage: mox config account rm account + +# mox config address add + +Adds an address to an account and reloads the configuration. + + usage: mox config address add address account + +# mox config address rm + +Remove an address and reload the configuration. + +Incoming email for this address will be rejected. + + usage: mox config address rm address + +# mox config domain add + +Adds a new domain to the configuration and reloads the configuration. 
+ +The account is used for the postmaster mailboxes the domain, including as DMARC and +TLS reporting. Localpart is the "username" at the domain for this account. If +must be set if and only if account does not yet exist. + + usage: mox config domain add domain account [localpart] + +# mox config domain rm + +Remove a domain from the configuration and reload the configuration. + +This is a dangerous operation. Incoming email delivery for this domain will be +rejected. + + usage: mox config domain rm domain + +# mox checkupdate + +Check if a newer version of mox is available. + +A single DNS TXT lookup to _updates.xmox.nl tells if a new version is +available. If so, a changelog is fetched from https://updates.xmox.nl, and the +individual entries validated with a builtin public key. The changelog is +printed. + + usage: mox checkupdate + +# mox cid + +Turn an ID from a Received header into a cid, for looking up in logs. + +A cid is essentially a connection counter initialized when mox starts. Each log +line contains a cid. Received headers added by mox contain a unique ID that can +be decrypted to a cid by admin of a mox instance only. + + usage: mox cid cid + +# mox clientconfig + +Print the configuration for email clients for a domain. + +Sending email is typically not done on the SMTP port 25, but on submission +ports 465 (with TLS) and 587 (without initial TLS, but usually added to the +connection with STARTTLS). For IMAP, the port with TLS is 993 and without is +143. + +Without TLS/STARTTLS, passwords are sent in clear text, which should only be +configured over otherwise secured connections, like a VPN. + + usage: mox clientconfig domain + +# mox dkim gened25519 + +Generate a new ed25519 key for use with DKIM. + +Ed25519 keys are much smaller than RSA keys of comparable cryptographic +strength. This is convenient because of maximum DNS message sizes. 
At the time +of writing, not many mail servers appear to support ed25519 DKIM keys though, +so it is recommended to sign messages with both RSA and ed25519 keys. + + usage: mox dkim gened25519 >$selector._domainkey.$domain.ed25519key.pkcs8.pem + +# mox dkim genrsa + +Generate a new 2048 bit RSA private key for use with DKIM. + +The generated file is in PEM format, and has a comment it is generated for use +with DKIM, by mox. + + usage: mox dkim genrsa >$selector._domainkey.$domain.rsakey.pkcs8.pem + +# mox dkim lookup + +Lookup and print the DKIM record for the selector at the domain. + + usage: mox dkim lookup selector domain + +# mox dkim txt + +Print a DKIM DNS TXT record with the public key derived from the private key read from stdin. + +The DNS should be configured as a TXT record at $selector._domainkey.$domain. + + usage: mox dkim txt <$selector._domainkey.$domain.key.pkcs8.pem + +# mox dkim verify + +Verify the DKIM signatures in a message and print the results. + +The message is parsed, and the DKIM-Signature headers are validated. Validation +of older messages may fail because the DNS records have been removed or changed +by now, or because the signature header may have specified an expiration time +that was passed. + + usage: mox dkim verify message + +# mox dmarc lookup + +Lookup dmarc policy for domain, a DNS TXT record at _dmarc., validate and print it. + + usage: mox dmarc lookup domain + +# mox dmarc parsereportmsg + +Parse a DMARC report from an email message, and print its extracted details. + +DMARC reports are periodically mailed, if requested in the DMARC DNS record of +a domain. Reports are sent by mail servers that received messages with our +domain in a From header. This may or may not be legitimate email. DMARC reports +contain summaries of evaluations of DMARC and DKIM/SPF, which can help +understand email deliverability problems. + + usage: mox dmarc parsereportmsg message ...
+ +# mox dmarc verify + +Parse an email message and evaluate it against the DMARC policy of the domain in the From-header. + +mailfromaddress and helodomain are used for SPF validation. If both are empty, +SPF validation is skipped. + +mailfromaddress should be the address used as MAIL FROM in the SMTP session. +For DSN messages, that address may be empty. The helo domain was specified at +the beginning of the SMTP transaction that delivered the message. These values +can be found in message headers. + + usage: mox dmarc verify remoteip mailfromaddress helodomain < message + +# mox dnsbl check + +Test if IP is in the DNS blocklist of the zone, e.g. bl.spamcop.net. + +If the IP is in the blocklist, an explanation is printed. This is typically a +URL with more information. + + usage: mox dnsbl check zone ip + +# mox dnsbl checkhealth + +Check the health of the DNS blocklist represented by zone, e.g. bl.spamcop.net. + +The health of a DNS blocklist can be checked by querying for 127.0.0.1 and +127.0.0.2. The second must and the first must not be present. + + usage: mox dnsbl checkhealth zone + +# mox mtasts lookup + +Lookup the MTASTS record and policy for the domain. + +MTA-STS is a mechanism for a domain to specify if it requires TLS connections +for delivering email. If a domain has a valid MTA-STS DNS TXT record at +_mta-sts. it signals it implements MTA-STS. A policy can then be +fetched at https://mta-sts./.well-known/mta-sts.txt. The policy +specifies the mode (enforce, testing, none), which MX servers support TLS and +should be used, and how long the policy can be cached. + + usage: mox mtasts lookup domain + +# mox sendmail + +Sendmail is a drop-in replacement for /usr/sbin/sendmail to deliver emails sent by unix processes like cron. + +If invoked as "sendmail", it will act as sendmail for sending messages. Its +intention is to let processes like cron send emails. Messages are submitted to +an actual mail server over SMTP. 
The destination mail server and credentials are +configured in /etc/moxsubmit.conf. The From message header is rewritten to the +configured address. + +If submitting an email fails, it is added to a directory moxsubmit.failures in +the user's home directory. + +Most flags are ignored to fake compatibility with other sendmail +implementations. A single recipient is required, or the tflag. + +/etc/moxsubmit.conf should be group-readable and not readable by others and this +binary should be setgid that group: + + groupadd moxsubmit + install -m 2755 -o root -g moxsubmit mox /usr/sbin/sendmail + touch /etc/moxsubmit.conf + chown root:moxsubmit /etc/moxsubmit.conf + chmod 640 /etc/moxsubmit.conf + # edit /etc/moxsubmit.conf + + + usage: mox sendmail [-Fname] [ignoredflags] [-t] [>/etc/postfix/main.cf; echo 'root: moxtest1@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"] + healthcheck: + test: netstat -nlt | grep ':25 ' + interval: 1s + timeout: 1s + retries: 10 + depends_on: + dns: + condition: service_healthy + networks: + mailnet1: + ipv4_address: 172.28.1.20 + + dns: + hostname: dns.example + build: + dockerfile: Dockerfile.dns + # todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system. 
+ context: testdata/integration + volumes: + - ./testdata/integration/resolv.conf:/etc/resolv.conf + - ./testdata/integration:/integration + command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /integration/*.zone /etc/unbound/; unbound -d -p -v"] + healthcheck: + test: netstat -nlu | grep '172.28.1.30:53 ' + interval: 1s + timeout: 1s + retries: 10 + networks: + mailnet1: + ipv4_address: 172.28.1.30 + +networks: + mailnet1: + driver: bridge + ipam: + driver: default + config: + - subnet: "172.28.1.0/24" + mailnet2: + driver: bridge + ipam: + driver: default + config: + - subnet: "172.28.2.0/24" + mailnet3: + driver: bridge + ipam: + driver: default + config: + - subnet: "172.28.3.0/24" diff --git a/dsn/dsn.go b/dsn/dsn.go new file mode 100644 index 0000000..8d6bc5d --- /dev/null +++ b/dsn/dsn.go @@ -0,0 +1,405 @@ +// Package dsn parses and composes Delivery Status Notification messages, see +// RFC 3464 and RFC 6533. +package dsn + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "mime/multipart" + "net/textproto" + "strconv" + "strings" + "time" + + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" +) + +// Message represents a DSN message, with basic message headers, human-readable text, +// machine-parsable data, and optional original message/headers. +// +// A DSN represents a delayed, failed or successful delivery. Failing incoming +// deliveries over SMTP, and failing outgoing deliveries from the message queue, +// can result in a DSN being sent. +type Message struct { + SMTPUTF8 bool // Whether the original was received with smtputf8. + + // DSN message From header. E.g. postmaster@ourdomain.example. NOTE: + // DSNs should be sent with a null reverse path to prevent mail loops. 
+ // ../rfc/3464:421 + From smtp.Path + + // "To" header, and also SMTP RCP TO to deliver DSN to. Should be taken + // from original SMTP transaction MAIL FROM. + // ../rfc/3464:415 + To smtp.Path + + // Message subject header, e.g. describing mail delivery failure. + Subject string + + // Human-readable text explaining the failure. Line endings should be + // bare newlines, not \r\n. They are converted to \r\n when composing. + TextBody string + + // Per-message fields. + OriginalEnvelopeID string + ReportingMTA string // Required. + DSNGateway string + ReceivedFromMTA smtp.Ehlo // Host from which message was received. + ArrivalDate time.Time + + // All per-message fields, including extensions. Only used for parsing, + // not composing. + MessageHeader textproto.MIMEHeader + + // One or more per-recipient fields. + // ../rfc/3464:436 + Recipients []Recipient + + // Original message or headers to include in DSN as third MIME part. + // Optional. Only used for generating DSNs, not set for parsed DNSs. + Original []byte +} + +// Action is a field in a DSN. +type Action string + +// ../rfc/3464:890 + +const ( + Failed Action = "failed" + Delayed Action = "delayed" + Delivered Action = "delivered" + Relayed Action = "relayed" + Expanded Action = "expanded" +) + +// ../rfc/3464:1530 ../rfc/6533:370 + +// Recipient holds the per-recipient delivery-status lines in a DSN. +type Recipient struct { + // Required fields. + FinalRecipient smtp.Path // Final recipient of message. + Action Action + + // Enhanced status code. First digit indicates permanent or temporary + // error. If the string contains more than just a status, that + // additional text is added as comment when composing a DSN. + Status string + + // Optional fields. + // Original intended recipient of message. Used with the DSN extensions ORCPT + // parameter. + // ../rfc/3464:1197 + OriginalRecipient smtp.Path + + // Remote host that returned an error code. Can also be empty for + // deliveries. 
+ RemoteMTA NameIP + + // If RemoteMTA is present, DiagnosticCode is from remote. When + // creating a DSN, additional text in the string will be added to the + // DSN as comment. + DiagnosticCode string + LastAttemptDate time.Time + FinalLogID string + + // For delayed deliveries, deliveries may be retried until this time. + WillRetryUntil *time.Time + + // All fields, including extensions. Only used for parsing, not + // composing. + Header textproto.MIMEHeader +} + +// Compose returns a DSN message. +// +// smtputf8 indicates whether the remote MTA that is receiving the DSN +// supports smtputf8. This influences the message media (sub)types used for the +// DSN. +// +// DKIM signatures are added if DKIM signing is configured for the "from" domain. +func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) { + // ../rfc/3462:119 + // ../rfc/3464:377 + // We'll make a multipart/report with 2 or 3 parts: + // - 1. human-readable explanation; + // - 2. message/delivery-status; + // - 3. (optional) original message (either in full, or only headers). + + // todo future: add option to send full message. but only do so if the message is <100kb. + // todo future: possibly write to a file directly, instead of building up message in memory. + + // If message does not require smtputf8, we are never generating a utf-8 DSN. + if !m.SMTPUTF8 { + smtputf8 = false + } + + // We check for errors once after all the writes. + msgw := &errWriter{w: &bytes.Buffer{}} + + header := func(k, v string) { + fmt.Fprintf(msgw, "%s: %s\r\n", k, v) + } + + line := func(w io.Writer) { + w.Write([]byte("\r\n")) + } + + // Outer message headers. + header("From", fmt.Sprintf("<%s>", m.From.XString(smtputf8))) // todo: would be good to have a local ascii-only name for this address. + header("To", fmt.Sprintf("<%s>", m.To.XString(smtputf8))) // todo: we could just leave this out if it has utf-8 and remote does not support utf-8. 
+ header("Subject", m.Subject) + header("Message-Id", fmt.Sprintf("<%s>", mox.MessageIDGen(smtputf8))) + header("Date", time.Now().Format(message.RFC5322Z)) + header("MIME-Version", "1.0") + mp := multipart.NewWriter(msgw) + header("Content-Type", fmt.Sprintf(`multipart/report; report-type="delivery-status"; boundary="%s"`, mp.Boundary())) + + line(msgw) + + // First part, human-readable message. + msgHdr := textproto.MIMEHeader{} + if smtputf8 { + msgHdr.Set("Content-Type", "text/plain; charset=utf-8") + msgHdr.Set("Content-Transfer-Encoding", "8BIT") + } else { + msgHdr.Set("Content-Type", "text/plain") + msgHdr.Set("Content-Transfer-Encoding", "7BIT") + } + msgp, err := mp.CreatePart(msgHdr) + if err != nil { + return nil, err + } + msgp.Write([]byte(strings.ReplaceAll(m.TextBody, "\n", "\r\n"))) + + // Machine-parsable message. ../rfc/3464:455 + statusHdr := textproto.MIMEHeader{} + if smtputf8 { + // ../rfc/6533:325 + statusHdr.Set("Content-Type", "message/global-delivery-status") + statusHdr.Set("Content-Transfer-Encoding", "8BIT") + } else { + statusHdr.Set("Content-Type", "message/delivery-status") + statusHdr.Set("Content-Transfer-Encoding", "7BIT") + } + statusp, err := mp.CreatePart(statusHdr) + if err != nil { + return nil, err + } + + // ../rfc/3464:470 + // examples: ../rfc/3464:1855 + // type fields: ../rfc/3464:536 https://www.iana.org/assignments/dsn-types/dsn-types.xhtml + + status := func(k, v string) { + fmt.Fprintf(statusp, "%s: %s\r\n", k, v) + } + + // Per-message fields first. ../rfc/3464:575 + // todo future: once we support the smtp dsn extension, the envid should be saved/set as OriginalEnvelopeID. 
../rfc/3464:583 ../rfc/3461:1139 + if m.OriginalEnvelopeID != "" { + status("Original-Envelope-ID", m.OriginalEnvelopeID) + } + status("Reporting-MTA", "dns; "+m.ReportingMTA) // ../rfc/3464:628 + if m.DSNGateway != "" { + // ../rfc/3464:714 + status("DSN-Gateway", "dns; "+m.DSNGateway) + } + if !m.ReceivedFromMTA.IsZero() { + // ../rfc/3464:735 + status("Received-From-MTA", fmt.Sprintf("dns;%s (%s)", m.ReceivedFromMTA.Name, smtp.AddressLiteral(m.ReceivedFromMTA.ConnIP))) + } + status("Arrival-Date", m.ArrivalDate.Format(message.RFC5322Z)) // ../rfc/3464:758 + + // Then per-recipient fields. ../rfc/3464:769 + // todo: should also handle other address types. at least recognize "unknown". Probably just store this field. ../rfc/3464:819 + addrType := "rfc822;" // ../rfc/3464:514 + if smtputf8 { + addrType = "utf-8;" // ../rfc/6533:250 + } + if len(m.Recipients) == 0 { + return nil, fmt.Errorf("missing per-recipient fields") + } + for _, r := range m.Recipients { + line(statusp) + if !r.OriginalRecipient.IsZero() { + // ../rfc/3464:807 + status("Original-Recipient", addrType+r.OriginalRecipient.DSNString(smtputf8)) + } + status("Final-Recipient", addrType+r.FinalRecipient.DSNString(smtputf8)) // ../rfc/3464:829 + status("Action", string(r.Action)) // ../rfc/3464:879 + st := r.Status + if st == "" { + // ../rfc/3464:944 + // Making up a status code is not great, but the field is required. We could simply + // require the caller to make one up... + switch r.Action { + case Delayed: + st = "4.0.0" + case Failed: + st = "5.0.0" + default: + st = "2.0.0" + } + } + var rest string + st, rest = codeLine(st) + statusLine := st + if rest != "" { + statusLine += " (" + rest + ")" + } + status("Status", statusLine) // ../rfc/3464:975 + if !r.RemoteMTA.IsZero() { + // ../rfc/3464:1015 + status("Remote-MTA", fmt.Sprintf("dns;%s (%s)", r.RemoteMTA.Name, smtp.AddressLiteral(r.RemoteMTA.IP))) + } + // Presence of Diagnostic-Code indicates the code is from Remote-MTA. 
../rfc/3464:1053 + if r.DiagnosticCode != "" { + diagCode, rest := codeLine(r.DiagnosticCode) + diagLine := diagCode + if rest != "" { + diagLine += " (" + rest + ")" + } + // ../rfc/6533:589 + status("Diagnostic-Code", "smtp; "+diagLine) + } + if !r.LastAttemptDate.IsZero() { + status("Last-Attempt-Date", r.LastAttemptDate.Format(message.RFC5322Z)) // ../rfc/3464:1076 + } + if r.FinalLogID != "" { + // todo future: think about adding cid as "Final-Log-Id"? + status("Final-Log-ID", r.FinalLogID) // ../rfc/3464:1098 + } + if r.WillRetryUntil != nil { + status("Will-Retry-Until", r.WillRetryUntil.Format(message.RFC5322Z)) // ../rfc/3464:1108 + } + } + + // We include only the header of the original message. + // todo: add the textual version of the original message, if it exists and isn't too large. + if m.Original != nil { + headers, err := message.ReadHeaders(bufio.NewReader(bytes.NewReader(m.Original))) + if err != nil && errors.Is(err, message.ErrHeaderSeparator) { + // Whole data is a header. + headers = m.Original + } else if err != nil { + return nil, err + } else { + // This is a whole message. We still only include the headers. + // todo: include the whole body. 
+ } + + origHdr := textproto.MIMEHeader{} + if smtputf8 { + // ../rfc/6533:431 + // ../rfc/6533:605 + origHdr.Set("Content-Type", "message/global-headers") // ../rfc/6533:625 + origHdr.Set("Content-Transfer-Encoding", "8BIT") + } else { + // ../rfc/3462:175 + if m.SMTPUTF8 { + // ../rfc/6533:480 + origHdr.Set("Content-Type", "text/rfc822-headers; charset=utf-8") + origHdr.Set("Content-Transfer-Encoding", "BASE64") + } else { + origHdr.Set("Content-Type", "text/rfc822-headers") + origHdr.Set("Content-Transfer-Encoding", "7BIT") + } + } + origp, err := mp.CreatePart(origHdr) + if err != nil { + return nil, err + } + + if !smtputf8 && m.SMTPUTF8 { + data := base64.StdEncoding.EncodeToString(headers) + for len(data) > 0 { + line := data + n := len(line) + if n > 78 { + n = 78 + } + line, data = data[:n], data[n:] + origp.Write([]byte(line + "\r\n")) + } + } else { + origp.Write(headers) + } + } + + if err := mp.Close(); err != nil { + return nil, err + } + + if msgw.err != nil { + return nil, msgw.err + } + + data := msgw.w.Bytes() + + fd := m.From.IPDomain.Domain + confDom, _ := mox.Conf.Domain(fd) + if len(confDom.DKIM.Sign) > 0 { + if dkimHeaders, err := dkim.Sign(context.Background(), m.From.Localpart, fd, confDom.DKIM, smtputf8, bytes.NewReader(data)); err != nil { + log.Errorx("dsn: dkim sign for domain, returning unsigned dsn", err, mlog.Field("domain", fd)) + } else { + data = append([]byte(dkimHeaders), data...) + } + } + + return data, nil +} + +type errWriter struct { + w *bytes.Buffer + err error +} + +func (w *errWriter) Write(buf []byte) (int, error) { + if w.err != nil { + return -1, w.err + } + n, err := w.w.Write(buf) + w.err = err + return n, err +} + +// split a line into enhanced status code and rest.
+func codeLine(s string) (string, string) { + t := strings.SplitN(s, " ", 2) + l := strings.Split(t[0], ".") + if len(l) != 3 { + return "", s + } + for i, e := range l { + _, err := strconv.ParseInt(e, 10, 32) + if err != nil { + return "", s + } + if i == 0 && len(e) != 1 { + return "", s + } + } + + var rest string + if len(t) == 2 { + rest = t[1] + } + return t[0], rest +} + +// HasCode returns whether line starts with an enhanced SMTP status code. +func HasCode(line string) bool { + // ../rfc/3464:986 + ecode, _ := codeLine(line) + return ecode != "" +} diff --git a/dsn/dsn_test.go b/dsn/dsn_test.go new file mode 100644 index 0000000..10e3e15 --- /dev/null +++ b/dsn/dsn_test.go @@ -0,0 +1,243 @@ +package dsn + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" +) + +func xparseDomain(s string) dns.Domain { + d, err := dns.ParseDomain(s) + if err != nil { + panic(fmt.Sprintf("parsing domain %q: %v", s, err)) + } + return d +} + +func xparseIPDomain(s string) dns.IPDomain { + return dns.IPDomain{Domain: xparseDomain(s)} +} + +func tparseMessage(t *testing.T, data []byte, nparts int) (*Message, *message.Part) { + t.Helper() + m, p, err := Parse(bytes.NewReader(data)) + if err != nil { + t.Fatalf("parsing dsn: %v", err) + } + if len(p.Parts) != nparts { + t.Fatalf("got %d parts, expected %d", len(p.Parts), nparts) + } + return m, p +} + +func tcheckType(t *testing.T, p *message.Part, mt, mst, cte string) { + t.Helper() + if !strings.EqualFold(p.MediaType, mt) { + t.Fatalf("got mediatype %q, expected %q", p.MediaType, mt) + } + if !strings.EqualFold(p.MediaSubType, mst) { + t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst) + } + if !strings.EqualFold(p.ContentTransferEncoding, cte) { + t.Fatalf("got content-transfer-encoding 
%q, expected %q", p.ContentTransferEncoding, cte) + } +} + +func tcompare(t *testing.T, got, exp any) { + t.Helper() + if !reflect.DeepEqual(got, exp) { + t.Fatalf("got %#v, expected %#v", got, exp) + } +} + +func tcompareReader(t *testing.T, r io.Reader, exp []byte) { + t.Helper() + buf, err := io.ReadAll(r) + if err != nil { + t.Fatalf("data read, got %q, expected %q", buf, exp) + } +} + +func TestDSN(t *testing.T) { + log := mlog.New("dsn") + + now := time.Now() + + // An ascii-only message. + m := Message{ + SMTPUTF8: false, + + From: smtp.Path{Localpart: "postmaster", IPDomain: xparseIPDomain("mox.example")}, + To: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")}, + Subject: "dsn", + TextBody: "delivery failure\n", + + ReportingMTA: "mox.example", + ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("relay.example"), ConnIP: net.ParseIP("10.10.10.10")}, + ArrivalDate: now, + + Recipients: []Recipient{ + { + FinalRecipient: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")}, + Action: Failed, + Status: "5.0.0", + LastAttemptDate: now, + }, + }, + + Original: []byte("Subject: test\r\n"), + } + msgbuf, err := m.Compose(log, false) + if err != nil { + t.Fatalf("composing dsn: %v", err) + } + pmsg, part := tparseMessage(t, msgbuf, 3) + tcheckType(t, part, "multipart", "report", "") + tcheckType(t, &part.Parts[0], "text", "plain", "7bit") + tcheckType(t, &part.Parts[1], "message", "delivery-status", "7bit") + tcheckType(t, &part.Parts[2], "text", "rfc822-headers", "7bit") + tcompare(t, part.Parts[2].ContentTypeParams["charset"], "") + tcompareReader(t, part.Parts[2].Reader(), m.Original) + tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) + // todo: test more fields + + msgbufutf8, err := m.Compose(log, true) + if err != nil { + t.Fatalf("composing dsn with utf-8: %v", err) + } + pmsg, part = tparseMessage(t, msgbufutf8, 3) + tcheckType(t, part, "multipart", "report", "") + tcheckType(t, 
&part.Parts[0], "text", "plain", "7bit") + tcheckType(t, &part.Parts[1], "message", "delivery-status", "7bit") + tcheckType(t, &part.Parts[2], "text", "rfc822-headers", "7bit") + tcompare(t, part.Parts[2].ContentTypeParams["charset"], "") + tcompareReader(t, part.Parts[2].Reader(), m.Original) + tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) + + // Test for valid DKIM signature. + mox.Context = context.Background() + mox.ConfigStaticPath = "../testdata/dsn/mox.conf" + mox.MustLoadConfig() + msgbuf, err = m.Compose(log, false) + if err != nil { + t.Fatalf("composing utf-8 dsn with utf-8 support: %v", err) + } + resolver := &dns.MockResolver{ + TXT: map[string][]string{ + "testsel._domainkey.mox.example.": {"v=DKIM1;h=sha256;t=s;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3ZId3ys70VFspp/VMFaxMOrNjHNPg04NOE1iShih16b3Ex7hHBOgC1UvTGSmrMlbCB1OxTXkvf6jW6S4oYRnZYVNygH6zKUwYYhaSaGIg1xA/fDn+IgcTRyLoXizMUgUgpTGyxhNrwIIWv+i7jjbs3TKpP3NU4owQ/rxowmSNqg+fHIF1likSvXvljYS" + "jaFXXnWfYibW7TdDCFFpN4sB5o13+as0u4vLw6MvOi59B1tLype1LcHpi1b9PfxNtznTTdet3kL0paxIcWtKHT0LDPUos8YYmiPa5nGbUqlC7d+4YT2jQPvwGxCws1oo2Tw6nj1UaihneYGAyvEky49FBwIDAQAB"}, + }, + } + results, err := dkim.Verify(context.Background(), resolver, false, func(*dkim.Sig) error { return nil }, bytes.NewReader(msgbuf), false) + if err != nil { + t.Fatalf("dkim verify: %v", err) + } + if len(results) != 1 || results[0].Status != dkim.StatusPass { + t.Fatalf("dkim result not pass, %#v", results) + } + + // An utf-8 message. 
+ m = Message{ + SMTPUTF8: true, + + From: smtp.Path{Localpart: "postmæster", IPDomain: xparseIPDomain("møx.example")}, + To: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")}, + Subject: "dsn¡", + TextBody: "delivery failure¿\n", + + ReportingMTA: "mox.example", + ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("reläy.example"), ConnIP: net.ParseIP("10.10.10.10")}, + ArrivalDate: now, + + Recipients: []Recipient{ + { + Action: Failed, + FinalRecipient: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")}, + Status: "5.0.0", + LastAttemptDate: now, + }, + }, + + Original: []byte("Subject: tést\r\n"), + } + msgbuf, err = m.Compose(log, false) + if err != nil { + t.Fatalf("composing utf-8 dsn without utf-8 support: %v", err) + } + pmsg, part = tparseMessage(t, msgbuf, 3) + tcheckType(t, part, "multipart", "report", "") + tcheckType(t, &part.Parts[0], "text", "plain", "7bit") + tcheckType(t, &part.Parts[1], "message", "delivery-status", "7bit") + tcheckType(t, &part.Parts[2], "text", "rfc822-headers", "base64") + tcompare(t, part.Parts[2].ContentTypeParams["charset"], "utf-8") + tcompareReader(t, part.Parts[2].Reader(), m.Original) + tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) + + msgbufutf8, err = m.Compose(log, true) + if err != nil { + t.Fatalf("composing utf-8 dsn with utf-8 support: %v", err) + } + pmsg, part = tparseMessage(t, msgbufutf8, 3) + tcheckType(t, part, "multipart", "report", "") + tcheckType(t, &part.Parts[0], "text", "plain", "8bit") + tcheckType(t, &part.Parts[1], "message", "global-delivery-status", "8bit") + tcheckType(t, &part.Parts[2], "message", "global-headers", "8bit") + tcompare(t, part.Parts[2].ContentTypeParams["charset"], "") + tcompareReader(t, part.Parts[2].Reader(), m.Original) + tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) + + // Now a message without 3rd multipart. 
+ m.Original = nil + msgbufutf8, err = m.Compose(log, true) + if err != nil { + t.Fatalf("composing utf-8 dsn with utf-8 support: %v", err) + } + pmsg, part = tparseMessage(t, msgbufutf8, 2) + tcheckType(t, part, "multipart", "report", "") + tcheckType(t, &part.Parts[0], "text", "plain", "8bit") + tcheckType(t, &part.Parts[1], "message", "global-delivery-status", "8bit") + tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) +} + +func TestCode(t *testing.T) { + testCodeLine := func(line, ecode, rest string) { + t.Helper() + e, r := codeLine(line) + if e != ecode || r != rest { + t.Fatalf("codeLine %q: got %q %q, expected %q %q", line, e, r, ecode, rest) + } + } + testCodeLine("4.0.0", "4.0.0", "") + testCodeLine("4.0.0 more", "4.0.0", "more") + testCodeLine("other", "", "other") + testCodeLine("other more", "", "other more") + + testHasCode := func(line string, exp bool) { + t.Helper() + got := HasCode(line) + if got != exp { + t.Fatalf("HasCode %q: got %v, expected %v", line, got, exp) + } + } + testHasCode("4.0.0", true) + testHasCode("5.7.28", true) + testHasCode("10.0.0", false) // first number must be single digit. + testHasCode("4.1.1 more", true) + testHasCode("other ", false) + testHasCode("4.2.", false) + testHasCode("4.2. ", false) + testHasCode(" 4.2.4", false) + testHasCode(" 4.2.4 ", false) +} diff --git a/dsn/nameip.go b/dsn/nameip.go new file mode 100644 index 0000000..8f42c1f --- /dev/null +++ b/dsn/nameip.go @@ -0,0 +1,15 @@ +package dsn + +import ( + "net" +) + +// NameIP represents a name and possibly IP, e.g. representing a connection destination. 
+type NameIP struct { + Name string + IP net.IP +} + +func (n NameIP) IsZero() bool { + return n.Name == "" && n.IP == nil +} diff --git a/dsn/parse.go b/dsn/parse.go new file mode 100644 index 0000000..33fe3b8 --- /dev/null +++ b/dsn/parse.go @@ -0,0 +1,360 @@ +package dsn + +import ( + "bufio" + "fmt" + "io" + "net/textproto" + "strconv" + "strings" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/smtp" +) + +// Parse reads a DSN message. +// +// A DSN is a multipart internet mail message with 2 or 3 parts: human-readable +// text, machine-parsable text, and optional original message or headers. +// +// The first return value is the machine-parsed DSN message. The second value is +// the entire MIME multipart message. Use its Parts field to access the +// human-readable text and optional original message/headers. +func Parse(r io.ReaderAt) (*Message, *message.Part, error) { + // DSNs can mix and match subtypes with and without utf-8. ../rfc/6533:441 + + part, err := message.Parse(r) + if err != nil { + return nil, nil, fmt.Errorf("parsing message: %v", err) + } + if part.MediaType != "MULTIPART" || part.MediaSubType != "REPORT" { + return nil, nil, fmt.Errorf(`message has content-type %q, must have "message/report"`, strings.ToLower(part.MediaType+"/"+part.MediaSubType)) + } + err = part.Walk() + if err != nil { + return nil, nil, fmt.Errorf("parsing message parts: %v", err) + } + nparts := len(part.Parts) + if nparts != 2 && nparts != 3 { + return nil, nil, fmt.Errorf("invalid dsn, got %d multipart parts, 2 or 3 required", nparts) + } + p0 := part.Parts[0] + if !(p0.MediaType == "" && p0.MediaSubType == "") && !(p0.MediaType == "TEXT" && p0.MediaSubType == "PLAIN") { + return nil, nil, fmt.Errorf(`invalid dsn, first part has content-type %q, must have "text/plain"`, strings.ToLower(p0.MediaType+"/"+p0.MediaSubType)) + } + + p1 := part.Parts[1] + var m *Message + if !(p1.MediaType == "MESSAGE" && (p1.MediaSubType == 
"DELIVERY-STATUS" || p1.MediaSubType == "GLOBAL-DELIVERY-STATUS")) { + return nil, nil, fmt.Errorf(`invalid dsn, second part has content-type %q, must have "message/delivery-status" or "message/global-delivery-status"`, strings.ToLower(p1.MediaType+"/"+p1.MediaSubType)) + } + utf8 := p1.MediaSubType == "GLOBAL-DELIVERY-STATUS" + m, err = Decode(p1.Reader(), utf8) + if err != nil { + return nil, nil, fmt.Errorf("parsing dsn delivery-status part: %v", err) + } + + addressPath := func(a message.Address) (smtp.Path, error) { + d, err := dns.ParseDomain(a.Host) + if err != nil { + return smtp.Path{}, fmt.Errorf("parsing domain: %v", err) + } + return smtp.Path{Localpart: smtp.Localpart(a.User), IPDomain: dns.IPDomain{Domain: d}}, nil + } + if len(part.Envelope.From) == 1 { + m.From, err = addressPath(part.Envelope.From[0]) + if err != nil { + return nil, nil, fmt.Errorf("parsing From-header: %v", err) + } + } + if len(part.Envelope.To) == 1 { + m.To, err = addressPath(part.Envelope.To[0]) + if err != nil { + return nil, nil, fmt.Errorf("parsing To-header: %v", err) + } + } + m.Subject = part.Envelope.Subject + buf, err := io.ReadAll(p0.Reader()) + if err != nil { + return nil, nil, fmt.Errorf("reading human-readable text part: %v", err) + } + m.TextBody = strings.ReplaceAll(string(buf), "\r\n", "\n") + + if nparts == 2 { + return m, &part, nil + } + + p2 := part.Parts[2] + ct := strings.ToLower(p2.MediaType + "/" + p2.MediaSubType) + switch ct { + case "text/rfc822-headers": + case "message/global-headers": + case "message/rfc822": + case "message/global": + default: + return nil, nil, fmt.Errorf("invalid content-type %q for optional third part with original message/headers", ct) + } + + return m, &part, nil +} + +// Decode parses the (global) delivery-status part of a DSN. +// +// utf8 indicates if UTF-8 is allowed for this message, if used by the media +// subtype of the message parts. 
+func Decode(r io.Reader, utf8 bool) (*Message, error) { + m := Message{SMTPUTF8: utf8} + + // We are using textproto.Reader to read mime headers. It requires a header section ending in \r\n. + // ../rfc/3464:486 + b := bufio.NewReader(io.MultiReader(r, strings.NewReader("\r\n"))) + mr := textproto.NewReader(b) + + // Read per-message lines. + // ../rfc/3464:1522 ../rfc/6533:366 + msgh, err := mr.ReadMIMEHeader() + if err != nil { + return nil, fmt.Errorf("reading per-message lines: %v", err) + } + for k, l := range msgh { + if len(l) != 1 { + return nil, fmt.Errorf("multiple values for %q: %v", k, l) + } + v := l[0] + // note: headers are in canonical form, as parsed by textproto. + switch k { + case "Original-Envelope-Id": + m.OriginalEnvelopeID = v + case "Reporting-Mta": + mta, err := parseMTA(v, utf8) + if err != nil { + return nil, fmt.Errorf("parsing reporting-mta: %v", err) + } + m.ReportingMTA = mta + case "Dsn-Gateway": + mta, err := parseMTA(v, utf8) + if err != nil { + return nil, fmt.Errorf("parsing dsn-gateway: %v", err) + } + m.DSNGateway = mta + case "Received-From-Mta": + mta, err := parseMTA(v, utf8) + if err != nil { + return nil, fmt.Errorf("parsing received-from-mta: %v", err) + } + d, err := dns.ParseDomain(mta) + if err != nil { + return nil, fmt.Errorf("parsing received-from-mta domain %q: %v", mta, err) + } + m.ReceivedFromMTA = smtp.Ehlo{Name: dns.IPDomain{Domain: d}} + case "Arrival-Date": + tm, err := parseDateTime(v) + if err != nil { + return nil, fmt.Errorf("parsing arrival-date: %v", err) + } + m.ArrivalDate = tm + default: + // We'll assume it is an extension field, we'll ignore it for now. 
+ } + } + m.MessageHeader = msgh + + required := []string{"Reporting-Mta"} + for _, req := range required { + if _, ok := msgh[req]; !ok { + return nil, fmt.Errorf("missing required recipient field %q", req) + } + } + + rh, err := parseRecipientHeader(mr, utf8) + if err != nil { + return nil, fmt.Errorf("reading per-recipient header: %v", err) + } + m.Recipients = []Recipient{rh} + for { + if _, err := b.Peek(1); err == io.EOF { + break + } + rh, err := parseRecipientHeader(mr, utf8) + if err != nil { + return nil, fmt.Errorf("reading another per-recipient header: %v", err) + } + m.Recipients = append(m.Recipients, rh) + } + return &m, nil +} + +// ../rfc/3464:1530 ../rfc/6533:370 +func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) { + var r Recipient + h, err := mr.ReadMIMEHeader() + if err != nil { + return Recipient{}, err + } + + for k, l := range h { + if len(l) != 1 { + return Recipient{}, fmt.Errorf("multiple values for %q: %v", k, l) + } + v := l[0] + // note: headers are in canonical form, as parsed by textproto. + var err error + switch k { + case "Original-Recipient": + r.OriginalRecipient, err = parseAddress(v, utf8) + case "Final-Recipient": + r.FinalRecipient, err = parseAddress(v, utf8) + case "Action": + a := Action(strings.ToLower(v)) + actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded} + var ok bool + for _, x := range actions { + if a == x { + ok = true + break + } + } + if !ok { + err = fmt.Errorf("unrecognized action %q", v) + } + case "Status": + // todo: parse the enhanced status code? 
+ r.Status = v + case "Remote-Mta": + r.RemoteMTA = NameIP{Name: v} + case "Diagnostic-Code": + // ../rfc/3464:518 + t := strings.SplitN(v, ";", 2) + dt := strings.TrimSpace(t[0]) + if strings.ToLower(dt) != "smtp" { + err = fmt.Errorf("unknown diagnostic-type %q, expected smtp", dt) + } else if len(t) != 2 { + err = fmt.Errorf("missing semicolon to separate diagnostic-type from code") + } else { + r.DiagnosticCode = strings.TrimSpace(t[1]) + } + case "Last-Attempt-Date": + r.LastAttemptDate, err = parseDateTime(v) + case "Final-Log-Id": + r.FinalLogID = v + case "Will-Retry-Until": + tm, err := parseDateTime(v) + if err == nil { + r.WillRetryUntil = &tm + } + default: + // todo future: parse localized diagnostic text field? + // We'll assume it is an extension field, we'll ignore it for now. + } + if err != nil { + return Recipient{}, fmt.Errorf("parsing field %q %q: %v", k, v, err) + } + } + + required := []string{"Final-Recipient", "Action", "Status"} + for _, req := range required { + if _, ok := h[req]; !ok { + return Recipient{}, fmt.Errorf("missing required recipient field %q", req) + } + } + + r.Header = h + return r, nil +} + +// ../rfc/3464:525 +func parseMTA(s string, utf8 bool) (string, error) { + s = removeComments(s) + t := strings.SplitN(s, ";", 2) + if len(t) != 2 { + return "", fmt.Errorf("missing semicolon that splits type and name") + } + k := strings.TrimSpace(t[0]) + if !strings.EqualFold(k, "dns") { + return "", fmt.Errorf("unknown type %q, expected dns", k) + } + return strings.TrimSpace(t[1]), nil +} + +func parseDateTime(s string) (time.Time, error) { + s = removeComments(s) + return time.Parse(message.RFC5322Z, s) +} + +func parseAddress(s string, utf8 bool) (smtp.Path, error) { + s = removeComments(s) + t := strings.SplitN(s, ";", 2) + // ../rfc/3464:513 ../rfc/6533:250 + addrType := strings.ToLower(strings.TrimSpace(t[0])) + if len(t) != 2 { + return smtp.Path{}, fmt.Errorf("missing semicolon that splits address type and address") + } 
else if addrType == "utf-8" { + if !utf8 { + return smtp.Path{}, fmt.Errorf("utf-8 address type for non-utf-8 dsn") + } + } else if addrType != "rfc822" { + return smtp.Path{}, fmt.Errorf("unrecognized address type %q, expected rfc822", addrType) + } + s = strings.TrimSpace(t[1]) + if !utf8 { + for _, c := range s { + if c > 0x7f { + return smtp.Path{}, fmt.Errorf("non-ascii without utf-8 enabled") + } + } + } + // todo: more proper parser + t = strings.SplitN(s, "@", 2) + if len(t) != 2 || t[0] == "" || t[1] == "" { + return smtp.Path{}, fmt.Errorf("invalid email address") + } + d, err := dns.ParseDomain(t[1]) + if err != nil { + return smtp.Path{}, fmt.Errorf("parsing domain: %v", err) + } + var lp string + var esc string + for _, c := range t[0] { + if esc == "" && c == '\\' || esc == `\` && (c == 'x' || c == 'X') || esc == `\x` && c == '{' { + if c == 'X' { + c = 'x' + } + esc += string(c) + } else if strings.HasPrefix(esc, `\x{`) { + if c == '}' { + c, err := strconv.ParseInt(esc[3:], 16, 32) + if err != nil { + return smtp.Path{}, fmt.Errorf("parsing localpart with hexpoint: %v", err) + } + lp += string(rune(c)) + esc = "" + } else { + esc += string(c) + } + } else { + lp += string(c) + } + } + if esc != "" { + return smtp.Path{}, fmt.Errorf("parsing localpart: unfinished embedded unicode char") + } + p := smtp.Path{Localpart: smtp.Localpart(lp), IPDomain: dns.IPDomain{Domain: d}} + return p, nil +} + +func removeComments(s string) string { + n := 0 + r := "" + for _, c := range s { + if c == '(' { + n++ + } else if c == ')' && n > 0 { + n-- + } else if n == 0 { + r += string(c) + } + } + return r +} diff --git a/export.go b/export.go new file mode 100644 index 0000000..d3215a3 --- /dev/null +++ b/export.go @@ -0,0 +1,264 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/store" +) + +func cmdExportMaildir(c *cmd) { + c.params = "dst-path 
account-path [mailbox]" + c.help = `Export one or all mailboxes from an account in maildir format. + +Export bypasses a running mox instance. It opens the account mailbox/message +database file directly. This may block if a running mox instance also has the +database open, e.g. for IMAP connections. +` + args := c.Parse() + xcmdExport(false, args, c) +} + +func cmdExportMbox(c *cmd) { + c.params = "dst-path account-path [mailbox]" + c.help = `Export messages from one or all mailboxes in an account in mbox format. + +Using mbox is not recommended. Maildir is a better format. + +Export bypasses a running mox instance. It opens the account mailbox/message +database file directly. This may block if a running mox instance also has the +database open, e.g. for IMAP connections. + +For mbox export, we use "mboxrd" where message lines starting with the magic +"From " string are escaped by prepending a >. We escape all ">*From ", +otherwise reconstructing the original could lose a ">". +` + args := c.Parse() + xcmdExport(true, args, c) +} + +func xcmdExport(mbox bool, args []string, c *cmd) { + if len(args) != 2 && len(args) != 3 { + c.Usage() + } + + dst := args[0] + accountDir := args[1] + var mailbox string + if len(args) == 3 { + mailbox = args[2] + } + + dbpath := filepath.Join(accountDir, "index.db") + db, err := bstore.Open(dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, store.Message{}, store.Recipient{}, store.Mailbox{}) + xcheckf(err, "open database %q", dbpath) + + err = db.Read(func(tx *bstore.Tx) error { + exporttx(tx, mbox, dst, accountDir, mailbox) + return nil + }) + xcheckf(err, "transaction") +} + +func exporttx(tx *bstore.Tx, mbox bool, dst, accountDir, mailbox string) { + id2name := map[int64]string{} + name2id := map[string]int64{} + + mailboxes, err := bstore.QueryTx[store.Mailbox](tx).List() + xcheckf(err, "query mailboxes") + for _, mb := range mailboxes { + id2name[mb.ID] = mb.Name + name2id[mb.Name] = mb.ID + } + + var mailboxID 
int64 + if mailbox != "" { + var ok bool + mailboxID, ok = name2id[mailbox] + if !ok { + log.Fatalf("mailbox %q not found", mailbox) + } + } + + mboxes := map[string]*os.File{} + + // Open mbox files or create dirs. + var names []string + for _, name := range id2name { + if mailbox != "" && name != mailbox { + continue + } + names = append(names, name) + } + // We need to sort the names because maildirs can create subdirs. Ranging over + // id2name directly would randomize the directory names, we would create a sub + // maildir before the parent, and fail with "dir exists" when creating the parent + // dir. + sort.Slice(names, func(i, j int) bool { + return names[i] < names[j] + }) + for _, name := range names { + p := dst + if mailbox == "" { + p = filepath.Join(p, name) + } + + os.MkdirAll(filepath.Dir(p), 0770) + if mbox { + mbp := p + if mailbox == "" { + mbp += ".mbox" + } + f, err := os.OpenFile(mbp, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) + xcheckf(err, "creating mbox file") + log.Printf("creating mbox file %s", mbp) + mboxes[name] = f + } else { + err = os.Mkdir(p, 0770) + xcheckf(err, "making maildir") + log.Printf("creating maildir %s", p) + subdirs := []string{"new", "cur", "tmp"} + for _, subdir := range subdirs { + err = os.Mkdir(filepath.Join(p, subdir), 0770) + xcheckf(err, "making maildir subdir") + } + } + } + + q := bstore.QueryTx[store.Message](tx) + if mailboxID > 0 { + q.FilterNonzero(store.Message{MailboxID: mailboxID}) + } + defer q.Close() + for { + m, err := q.Next() + if err == bstore.ErrAbsent { + break + } + xcheckf(err, "next message") + + mbname := id2name[m.MailboxID] + + p := dst + if mailbox == "" { + p = filepath.Join(p, mbname) + } + + mp := filepath.Join(accountDir, "msg", store.MessagePath(m.ID)) + var mr io.ReadCloser + if m.Size == int64(len(m.MsgPrefix)) { + log.Printf("message size is prefix size for m id %d", m.ID) + mr = io.NopCloser(bytes.NewReader(m.MsgPrefix)) + } else { + mpf, err := os.Open(mp) + xcheckf(err, "open 
message file") + st, err := mpf.Stat() + xcheckf(err, "stat message file") + size := st.Size() + int64(len(m.MsgPrefix)) + if size != m.Size { + log.Fatalf("message size mismatch, database has %d, size is %d+%d=%d", m.Size, len(m.MsgPrefix), st.Size(), size) + } + mr = store.FileMsgReader(m.MsgPrefix, mpf) + } + + if mbox { + // todo: should we put status flags in Status or X-Status header inside the message? + // todo: should we do anything with Content-Length headers? changing the escaping could invalidate those. is anything checking that field? + + f := mboxes[mbname] + mailfrom := "mox" + if m.MailFrom != "" { + mailfrom = m.MailFrom + } + _, err := fmt.Fprintf(f, "From %s %s\n", mailfrom, m.Received.Format(time.ANSIC)) + xcheckf(err, "writing from header") + r := bufio.NewReader(mr) + for { + line, rerr := r.ReadBytes('\n') + if rerr != io.EOF { + xcheckf(rerr, "reading from message") + } + if len(line) > 0 { + if bytes.HasSuffix(line, []byte("\r\n")) { + line = line[:len(line)-1] + line[len(line)-1] = '\n' + } + if bytes.HasPrefix(bytes.TrimLeft(line, ">"), []byte("From ")) { + _, err = fmt.Fprint(f, ">") + xcheckf(err, "writing escaping >") + } + _, err = f.Write(line) + xcheckf(err, "writing line") + } + if rerr == io.EOF { + break + } + } + _, err = fmt.Fprint(f, "\n") + xcheckf(err, "writing end of message newline") + } else { + if m.Flags.Seen { + p = filepath.Join(p, "cur") + } else { + p = filepath.Join(p, "new") + } + name := fmt.Sprintf("%d.%d.mox:2,", m.Received.Unix(), m.ID) + // todo: more flags? forwarded, (non)junk, phishing, mdnsent would be nice. but what is the convention. dovecot-keywords sounds non-standard. 
+ if m.Flags.Seen { + name += "S" + } + if m.Flags.Answered { + name += "R" + } + if m.Flags.Flagged { + name += "F" + } + if m.Flags.Draft { + name += "D" + } + p = filepath.Join(p, name) + f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) + xcheckf(err, "creating message file in maildir") + + r := bufio.NewReader(mr) + for { + line, rerr := r.ReadBytes('\n') + if rerr != io.EOF { + xcheckf(rerr, "reading from message") + } + if len(line) > 0 { + if bytes.HasSuffix(line, []byte("\r\n")) { + line = line[:len(line)-1] + line[len(line)-1] = '\n' + } + _, err = f.Write(line) + xcheckf(err, "writing line") + } + if rerr == io.EOF { + break + } + } + mr.Close() + err = f.Close() + xcheckf(err, "closing new file in maildir") + } + + mr.Close() + } + + if mbox { + for _, f := range mboxes { + err = f.Close() + xcheckf(err, "closing mbox file") + } + } +} diff --git a/gendoc.sh b/gendoc.sh new file mode 100755 index 0000000..0254290 --- /dev/null +++ b/gendoc.sh @@ -0,0 +1,71 @@ +#!/bin/sh + +( +cat <&1 | sed 's/^\( *\|usage: \)/\t/' + +cat <&1 + +cat <doc.go +gofmt -w doc.go + +( +cat <config/doc.go +gofmt -w config/doc.go diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..05d24fe --- /dev/null +++ b/go.mod @@ -0,0 +1,31 @@ +module github.com/mjl-/mox + +go 1.18 + +require ( + github.com/mjl-/bstore v0.0.0-20230114150735-9d9c0a2dcc79 + github.com/mjl-/sconf v0.0.4 + github.com/mjl-/sherpa v0.6.5 + github.com/mjl-/sherpadoc v0.0.10 + github.com/mjl-/sherpaprom v0.0.2 + github.com/prometheus/client_golang v1.14.0 + golang.org/x/crypto v0.5.0 + golang.org/x/net v0.5.0 + golang.org/x/text v0.6.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce // indirect + github.com/prometheus/client_model v0.3.0 // indirect + 
github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/tools v0.1.12 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..7a5f520 --- /dev/null +++ b/go.sum @@ -0,0 +1,507 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= 
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mjl-/bstore v0.0.0-20230114150735-9d9c0a2dcc79 h1:bptDsTAvgtmIOrhKjMVrUm4JBkF0jekpVmsZdkgALPM= +github.com/mjl-/bstore v0.0.0-20230114150735-9d9c0a2dcc79/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0= +github.com/mjl-/sconf v0.0.4 h1:uyfn4vv5qOULSgiwQsPbbgkiONKnMFMsSOhsHfAiYwI= +github.com/mjl-/sconf v0.0.4/go.mod h1:ezf7YOn7gtClo8y71SqgZKaEkyMQ5Te7vkv4PmTTfwM= +github.com/mjl-/sherpa v0.6.5 h1:d90uG/j8fw+2M+ohCTAcVwTSUURGm8ktYDScJO1nKog= +github.com/mjl-/sherpa v0.6.5/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc= +github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok= +github.com/mjl-/sherpadoc v0.0.10 h1:tvRVd37IIGg70ZmNkNKNnjDSPtKI5/DdEIukMkWtZYE= +github.com/mjl-/sherpadoc v0.0.10/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I= +github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y= +github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4= +github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce h1:oyFmIHo3GLWZzb0odAzN9QUy0MTW6P8JaNRnNVGCBCk= +github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce/go.mod h1:DIEOLmETMQHHr4OgwPG7iC37rDiN9MaZIZxNm5hBtL8= +github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/http/account.go b/http/account.go new file mode 
100644 index 0000000..bd2fe09 --- /dev/null +++ b/http/account.go @@ -0,0 +1,114 @@ +package http + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + + _ "embed" + + "github.com/mjl-/sherpa" + "github.com/mjl-/sherpaprom" + + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/store" +) + +//go:embed accountapi.json +var accountapiJSON []byte + +//go:embed account.html +var accountHTML []byte + +var accountDoc = mustParseAPI(accountapiJSON) + +var accountSherpaHandler http.Handler + +func init() { + collector, err := sherpaprom.NewCollector("moxaccount", nil) + if err != nil { + xlog.Fatalx("creating sherpa prometheus collector", err) + } + + accountSherpaHandler, err = sherpa.NewHandler("/account/api/", moxvar.Version, Account{}, &accountDoc, &sherpa.HandlerOpts{Collector: collector, AdjustFunctionNames: "none"}) + if err != nil { + xlog.Fatalx("sherpa handler", err) + } +} + +// Account exports web API functions for the account web interface. All its +// methods are exported under /account/api/. Function calls require valid HTTP +// Authentication credentials of a user. 
+type Account struct{} + +func accountHandle(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid()) + log := xlog.WithContext(ctx).Fields(mlog.Field("userauth", "")) + var accountName string + authResult := "error" + defer func() { + metrics.AuthenticationInc("httpaccount", "httpbasic", authResult) + }() + // todo: should probably add a cache here instead of looking up password in database all the time, just like in admin.go + if auth := r.Header.Get("Authorization"); auth == "" || !strings.HasPrefix(auth, "Basic ") { + } else if authBuf, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")); err != nil { + log.Infox("parsing base64", err) + } else if t := strings.SplitN(string(authBuf), ":", 2); len(t) != 2 { + log.Info("bad user:pass form") + } else if acc, err := store.OpenEmailAuth(t[0], t[1]); err != nil { + if errors.Is(err, store.ErrUnknownCredentials) { + authResult = "badcreds" + } + log.Infox("open account", err) + } else { + accountName = acc.Name + authResult = "ok" + } + if accountName == "" { + w.Header().Set("WWW-Authenticate", `Basic realm="mox account - login with email address and password"`) + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprintln(w, "http 401 - unauthorized - mox account - login with email address and password") + return + } + + if r.Method == "GET" && r.URL.Path == "/account/" { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Header().Set("Cache-Control", "no-cache; max-age=0") + f, err := os.Open("http/account.html") + if err == nil { + defer f.Close() + io.Copy(w, f) + } else { + w.Write(accountHTML) + } + return + } + accountSherpaHandler.ServeHTTP(w, r.WithContext(context.WithValue(ctx, authCtxKey, accountName))) +} + +type ctxKey string + +var authCtxKey ctxKey = "account" + +// SetPassword saves a new password for the account, invalidating the previous password. +// Sessions are not interrupted, and will keep working. 
New login attempts must use the new password. +// Password must be at least 8 characters. +func (Account) SetPassword(ctx context.Context, password string) { + if len(password) < 8 { + panic(&sherpa.Error{Code: "user:error", Message: "password must be at least 8 characters"}) + } + accountName := ctx.Value(authCtxKey).(string) + acc, err := store.OpenAccount(accountName) + xcheckf(ctx, err, "open account") + defer acc.Close() + err = acc.SetPassword(password) + xcheckf(ctx, err, "setting password") +} diff --git a/http/account.html b/http/account.html new file mode 100644 index 0000000..0d3aff7 --- /dev/null +++ b/http/account.html @@ -0,0 +1,214 @@ + + + + Mox Account + + + + + + +
Loading...
+ + + + diff --git a/http/account_test.go b/http/account_test.go new file mode 100644 index 0000000..187f2bc --- /dev/null +++ b/http/account_test.go @@ -0,0 +1,3 @@ +package http + +// todo: write test for account api calls, at least for authentation and SetPassword. diff --git a/http/accountapi.json b/http/accountapi.json new file mode 100644 index 0000000..73c0ff1 --- /dev/null +++ b/http/accountapi.json @@ -0,0 +1,25 @@ +{ + "Name": "Account", + "Docs": "Account exports web API functions for the account web interface. All its\nmethods are exported under /account/api/. Function calls require valid HTTP\nAuthentication credentials of a user.", + "Functions": [ + { + "Name": "SetPassword", + "Docs": "SetPassword saves a new password for the account, invalidating the previous password.\nSessions are not interrupted, and will keep working. New login attempts must use the new password.\nPassword must be at least 8 characters.", + "Params": [ + { + "Name": "password", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + } + ], + "Sections": [], + "Structs": [], + "Ints": [], + "Strings": [], + "SherpaVersion": 0, + "SherpadocVersion": 1 +} diff --git a/http/admin.go b/http/admin.go new file mode 100644 index 0000000..ab307df --- /dev/null +++ b/http/admin.go @@ -0,0 +1,1382 @@ +package http + +import ( + "bufio" + "bytes" + "context" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "runtime/debug" + "sort" + "strings" + "sync" + "time" + + _ "embed" + + "golang.org/x/crypto/bcrypt" + + "github.com/mjl-/bstore" + "github.com/mjl-/sherpa" + "github.com/mjl-/sherpadoc" + "github.com/mjl-/sherpaprom" + + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dmarc" + "github.com/mjl-/mox/dmarcdb" + "github.com/mjl-/mox/dmarcrpt" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dnsbl" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + mox 
"github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/mtasts" + "github.com/mjl-/mox/mtastsdb" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/spf" + "github.com/mjl-/mox/store" + "github.com/mjl-/mox/tlsrpt" + "github.com/mjl-/mox/tlsrptdb" +) + +//go:embed adminapi.json +var adminapiJSON []byte + +//go:embed admin.html +var adminHTML []byte + +var adminDoc = mustParseAPI(adminapiJSON) + +var adminSherpaHandler http.Handler + +func mustParseAPI(buf []byte) (doc sherpadoc.Section) { + err := json.Unmarshal(buf, &doc) + if err != nil { + xlog.Fatalx("parsing api docs", err) + } + return doc +} + +func init() { + collector, err := sherpaprom.NewCollector("moxadmin", nil) + if err != nil { + xlog.Fatalx("creating sherpa prometheus collector", err) + } + + adminSherpaHandler, err = sherpa.NewHandler("/admin/api/", moxvar.Version, Admin{}, &adminDoc, &sherpa.HandlerOpts{Collector: collector, AdjustFunctionNames: "none"}) + if err != nil { + xlog.Fatalx("sherpa handler", err) + } +} + +// Admin exports web API functions for the admin web interface. All its methods are +// exported under /admin/api/. Function calls require valid HTTP Authentication +// credentials of a user. +type Admin struct{} + +// We keep a cache for authentication so we don't bcrypt for each incoming HTTP request with HTTP basic auth. +// We keep track of the last successful password hash and Authorization header. +// The cache is cleared periodically, see below. +var authCache struct { + sync.Mutex + lastSuccessHash, lastSuccessAuth string +} + +func init() { + go func() { + for { + authCache.Lock() + authCache.lastSuccessHash = "" + authCache.lastSuccessAuth = "" + authCache.Unlock() + time.Sleep(15 * time.Minute) + } + }() +} + +// check whether authentication from the config (passwordfile with bcrypt hash) +// matches the authorization header "authHdr". we don't care about any username. 
+func checkAdminAuth(ctx context.Context, passwordfile, authHdr string) bool { + log := xlog.WithContext(ctx) + + authResult := "error" + defer func() { + metrics.AuthenticationInc("httpadmin", "httpbasic", authResult) + }() + + if !strings.HasPrefix(authHdr, "Basic ") || passwordfile == "" { + return false + } + buf, err := os.ReadFile(passwordfile) + if err != nil { + log.Errorx("reading admin password file", err, mlog.Field("path", passwordfile)) + return false + } + passwordhash := strings.TrimSpace(string(buf)) + authCache.Lock() + defer authCache.Unlock() + if passwordhash != "" && passwordhash == authCache.lastSuccessHash && authHdr != "" && authCache.lastSuccessAuth == authHdr { + authResult = "ok" + return true + } + auth, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(authHdr, "Basic ")) + if err != nil { + return false + } + t := strings.SplitN(string(auth), ":", 2) + if len(t) != 2 || len(t[1]) < 8 { + return false + } + if err := bcrypt.CompareHashAndPassword([]byte(passwordhash), []byte(t[1])); err != nil { + authResult = "badcreds" + return false + } + authCache.lastSuccessHash = passwordhash + authCache.lastSuccessAuth = authHdr + authResult = "ok" + return true +} + +func adminHandle(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid()) + if !checkAdminAuth(ctx, mox.ConfigDirPath(mox.Conf.Static.AdminPasswordFile), r.Header.Get("Authorization")) { + w.Header().Set("WWW-Authenticate", `Basic realm="mox admin - login with empty username and admin password"`) + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprintln(w, "http 401 - unauthorized - mox admin - login with empty username and admin password") + return + } + + if r.Method == "GET" && r.URL.Path == "/admin/" { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Header().Set("Cache-Control", "no-cache; max-age=0") + f, err := os.Open("http/admin.html") + if err == nil { + defer f.Close() + io.Copy(w, f) + } else { + 
w.Write(adminHTML) + } + return + } + adminSherpaHandler.ServeHTTP(w, r.WithContext(ctx)) +} + +type Result struct { + Errors []string + Warnings []string + Instructions []string +} + +type TLSCheckResult struct { + Result +} + +type MX struct { + Host string + Pref int + IPs []string +} + +type MXCheckResult struct { + Records []MX + Result +} + +type SPFRecord struct { + spf.Record +} + +type SPFCheckResult struct { + DomainTXT string + DomainRecord *SPFRecord + HostTXT string + HostRecord *SPFRecord + Result +} + +type DKIMCheckResult struct { + Records []DKIMRecord + Result +} + +type DKIMRecord struct { + Selector string + TXT string + Record *dkim.Record +} + +type DMARCRecord struct { + dmarc.Record +} + +type DMARCCheckResult struct { + Domain string + TXT string + Record *DMARCRecord + Result +} + +type TLSRPTRecord struct { + tlsrpt.Record +} + +type TLSRPTCheckResult struct { + TXT string + Record *TLSRPTRecord + Result +} + +type MTASTSRecord struct { + mtasts.Record +} +type MTASTSCheckResult struct { + CNAMEs []string + TXT string + Record *MTASTSRecord + PolicyText string + Policy *mtasts.Policy + Result +} + +type SRVConfCheckResult struct { + SRVs map[string][]*net.SRV // Service (e.g. "_imaps") to records. + Result +} + +type AutoconfCheckResult struct { + IPs []string + Result +} + +type AutodiscoverSRV struct { + net.SRV + IPs []string +} + +type AutodiscoverCheckResult struct { + Records []AutodiscoverSRV + Result +} + +// CheckResult is the analysis of a domain, its actual configuration (DNS, TLS, +// connectivity) and the mox configuration. It includes configuration instructions +// (e.g. DNS records), and warnings and errors encountered. 
+type CheckResult struct { + Domain string + MX MXCheckResult + TLS TLSCheckResult + SPF SPFCheckResult + DKIM DKIMCheckResult + DMARC DMARCCheckResult + TLSRPT TLSRPTCheckResult + MTASTS MTASTSCheckResult + SRVConf SRVConfCheckResult + Autoconf AutoconfCheckResult + Autodiscover AutodiscoverCheckResult +} + +// logPanic can be called with a defer from a goroutine to prevent the entire program from being shutdown in case of a panic. +func logPanic(ctx context.Context) { + x := recover() + if x == nil { + return + } + log := xlog.WithContext(ctx) + log.Error("recover from panic", mlog.Field("panic", x)) + debug.PrintStack() + metrics.PanicInc("http") +} + +// return IPs we may be listening on. +func xlistenIPs(ctx context.Context) []net.IP { + ips, err := mox.IPs(ctx) + xcheckf(ctx, err, "listing ips") + return ips +} + +// CheckDomain checks the configuration for the domain, such as MX, SMTP STARTTLS, +// SPF, DKIM, DMARC, TLSRPT, MTASTS, autoconfig, autodiscover. +func (Admin) CheckDomain(ctx context.Context, domainName string) (r CheckResult) { + // todo future: should run these checks without a DNS cache so recent changes are picked up. + + resolver := dns.StrictResolver{Pkg: "check"} + dialer := &net.Dialer{Timeout: 5 * time.Second} + return checkDomain(ctx, resolver, dialer, domainName) +} + +func checkDomain(ctx context.Context, resolver dns.Resolver, dialer *net.Dialer, domainName string) (r CheckResult) { + d, err := dns.ParseDomain(domainName) + xcheckf(ctx, err, "parsing domain") + + domain, ok := mox.Conf.Domain(d) + if !ok { + panic(&sherpa.Error{Code: "user:notFound", Message: "domain not found"}) + } + + listenIPs := xlistenIPs(ctx) + isListenIP := func(ip net.IP) bool { + for _, lip := range listenIPs { + if ip.Equal(lip) { + return true + } + } + return false + } + + addf := func(l *[]string, format string, args ...any) { + *l = append(*l, fmt.Sprintf(format, args...)) + } + + // host must be an absolute dns name, ending with a dot. 
+ lookupIPs := func(errors *[]string, host string) (ips []string, ourIPs, notOurIPs []net.IP, rerr error) { + addrs, err := resolver.LookupHost(ctx, host) + if err != nil { + addf(errors, "Looking up %q: %s", host, err) + return nil, nil, nil, err + } + for _, addr := range addrs { + ip := net.ParseIP(addr) + if ip == nil { + addf(errors, "Bad IP %q", addr) + continue + } + ips = append(ips, ip.String()) + if isListenIP(ip) { + ourIPs = append(ourIPs, ip) + } else { + notOurIPs = append(notOurIPs, ip) + } + } + return ips, ourIPs, notOurIPs, nil + } + + checkTLS := func(errors *[]string, host string, ips []string, port string) { + d := tls.Dialer{ + NetDialer: dialer, + Config: &tls.Config{ + ServerName: host, + MinVersion: tls.VersionTLS12, // ../rfc/8996:31 ../rfc/8997:66 + }, + } + for _, ip := range ips { + conn, err := d.DialContext(ctx, "tcp", net.JoinHostPort(ip, port)) + if err != nil { + addf(errors, "TLS connection to hostname %q, IP %q: %s", host, ip, err) + } else { + conn.Close() + } + } + } + + var wg sync.WaitGroup + + // MX + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + mxs, err := resolver.LookupMX(ctx, d.ASCII+".") + if err != nil { + addf(&r.MX.Errors, "Looking up MX records for %q: %s", d, err) + } + r.MX.Records = make([]MX, len(mxs)) + for i, mx := range mxs { + r.MX.Records[i] = MX{mx.Host, int(mx.Pref), nil} + } + if len(mxs) == 1 && mxs[0].Host == "." 
{ + addf(&r.MX.Errors, `MX records consists of explicit null mx record (".") indicating that domain does not accept email.`) + return + } + for i, mx := range mxs { + ips, ourIPs, notOurIPs, err := lookupIPs(&r.MX.Errors, mx.Host) + if err != nil { + addf(&r.MX.Errors, "Looking up IP addresses for mx host %q: %s", mx.Host, err) + } + r.MX.Records[i].IPs = ips + if len(ourIPs) == 0 { + addf(&r.MX.Errors, "None of the IPs that mx %q points to is ours: %v", mx.Host, notOurIPs) + } else if len(notOurIPs) > 0 { + addf(&r.MX.Errors, "Some of the IPs that mx %q points to are not ours: %v", mx.Host, notOurIPs) + } + + } + r.MX.Instructions = []string{ + fmt.Sprintf("Ensure a DNS MX record like the following exists:\n\n\t%s MX 10 %s\n\nWithout the trailing dot, the name would be interpreted as relative to the domain.", d.ASCII+".", mox.Conf.Static.HostnameDomain.ASCII+"."), + } + }() + + // TLS, mostly checking certificate expiration and CA trust. + // todo: should add checks about the listeners (which aren't specific to domains) somewhere else, not on the domain page with this checkDomain call. i.e. submissions, imap starttls, imaps. + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + // MTA-STS, autoconfig, autodiscover are checked in their sections. + + // Dial a single MX host with given IP and perform STARTTLS handshake. 
+ dialSMTPSTARTTLS := func(host, ip string) error { + conn, err := dialer.DialContext(ctx, "tcp", net.JoinHostPort(ip, "25")) + if err != nil { + return err + } + defer func() { + if conn != nil { + conn.Close() + } + }() + + end := time.Now().Add(10 * time.Second) + cctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + conn.SetReadDeadline(end) + conn.SetWriteDeadline(end) + + br := bufio.NewReader(conn) + _, err = br.ReadString('\n') + if err != nil { + return fmt.Errorf("reading SMTP banner from remote: %s", err) + } + if _, err := fmt.Fprintf(conn, "EHLO moxtest\r\n"); err != nil { + return fmt.Errorf("writing SMTP EHLO to remote: %s", err) + } + for { + line, err := br.ReadString('\n') + if err != nil { + return fmt.Errorf("reading SMTP EHLO response from remote: %s", err) + } + if strings.HasPrefix(line, "250-") { + continue + } + if strings.HasPrefix(line, "250 ") { + break + } + return fmt.Errorf("unexpected response to SMTP EHLO from remote: %q", strings.TrimSuffix(line, "\r\n")) + } + if _, err := fmt.Fprintf(conn, "STARTTLS\r\n"); err != nil { + return fmt.Errorf("writing SMTP STARTTLS to remote: %s", err) + } + line, err := br.ReadString('\n') + if err != nil { + return fmt.Errorf("reading response to SMTP STARTTLS from remote: %s", err) + } + if !strings.HasPrefix(line, "220 ") { + return fmt.Errorf("SMTP STARTTLS response from remote not 220 OK: %q", strings.TrimSuffix(line, "\r\n")) + } + tlsconn := tls.Client(conn, &tls.Config{ServerName: host}) + if err := tlsconn.HandshakeContext(cctx); err != nil { + return fmt.Errorf("TLS handshake after SMTP STARTTLS: %s", err) + } + cancel() + conn.Close() + conn = nil + return nil + } + + checkSMTPSTARTTLS := func() { + // Initial errors are ignored, will already have been warned about by MX checks. + mxs, err := resolver.LookupMX(ctx, d.ASCII+".") + if err != nil { + return + } + if len(mxs) == 1 && mxs[0].Host == "." 
{ + return + } + for _, mx := range mxs { + ips, _, _, err := lookupIPs(&r.MX.Errors, mx.Host) + if err != nil { + continue + } + + for _, ip := range ips { + if err := dialSMTPSTARTTLS(mx.Host, ip); err != nil { + addf(&r.TLS.Errors, "SMTP connection with STARTTLS to MX hostname %q IP %s: %s", mx.Host, ip, err) + } + } + } + } + + checkSMTPSTARTTLS() + + }() + + // SPF + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + // Verify a domain with the configured IPs that do SMTP. + verifySPF := func(kind string, domain dns.Domain) (string, *SPFRecord, spf.Record) { + _, txt, record, err := spf.Lookup(ctx, resolver, domain) + if err != nil { + addf(&r.SPF.Errors, "Looking up %s SPF record: %s", kind, err) + } + var xrecord *SPFRecord + if record != nil { + xrecord = &SPFRecord{*record} + } + + spfr := spf.Record{ + Version: "spf1", + } + for _, l := range mox.Conf.Static.Listeners { + if !l.SMTP.Enabled { + continue + } + for _, ipstr := range l.IPs { + ip := net.ParseIP(ipstr) + mechanism := "ip4" + if ip.To4() == nil { + mechanism = "ip6" + } + spfr.Directives = append(spfr.Directives, spf.Directive{Mechanism: mechanism, IP: ip}) + + if record == nil { + continue + } + + args := spf.Args{ + RemoteIP: ip, + MailFromLocalpart: "postmaster", + MailFromDomain: domain, + HelloDomain: dns.IPDomain{Domain: domain}, + LocalIP: net.ParseIP("127.0.0.1"), + LocalHostname: dns.Domain{ASCII: "localhost"}, + } + status, mechanism, expl, err := spf.Evaluate(ctx, record, resolver, args) + if err != nil { + addf(&r.SPF.Errors, "Evaluating IP address %q against %s SPF record: %s", ip, kind, err) + } else if status != spf.StatusPass { + addf(&r.SPF.Errors, "IP address %q does not pass %s SPF evaluation, status not \"pass\" but %q (mechanism %q, explanation %q)", ip, kind, status, mechanism, expl) + } + } + } + spfr.Directives = append(spfr.Directives, spf.Directive{Qualifier: "-", Mechanism: "all"}) + return txt, xrecord, spfr + } + + // Check SPF record for domain. 
+ var dspfr spf.Record + r.SPF.DomainTXT, r.SPF.DomainRecord, dspfr = verifySPF("domain", d) + // todo: possibly check all hosts for MX records? assuming they are also sending mail servers. + r.SPF.HostTXT, r.SPF.HostRecord, _ = verifySPF("host", mox.Conf.Static.HostnameDomain) + + dtxt, err := dspfr.Record() + if err != nil { + addf(&r.SPF.Errors, "Making SPF record for instructions: %s", err) + } + domainspf := fmt.Sprintf("%s IN TXT %s", d.ASCII+".", mox.TXTStrings(dtxt)) + + // Check SPF record for sending host. ../rfc/7208:2263 ../rfc/7208:2287 + hostspf := fmt.Sprintf(`%s IN TXT "v=spf1 a -all"`, mox.Conf.Static.HostnameDomain.ASCII+".") + + addf(&r.SPF.Instructions, "Ensure DNS TXT records like the following exists:\n\n\t%s\n\t%s\n\nIf you have an existing mail setup, with other hosts also sending mail for you domain, you should add those IPs as well. You could replace \"-all\" with \"~all\" to treat mail sent from unlisted IPs as \"softfail\", or with \"?all\" for \"neutral\".", domainspf, hostspf) + }() + + // DKIM + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + var missing []string + var haveEd25519 bool + for sel, selc := range domain.DKIM.Selectors { + if _, ok := selc.Key.(ed25519.PrivateKey); ok { + haveEd25519 = true + } + + _, record, txt, err := dkim.Lookup(ctx, resolver, selc.Domain, d) + if err != nil { + missing = append(missing, sel) + if errors.Is(err, dkim.ErrNoRecord) { + addf(&r.DKIM.Errors, "No DKIM DNS record for selector %q.", sel) + } else if errors.Is(err, dkim.ErrSyntax) { + addf(&r.DKIM.Errors, "Parsing DKIM DNS record for selector %q: %s", sel, err) + } else { + addf(&r.DKIM.Errors, "Fetching DKIM record for selector %q: %s", sel, err) + } + } + if txt != "" { + r.DKIM.Records = append(r.DKIM.Records, DKIMRecord{sel, txt, record}) + pubKey := selc.Key.Public() + var pk []byte + switch k := pubKey.(type) { + case *rsa.PublicKey: + var err error + pk, err = x509.MarshalPKIXPublicKey(k) + if err != nil { + 
addf(&r.DKIM.Errors, "Marshal public key for %q to compare against DNS: %s", sel, err) + continue + } + case ed25519.PublicKey: + pk = []byte(k) + default: + addf(&r.DKIM.Errors, "Internal error: unknown public key type %T.", pubKey) + continue + } + + if record != nil && !bytes.Equal(record.Pubkey, pk) { + addf(&r.DKIM.Errors, "For selector %q, the public key in DKIM DNS TXT record does not match with configured private key.", sel) + missing = append(missing, sel) + } + } + } + if len(domain.DKIM.Selectors) == 0 { + addf(&r.DKIM.Errors, "No DKIM configuration, add a key to the configuration file, and instructions for DNS records will appear here.") + } else if !haveEd25519 { + addf(&r.DKIM.Warnings, "Consider adding an ed25519 key: the keys are smaller, the cryptography faster and more modern.") + } + instr := "" + for _, sel := range missing { + dkimr := dkim.Record{ + Version: "DKIM1", + Hashes: []string{"sha256"}, + PublicKey: domain.DKIM.Selectors[sel].Key.Public(), + } + switch dkimr.PublicKey.(type) { + case *rsa.PublicKey: + case ed25519.PublicKey: + dkimr.Key = "ed25519" + default: + addf(&r.DKIM.Errors, "Internal error: unknown public key type %T.", dkimr.PublicKey) + } + txt, err := dkimr.Record() + if err != nil { + addf(&r.DKIM.Errors, "Making DKIM record for instructions: %s", err) + continue + } + instr += fmt.Sprintf("\n\t%s._domainkey IN TXT %s\n", sel, mox.TXTStrings(txt)) + } + if instr != "" { + instr = "Ensure the following DNS record(s) exists, so mail servers receiving emails from this domain can verify the signatures in the mail headers:\n" + instr + addf(&r.DKIM.Instructions, "%s", instr) + } + }() + + // DMARC + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + _, dmarcDomain, record, txt, err := dmarc.Lookup(ctx, resolver, d) + if err != nil { + addf(&r.DMARC.Errors, "Looking up DMARC record: %s", err) + } else if record == nil { + addf(&r.DMARC.Errors, "No DMARC record") + } + r.DMARC.Domain = dmarcDomain.Name() + 
r.DMARC.TXT = txt + if record != nil { + r.DMARC.Record = &DMARCRecord{*record} + } + if record != nil && record.Policy == "none" { + addf(&r.DMARC.Warnings, "DMARC policy is in test mode (p=none), do not forget to change to p=reject or p=quarantine after test period has been completed.") + } + if record != nil && record.SubdomainPolicy == "none" { + addf(&r.DMARC.Warnings, "DMARC subdomain policy is in test mode (sp=none), do not forget to change to sp=reject or sp=quarantine after test period has been completed.") + } + if record != nil && len(record.AggregateReportAddresses) == 0 { + addf(&r.DMARC.Warnings, "It is recommended you specify you would like aggregate reports about delivery success in the DMARC record, see instructions.") + } + localpart := smtp.Localpart("dmarc-reports") + if domain.DMARC != nil { + localpart = domain.DMARC.ParsedLocalpart + } else { + addf(&r.DMARC.Instructions, `Configure a DMARC destination in domain in config file. Localpart could be %q.`, localpart) + } + dmarcr := dmarc.Record{ + Version: "DMARC1", + Policy: "reject", + AggregateReportAddresses: []dmarc.URI{ + {Address: fmt.Sprintf("mailto:%s!10m", smtp.NewAddress(localpart, d).Pack(false))}, + }, + AggregateReportingInterval: 86400, + Percentage: 100, + } + instr := fmt.Sprintf("Ensure a DNS TXT record like the following exists:\n\n\t_dmarc IN TXT %s\n\nYou can start with testing mode by replacing p=reject with p=none. You can also request for the policy to be applied to a percentage of emails instead of all, by adding pct=X, with X between 0 and 100. Keep in mind that receiving mail servers will apply some anti-spam assessment regardless of the policy and whether it is applied to the message. 
The rua= part requests daily aggregate reports to be sent to the specified address, which is automatically configured and reports automatically analyzed.", mox.TXTStrings(dmarcr.String()))
+		// instr is not a constant format string; pass it through %s so any literal
+		// '%' in the generated record cannot be misinterpreted as a printf verb.
+		addf(&r.DMARC.Instructions, "%s", instr)
+	}()
+
+	// TLSRPT
+	wg.Add(1)
+	go func() {
+		defer logPanic(ctx)
+		defer wg.Done()
+
+		record, txt, err := tlsrpt.Lookup(ctx, resolver, d)
+		if err != nil {
+			addf(&r.TLSRPT.Errors, "Looking up TLSRPT record: %s", err)
+		}
+		r.TLSRPT.TXT = txt
+		if record != nil {
+			r.TLSRPT.Record = &TLSRPTRecord{*record}
+		}
+
+		localpart := smtp.Localpart("tls-reports")
+		if domain.TLSRPT != nil {
+			localpart = domain.TLSRPT.ParsedLocalpart
+		} else {
+			addf(&r.TLSRPT.Errors, `Configure a TLSRPT destination in domain in config file. Localpart could be %q.`, localpart)
+		}
+		tlsrptr := &tlsrpt.Record{
+			Version: "TLSRPTv1",
+			// todo: should URI-encode the URI, including ',', '!' and ';'.
+			RUAs: [][]string{{fmt.Sprintf("mailto:%s", smtp.NewAddress(localpart, d).Pack(false))}},
+		}
+		instr := fmt.Sprintf(`TLSRPT is an opt-in mechanism to request feedback about TLS connectivity from remote SMTP servers when they connect to us. It allows detecting delivery problems and unwanted downgrades to plaintext SMTP connections. With TLSRPT you configure an email address to which reports should be sent. Remote SMTP servers will send a report once a day with the number of successful connections, and the number of failed connections including details that should help debugging/resolving any issues.
+ +Ensure a DNS TXT record like the following exists: + + _smtp._tls IN TXT %s +`, mox.TXTStrings(tlsrptr.String())) + addf(&r.TLSRPT.Instructions, instr) + }() + + // MTA-STS + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + record, txt, cnames, err := mtasts.LookupRecord(ctx, resolver, d) + if err != nil { + addf(&r.MTASTS.Errors, "Looking up MTA-STS record: %s", err) + } + if cnames != nil { + r.MTASTS.CNAMEs = cnames + } else { + r.MTASTS.CNAMEs = []string{} + } + r.MTASTS.TXT = txt + if record != nil { + r.MTASTS.Record = &MTASTSRecord{*record} + } + + policy, text, err := mtasts.FetchPolicy(ctx, d) + if err != nil { + addf(&r.MTASTS.Errors, "Fetching MTA-STS policy: %s", err) + } else if policy.Mode == mtasts.ModeNone { + addf(&r.MTASTS.Warnings, "MTA-STS policy is present, but does not require TLS.") + } else if policy.Mode == mtasts.ModeTesting { + addf(&r.MTASTS.Warnings, "MTA-STS policy is in testing mode, do not forget to change to mode enforce after testing period.") + } + r.MTASTS.PolicyText = text + r.MTASTS.Policy = policy + if policy != nil && policy.Mode != mtasts.ModeNone { + if !policy.Matches(mox.Conf.Static.HostnameDomain) { + addf(&r.MTASTS.Warnings, "Configured hostname is missing from policy MX list.") + } + if policy.MaxAgeSeconds <= 24*3600 { + addf(&r.MTASTS.Warnings, "Policy has a MaxAge of less than 1 day. For stable configurations, the recommended period is in weeks.") + } + + mxl, _ := resolver.LookupMX(ctx, d.ASCII+".") + // We do not check for errors, the MX check will complain about mx errors, we assume we will get the same error here. 
+		mxs := map[dns.Domain]struct{}{}
+		for _, mx := range mxl {
+			domain, err := dns.ParseDomain(strings.TrimSuffix(mx.Host, "."))
+			if err != nil {
+				addf(&r.MTASTS.Warnings, "MX record %q is invalid: %s", mx.Host, err)
+				continue
+			}
+			mxs[domain] = struct{}{}
+		}
+		for mx := range mxs {
+			if !policy.Matches(mx) {
+				addf(&r.MTASTS.Warnings, "MX record %q does not match MTA-STS policy MX list.", mx)
+			}
+		}
+		for _, mx := range policy.MX {
+			if mx.Wildcard {
+				continue
+			}
+			if _, ok := mxs[mx.Domain]; !ok {
+				addf(&r.MTASTS.Warnings, "MX %q in MTA-STS policy is not in MX record.", mx)
+			}
+		}
+		}
+
+		intro := `MTA-STS is an opt-in mechanism to signal to remote SMTP servers which MX records are valid and that they must use the STARTTLS command and verify the TLS connection. Email servers should already be using STARTTLS to protect communication, but active attackers can, and have in the past, removed the indication of support for the optional STARTTLS support from SMTP sessions, or added additional MX records in DNS responses. MTA-STS protects against compromised DNS and compromised plaintext SMTP sessions, but not against compromised internet PKI infrastructure. If an attacker controls a certificate authority, and is willing to use it, MTA-STS does not prevent an attack. MTA-STS does not protect against attackers on first contact with a domain. Only on subsequent contacts, with MTA-STS policies in the cache, can attacks be detected.
+
+After enabling MTA-STS for this domain, remote SMTP servers may still deliver in plain text, without TLS-protection. MTA-STS is an opt-in mechanism, not all servers support it yet.
+
+You can opt-in to MTA-STS by creating a DNS record, _mta-sts.<domain>, and serving a policy at https://mta-sts.<domain>/.well-known/mta-sts.txt. Mox will serve the policy, you must create the DNS records.
+
+You can start with a policy in "testing" mode. Remote SMTP servers will apply the MTA-STS policy, but not abort delivery in case of failure.
Instead, you will receive a report if you have TLSRPT configured. By starting in testing mode for a representative period, verifying all mail can be delivered, you can safely switch to "enforce" mode. While in enforce mode, plaintext deliveries to mox are refused.
+
+The _mta-sts DNS TXT record has an "id" field. The id serves as a version of the policy. A policy specifies the mode: none, testing, enforce. For "none", no TLS is required. A policy has a "max age", indicating how long the policy can be cached. Allowing the policy to be cached for a long time provides stronger counter measures to active attackers, but reduces configuration change agility. After enabling "enforce" mode, remote SMTP servers may and will cache your policy for as long as "max age" was configured. Keep this in mind when enabling/disabling MTA-STS. To disable MTA-STS after having it enabled, publish a new record with mode "none" until all past policy expiration times have passed.
+
+When enabling MTA-STS, or updating a policy, always update the policy first (through a configuration change and reload/restart), and the DNS record second.
+`
+		// intro is not a constant format string; pass it through %s for vet-safe printf use.
+		addf(&r.MTASTS.Instructions, "%s", intro)
+
+		addf(&r.MTASTS.Instructions, `Enable a policy through the configuration file. For new deployments, it is best to start with mode "testing" while enabling TLSRPT. Start with a short "max_age", so updates to your policy are picked up quickly. When confidence in the deployment is high enough, switch to "enforce" mode and a longer "max age". A max age in the order of weeks is recommended. If you foresee a change to your setup in the future, requiring different policies or MX records, you may want to dial back the "max age" ahead of time, similar to how you would handle TTL's in DNS record updates.`)
+
+		host := fmt.Sprintf("Ensure DNS CNAME/A/AAAA records exist that resolve mta-sts.%s to this mail server. 
For example:\n\n\t%s IN CNAME %s\n\n", d.ASCII, "mta-sts."+d.ASCII+".", mox.Conf.Static.HostnameDomain.ASCII+".") + addf(&r.MTASTS.Instructions, host) + + mtastsr := mtasts.Record{ + Version: "STSv1", + ID: time.Now().Format("20060102T150405"), + } + dns := fmt.Sprintf("Ensure a DNS TXT record like the following exists:\n\n\t_mta-sts IN TXT %s\n\nConfigure the ID in the configuration file, it must be of the form [a-zA-Z0-9]{1,31}. It represents the version of the policy. For each policy change, you must change the ID to a new unique value. You could use a timestamp like 20220621T123000. When this field exists, an SMTP server will fetch a policy at https://mta-sts.%s/.well-known/mta-sts.txt. This policy is served by mox.", mox.TXTStrings(mtastsr.String()), d.Name()) + addf(&r.MTASTS.Instructions, dns) + }() + + // SRVConf + wg.Add(1) + go func() { + defer logPanic(ctx) + defer wg.Done() + + type srvReq struct { + name string + port uint16 + host string + srvs []*net.SRV + err error + } + + // We'll assume if any submissions is configured, it is public. Same for imap. And + // if not, that there is a plain option. + var submissions, imaps bool + for _, l := range mox.Conf.Static.Listeners { + if l.TLS != nil && l.Submissions.Enabled { + submissions = true + } + if l.TLS != nil && l.IMAPS.Enabled { + imaps = true + } + } + srvhost := func(ok bool) string { + if ok { + return mox.Conf.Static.HostnameDomain.ASCII + "." + } + return "." 
+		}
+		var reqs = []srvReq{
+			{name: "_submissions", port: 465, host: srvhost(submissions)},
+			{name: "_submission", port: 587, host: srvhost(!submissions)},
+			{name: "_imaps", port: 993, host: srvhost(imaps)},
+			{name: "_imap", port: 143, host: srvhost(!imaps)},
+			{name: "_pop3", port: 110, host: "."},
+			{name: "_pop3s", port: 995, host: "."},
+		}
+		var srvwg sync.WaitGroup
+		srvwg.Add(len(reqs))
+		for i := range reqs {
+			go func(i int) {
+				defer srvwg.Done()
+				_, reqs[i].srvs, reqs[i].err = resolver.LookupSRV(ctx, reqs[i].name[1:], "tcp", d.ASCII+".")
+			}(i)
+		}
+		srvwg.Wait()
+
+		instr := "Ensure DNS records like the following exist:\n\n"
+		r.SRVConf.SRVs = map[string][]*net.SRV{}
+		for _, req := range reqs {
+			// SRV owner name is "_service._tcp.<domain>", matching the instruction line below.
+			name := req.name + "._tcp." + d.ASCII
+			instr += fmt.Sprintf("\t%s._tcp.%s IN SRV 0 1 %d %s\n", req.name, d.ASCII+".", req.port, req.host)
+			r.SRVConf.SRVs[req.name] = req.srvs
+			// Check the error from this request's own lookup, stored by the goroutine above.
+			if req.err != nil {
+				addf(&r.SRVConf.Errors, "Looking up SRV record %q: %s", name, req.err)
+			} else if len(req.srvs) == 0 {
+				addf(&r.SRVConf.Errors, "Missing SRV record %q", name)
+			} else if len(req.srvs) != 1 || req.srvs[0].Target != req.host || req.srvs[0].Port != req.port {
+				addf(&r.SRVConf.Errors, "Unexpected SRV record(s) for %q", name)
+			}
+		}
+		addf(&r.SRVConf.Instructions, "%s", instr)
+	}()
+
+	// Autoconf
+	wg.Add(1)
+	go func() {
+		defer logPanic(ctx)
+		defer wg.Done()
+
+		addf(&r.Autoconf.Instructions, "Ensure a DNS CNAME record like the following exists:\n\n\tautoconfig.%s IN CNAME %s\n\nNote: the trailing dot is relevant, it makes the host name absolute instead of relative to the domain name.", d.ASCII+".", mox.Conf.Static.HostnameDomain.ASCII+".")
+
+		host := "autoconfig." + d.ASCII + "."
+		ips, ourIPs, notOurIPs, err := lookupIPs(&r.Autoconf.Errors, host)
+		if err != nil {
+			addf(&r.Autoconf.Errors, "Looking up autoconfig host: %s", err)
+			return
+		}
+
+		r.Autoconf.IPs = ips
+		if len(ourIPs) == 0 {
+			addf(&r.Autoconf.Errors, "Autoconfig does not point to one of our IP addresses.")
+		} else if len(notOurIPs) > 0 {
+			// This branch fires when the host resolves to addresses that are not ours.
+			addf(&r.Autoconf.Errors, "Autoconfig points to some IP addresses that are not ours: %v", notOurIPs)
+		}
+
+		checkTLS(&r.Autoconf.Errors, "autoconfig."+d.ASCII, ips, "443")
+	}()
+
+	// Autodiscover
+	wg.Add(1)
+	go func() {
+		defer logPanic(ctx)
+		defer wg.Done()
+
+		addf(&r.Autodiscover.Instructions, "Ensure DNS records like the following exist:\n\n\t_autodiscover._tcp.%s IN SRV 0 1 443 autoconfig.%s\n\tautoconfig.%s IN CNAME %s\n\nNote: the trailing dots are relevant, it makes the host names absolute instead of relative to the domain name.", d.ASCII+".", d.ASCII+".", d.ASCII+".", mox.Conf.Static.HostnameDomain.ASCII+".")
+
+		_, srvs, err := resolver.LookupSRV(ctx, "autodiscover", "tcp", d.ASCII+".")
+		if err != nil {
+			addf(&r.Autodiscover.Errors, "Looking up SRV record %q: %s", "autodiscover", err)
+			return
+		}
+		match := false
+		for _, srv := range srvs {
+			ips, ourIPs, notOurIPs, err := lookupIPs(&r.Autodiscover.Errors, srv.Target)
+			if err != nil {
+				addf(&r.Autodiscover.Errors, "Looking up target %q from SRV record: %s", srv.Target, err)
+				continue
+			}
+			if srv.Port != 443 {
+				continue
+			}
+			match = true
+			r.Autodiscover.Records = append(r.Autodiscover.Records, AutodiscoverSRV{*srv, ips})
+			if len(ourIPs) == 0 {
+				addf(&r.Autodiscover.Errors, "SRV target %q does not point to our IPs.", srv.Target)
+			} else if len(notOurIPs) > 0 {
+				addf(&r.Autodiscover.Errors, "SRV target %q points to some IPs that are not ours: %v", srv.Target, notOurIPs)
+			}
+
+			checkTLS(&r.Autodiscover.Errors, strings.TrimSuffix(srv.Target, "."), ips, "443")
+		}
+		if !match {
+			addf(&r.Autodiscover.Errors, "No SRV record for port 443 for https.")
+		}
+
}() + + wg.Wait() + return +} + +// Domains returns all configured domain names, in UTF-8 for IDNA domains. +func (Admin) Domains(ctx context.Context) []dns.Domain { + l := []dns.Domain{} + for _, s := range mox.Conf.Domains() { + d, _ := dns.ParseDomain(s) + l = append(l, d) + } + return l +} + +// Domain returns the dns domain for a (potentially unicode as IDNA) domain name. +func (Admin) Domain(ctx context.Context, domain string) dns.Domain { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parse domain") + _, ok := mox.Conf.Domain(d) + if !ok { + xcheckf(ctx, errors.New("no such domain"), "looking up domain") + } + return d +} + +// DomainLocalparts returns the localparts and accounts configured in domain. +func (Admin) DomainLocalparts(ctx context.Context, domain string) (localpartAccounts map[smtp.Localpart]string) { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parsing domain") + _, ok := mox.Conf.Domain(d) + if !ok { + xcheckf(ctx, errors.New("no such domain"), "looking up domain") + } + return mox.Conf.DomainLocalparts(d) +} + +// Accounts returns the names of all configured accounts. +func (Admin) Accounts(ctx context.Context) []string { + l := mox.Conf.Accounts() + sort.Slice(l, func(i, j int) bool { + return l[i] < l[j] + }) + return l +} + +// Account returns the parsed configuration of an account. +func (Admin) Account(ctx context.Context, account string) map[string]any { + ac, ok := mox.Conf.Account(account) + if !ok { + xcheckf(ctx, errors.New("no such account"), "looking up account") + } + + // todo: should change sherpa to understand config.Account directly, with its anonymous structs. + buf, err := json.Marshal(ac) + xcheckf(ctx, err, "marshal to json") + r := map[string]any{} + err = json.Unmarshal(buf, &r) + xcheckf(ctx, err, "unmarshal from json") + + return r +} + +// ConfigFiles returns the paths and contents of the static and dynamic configuration files. 
+func (Admin) ConfigFiles(ctx context.Context) (staticPath, dynamicPath, static, dynamic string) { + buf0, err := os.ReadFile(mox.ConfigStaticPath) + xcheckf(ctx, err, "read static config file") + buf1, err := os.ReadFile(mox.ConfigDynamicPath) + xcheckf(ctx, err, "read dynamic config file") + return mox.ConfigStaticPath, mox.ConfigDynamicPath, string(buf0), string(buf1) +} + +func xcheckf(ctx context.Context, err error, format string, args ...any) { + if err == nil { + return + } + msg := fmt.Sprintf(format, args...) + errmsg := fmt.Sprintf("%s: %s", msg, err) + log := xlog.WithContext(ctx) + log.Errorx(msg, err) + panic(&sherpa.Error{Code: "server:error", Message: errmsg}) +} + +// MTASTSPolicies returns all mtasts policies from the cache. +func (Admin) MTASTSPolicies(ctx context.Context) (records []mtastsdb.PolicyRecord) { + records, err := mtastsdb.PolicyRecords(ctx) + xcheckf(ctx, err, "fetching mtasts policies from database") + return records +} + +// TLSReports returns TLS reports overlapping with period start/end, for the given +// domain (or all domains if empty). The reports are sorted first by period end +// (most recent first), then by domain. +func (Admin) TLSReports(ctx context.Context, start, end time.Time, domain string) (reports []tlsrptdb.TLSReportRecord) { + records, err := tlsrptdb.RecordsPeriodDomain(ctx, start, end, domain) + xcheckf(ctx, err, "fetching tlsrpt report records from database") + sort.Slice(records, func(i, j int) bool { + iend := records[i].Report.DateRange.End + jend := records[j].Report.DateRange.End + if iend == jend { + return records[i].Domain < records[j].Domain + } + return iend.After(jend) + }) + return records +} + +// TLSReportID returns a single TLS report. 
+func (Admin) TLSReportID(ctx context.Context, domain string, reportID int64) tlsrptdb.TLSReportRecord { + record, err := tlsrptdb.RecordID(ctx, reportID) + if err == nil && record.Domain != domain { + err = bstore.ErrAbsent + } + xcheckf(ctx, err, "fetching tls report from database") + return record +} + +// TLSRPTSummary presents TLS reporting statistics for a single domain +// over a period. +type TLSRPTSummary struct { + Domain string + Success int64 + Failure int64 + ResultTypeCounts map[tlsrpt.ResultType]int +} + +// TLSRPTSummaries returns a summary of received TLS reports overlapping with +// period start/end for one or all domains (when domain is empty). +// The returned summaries are ordered by domain name. +func (Admin) TLSRPTSummaries(ctx context.Context, start, end time.Time, domain string) (domainSummaries []TLSRPTSummary) { + reports, err := tlsrptdb.RecordsPeriodDomain(ctx, start, end, domain) + xcheckf(ctx, err, "fetching tlsrpt reports from database") + summaries := map[string]TLSRPTSummary{} + for _, r := range reports { + sum := summaries[r.Domain] + sum.Domain = r.Domain + for _, result := range r.Report.Policies { + sum.Success += result.Summary.TotalSuccessfulSessionCount + sum.Failure += result.Summary.TotalFailureSessionCount + for _, details := range result.FailureDetails { + if sum.ResultTypeCounts == nil { + sum.ResultTypeCounts = map[tlsrpt.ResultType]int{} + } + sum.ResultTypeCounts[details.ResultType]++ + } + } + summaries[r.Domain] = sum + } + sums := make([]TLSRPTSummary, 0, len(summaries)) + for _, sum := range summaries { + sums = append(sums, sum) + } + sort.Slice(sums, func(i, j int) bool { + return sums[i].Domain < sums[j].Domain + }) + return sums +} + +// DMARCReports returns DMARC reports overlapping with period start/end, for the +// given domain (or all domains if empty). The reports are sorted first by period +// end (most recent first), then by domain. 
+func (Admin) DMARCReports(ctx context.Context, start, end time.Time, domain string) (reports []dmarcdb.DomainFeedback) { + reports, err := dmarcdb.RecordsPeriodDomain(ctx, start, end, domain) + xcheckf(ctx, err, "fetching dmarc reports from database") + sort.Slice(reports, func(i, j int) bool { + iend := reports[i].ReportMetadata.DateRange.End + jend := reports[j].ReportMetadata.DateRange.End + if iend == jend { + return reports[i].Domain < reports[j].Domain + } + return iend > jend + }) + return reports +} + +// DMARCReportID returns a single DMARC report. +func (Admin) DMARCReportID(ctx context.Context, domain string, reportID int64) (report dmarcdb.DomainFeedback) { + report, err := dmarcdb.RecordID(ctx, reportID) + if err == nil && report.Domain != domain { + err = bstore.ErrAbsent + } + xcheckf(ctx, err, "fetching dmarc report from database") + return report +} + +// DMARCSummary presents DMARC aggregate reporting statistics for a single domain +// over a period. +type DMARCSummary struct { + Domain string + Total int + DispositionNone int + DispositionQuarantine int + DispositionReject int + DKIMFail int + SPFFail int + PolicyOverrides map[dmarcrpt.PolicyOverride]int +} + +// DMARCSummaries returns a summary of received DMARC reports overlapping with +// period start/end for one or all domains (when domain is empty). +// The returned summaries are ordered by domain name. 
+func (Admin) DMARCSummaries(ctx context.Context, start, end time.Time, domain string) (domainSummaries []DMARCSummary) { + reports, err := dmarcdb.RecordsPeriodDomain(ctx, start, end, domain) + xcheckf(ctx, err, "fetching dmarc reports from database") + summaries := map[string]DMARCSummary{} + for _, r := range reports { + sum := summaries[r.Domain] + sum.Domain = r.Domain + for _, record := range r.Records { + n := record.Row.Count + + sum.Total += n + + switch record.Row.PolicyEvaluated.Disposition { + case dmarcrpt.DispositionNone: + sum.DispositionNone += n + case dmarcrpt.DispositionQuarantine: + sum.DispositionQuarantine += n + case dmarcrpt.DispositionReject: + sum.DispositionReject += n + } + + if record.Row.PolicyEvaluated.DKIM == dmarcrpt.DMARCFail { + sum.DKIMFail += n + } + if record.Row.PolicyEvaluated.SPF == dmarcrpt.DMARCFail { + sum.SPFFail += n + } + + for _, reason := range record.Row.PolicyEvaluated.Reasons { + if sum.PolicyOverrides == nil { + sum.PolicyOverrides = map[dmarcrpt.PolicyOverride]int{} + } + sum.PolicyOverrides[reason.Type] += n + } + } + summaries[r.Domain] = sum + } + sums := make([]DMARCSummary, 0, len(summaries)) + for _, sum := range summaries { + sums = append(sums, sum) + } + sort.Slice(sums, func(i, j int) bool { + return sums[i].Domain < sums[j].Domain + }) + return sums +} + +// Reverse is the result of a reverse lookup. +type Reverse struct { + Hostnames []string + + // In the future, we can add a iprev-validated host name, and possibly the IPs of the host names. +} + +// LookupIP does a reverse lookup of ip. +func (Admin) LookupIP(ctx context.Context, ip string) Reverse { + resolver := dns.StrictResolver{Pkg: "adminapi"} + names, err := resolver.LookupAddr(ctx, ip) + xcheckf(ctx, err, "looking up ip") + return Reverse{names} +} + +// DNSBLStatus returns the IPs from which outgoing connections may be made and +// their current status in DNSBLs that are configured. 
The IPs are typically the +// configured listen IPs, or otherwise IPs on the machines network interfaces, with +// internal/private IPs removed. +// +// The returned value maps IPs to per DNSBL statuses, where "pass" means not listed and +// anything else is an error string, e.g. "fail: ..." or "temperror: ...". +func (Admin) DNSBLStatus(ctx context.Context) map[string]map[string]string { + resolver := dns.StrictResolver{Pkg: "check"} + return dnsblsStatus(ctx, resolver) +} + +func dnsblsStatus(ctx context.Context, resolver dns.Resolver) map[string]map[string]string { + // todo: check health before using dnsbl? + var dnsbls []dns.Domain + if l, ok := mox.Conf.Static.Listeners["public"]; ok { + for _, dnsbl := range l.SMTP.DNSBLs { + zone, err := dns.ParseDomain(dnsbl) + xcheckf(ctx, err, "parse dnsbl zone") + dnsbls = append(dnsbls, zone) + } + } + + r := map[string]map[string]string{} + for _, ip := range xlistenIPs(ctx) { + if ip.IsLoopback() || ip.IsPrivate() { + continue + } + ipstr := ip.String() + r[ipstr] = map[string]string{} + for _, zone := range dnsbls { + status, expl, err := dnsbl.Lookup(ctx, resolver, zone, ip) + result := string(status) + if err != nil { + result += ": " + err.Error() + } + if expl != "" { + result += ": " + expl + } + r[ipstr][zone.String()] = result + } + } + return r +} + +// DomainRecords returns lines describing DNS records that should exist for the +// configured domain. +func (Admin) DomainRecords(ctx context.Context, domain string) []string { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parsing domain") + dc, ok := mox.Conf.Domain(d) + if !ok { + xcheckf(ctx, errors.New("unknown domain"), "lookup domain") + } + records, err := mox.DomainRecords(dc, d) + xcheckf(ctx, err, "dns records") + return records +} + +// DomainAdd adds a new domain and reloads the configuration. 
+func (Admin) DomainAdd(ctx context.Context, domain, accountName, localpart string) { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parsing domain") + + err = mox.DomainAdd(ctx, d, accountName, smtp.Localpart(localpart)) + xcheckf(ctx, err, "adding domain") +} + +// DomainRemove removes an existing domain and reloads the configuration. +func (Admin) DomainRemove(ctx context.Context, domain string) { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parsing domain") + + err = mox.DomainRemove(ctx, d) + xcheckf(ctx, err, "removing domain") +} + +// AccountAdd adds existing a new account, with an initial email address, and reloads the configuration. +func (Admin) AccountAdd(ctx context.Context, accountName, address string) { + err := mox.AccountAdd(ctx, accountName, address) + xcheckf(ctx, err, "adding account") +} + +// AccountRemove removes an existing account and reloads the configuration. +func (Admin) AccountRemove(ctx context.Context, accountName string) { + err := mox.AccountRemove(ctx, accountName) + xcheckf(ctx, err, "removing account") +} + +// AddressAdd adds a new address to the account, which must already exist. +func (Admin) AddressAdd(ctx context.Context, address, accountName string) { + err := mox.AddressAdd(ctx, address, accountName) + xcheckf(ctx, err, "adding address") +} + +// AddressRemove removes an existing address. +func (Admin) AddressRemove(ctx context.Context, address string) { + err := mox.AddressRemove(ctx, address) + xcheckf(ctx, err, "removing address") +} + +// SetPassword saves a new password for an account, invalidating the previous password. +// Sessions are not interrupted, and will keep working. New login attempts must use the new password. +// Password must be at least 8 characters. 
+func (Admin) SetPassword(ctx context.Context, accountName, password string) { + if len(password) < 8 { + panic(&sherpa.Error{Code: "user:error", Message: "password must be at least 8 characters"}) + } + acc, err := store.OpenAccount(accountName) + xcheckf(ctx, err, "open account") + defer acc.Close() + err = acc.SetPassword(password) + xcheckf(ctx, err, "setting password") +} + +// ClientConfigDomain returns configurations for email clients, IMAP and +// Submission (SMTP) for the domain. +func (Admin) ClientConfigDomain(ctx context.Context, domain string) mox.ClientConfig { + d, err := dns.ParseDomain(domain) + xcheckf(ctx, err, "parsing domain") + + cc, err := mox.ClientConfigDomain(d) + xcheckf(ctx, err, "client config for domain") + return cc +} + +// QueueList returns the messages currently in the outgoing queue. +func (Admin) QueueList(ctx context.Context) []queue.Msg { + l, err := queue.List() + xcheckf(ctx, err, "listing messages in queue") + return l +} + +// QueueKick initiates delivery of a message from the queue. +func (Admin) QueueKick(ctx context.Context, id int64) { + n, err := queue.Kick(id, "", "") + if err == nil && n == 0 { + err = errors.New("message not found") + } + xcheckf(ctx, err, "kick message in queue") +} + +// QueueDrop removes a message from the queue. +func (Admin) QueueDrop(ctx context.Context, id int64) { + n, err := queue.Drop(id, "", "") + if err == nil && n == 0 { + err = errors.New("message not found") + } + xcheckf(ctx, err, "drop message from queue") +} diff --git a/http/admin.html b/http/admin.html new file mode 100644 index 0000000..56055b5 --- /dev/null +++ b/http/admin.html @@ -0,0 +1,1480 @@ + + + + Mox Admin + + + + + + +
Loading...
+ + + + diff --git a/http/admin_test.go b/http/admin_test.go new file mode 100644 index 0000000..c83c3d7 --- /dev/null +++ b/http/admin_test.go @@ -0,0 +1,123 @@ +package http + +import ( + "context" + "crypto/ed25519" + "net" + "os" + "testing" + "time" + + "golang.org/x/crypto/bcrypt" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" +) + +func TestAdminAuth(t *testing.T) { + test := func(passwordfile, authHdr string, expect bool) { + t.Helper() + + ok := checkAdminAuth(context.Background(), passwordfile, authHdr) + if ok != expect { + t.Fatalf("got %v, expected %v", ok, expect) + } + } + + const authOK = "Basic YWRtaW46bW94dGVzdDEyMw==" // admin:moxtest123 + const authBad = "Basic YWRtaW46YmFkcGFzc3dvcmQ=" // admin:badpassword + + const path = "../testdata/http-passwordfile" + os.Remove(path) + defer os.Remove(path) + + test(path, authOK, false) // Password file does not exist. + + adminpwhash, err := bcrypt.GenerateFromPassword([]byte("moxtest123"), bcrypt.DefaultCost) + if err != nil { + t.Fatalf("generate bcrypt hash: %v", err) + } + if err := os.WriteFile(path, adminpwhash, 0660); err != nil { + t.Fatalf("write password file: %v", err) + } + // We loop to also exercise the auth cache. + for i := 0; i < 2; i++ { + test(path, "", false) // Empty/missing header. + test(path, "Malformed ", false) // Not "Basic" + test(path, "Basic malformed ", false) // Bad base64. + test(path, "Basic dGVzdA== ", false) // base64 is ok, but wrong tokens inside. + test(path, authBad, false) // Wrong password. + test(path, authOK, true) + } +} + +func TestCheckDomain(t *testing.T) { + // NOTE: we aren't currently looking at the results, having the code paths executed is better than nothing. 
+ + resolver := dns.MockResolver{ + MX: map[string][]*net.MX{ + "mox.example.": {{Host: "mail.mox.example.", Pref: 10}}, + }, + A: map[string][]string{ + "mail.mox.example.": {"127.0.0.2"}, + }, + AAAA: map[string][]string{ + "mail.mox.example.": {"127.0.0.2"}, + }, + TXT: map[string][]string{ + "mox.example.": {"v=spf1 mx -all"}, + "test._domainkey.mox.example.": {"v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504="}, + "_dmarc.mox.example.": {"v=DMARC1; p=reject; rua=mailto:mjl@mox.example"}, + "_smtp._tls.mox.example": {"v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;"}, + "_mta-sts.mox.example": {"v=STSv1; id=20160831085700Z"}, + }, + CNAME: map[string]string{}, + } + + listener := config.Listener{ + IPs: []string{"127.0.0.2"}, + Hostname: "mox.example", + HostnameDomain: dns.Domain{ASCII: "mox.example"}, + } + listener.SMTP.Enabled = true + listener.AutoconfigHTTPS.Enabled = true + listener.MTASTSHTTPS.Enabled = true + + mox.Conf.Static.Listeners = map[string]config.Listener{ + "public": listener, + } + domain := config.Domain{ + DKIM: config.DKIM{ + Selectors: map[string]config.Selector{ + "test": { + HashEffective: "sha256", + HeadersEffective: []string{"From", "Date", "Subject"}, + Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code. + Domain: dns.Domain{ASCII: "test"}, + }, + "missing": { + HashEffective: "sha256", + HeadersEffective: []string{"From", "Date", "Subject"}, + Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code. + Domain: dns.Domain{ASCII: "missing"}, + }, + }, + Sign: []string{"test", "test2"}, + }, + } + mox.Conf.Dynamic.Domains = map[string]config.Domain{ + "mox.example": domain, + } + + // Make a dialer that fails immediately before actually connecting. 
+ done := make(chan struct{}) + close(done) + dialer := &net.Dialer{Deadline: time.Now().Add(-time.Second), Cancel: done} + + checkDomain(context.Background(), resolver, dialer, "mox.example") + // todo: check returned data + + Admin{}.Domains(context.Background()) // todo: check results + dnsblsStatus(context.Background(), resolver) // todo: check results +} diff --git a/http/adminapi.json b/http/adminapi.json new file mode 100644 index 0000000..2c3277a --- /dev/null +++ b/http/adminapi.json @@ -0,0 +1,3104 @@ +{ + "Name": "Admin", + "Docs": "Admin exports web API functions for the admin web interface. All its methods are\nexported under /admin/api/. Function calls require valid HTTP Authentication\ncredentials of a user.", + "Functions": [ + { + "Name": "CheckDomain", + "Docs": "CheckDomain checks the configuration for the domain, such as MX, SMTP STARTTLS,\nSPF, DKIM, DMARC, TLSRPT, MTASTS, autoconfig, autodiscover.", + "Params": [ + { + "Name": "domainName", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r", + "Typewords": [ + "CheckResult" + ] + } + ] + }, + { + "Name": "Domains", + "Docs": "Domains returns all configured domain names, in UTF-8 for IDNA domains.", + "Params": [], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "[]", + "Domain" + ] + } + ] + }, + { + "Name": "Domain", + "Docs": "Domain returns the dns domain for a (potentially unicode as IDNA) domain name.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "Domain" + ] + } + ] + }, + { + "Name": "DomainLocalparts", + "Docs": "DomainLocalparts returns the localparts and accounts configured in domain.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "localpartAccounts", + "Typewords": [ + "{}", + "string" + ] + } + ] + }, + { + "Name": "Accounts", + "Docs": "Accounts returns the names of all configured accounts.", + 
"Params": [], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "Account", + "Docs": "Account returns the parsed configuration of an account.", + "Params": [ + { + "Name": "account", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "{}", + "any" + ] + } + ] + }, + { + "Name": "ConfigFiles", + "Docs": "ConfigFiles returns the paths and contents of the static and dynamic configuration files.", + "Params": [], + "Returns": [ + { + "Name": "staticPath", + "Typewords": [ + "string" + ] + }, + { + "Name": "dynamicPath", + "Typewords": [ + "string" + ] + }, + { + "Name": "static", + "Typewords": [ + "string" + ] + }, + { + "Name": "dynamic", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "MTASTSPolicies", + "Docs": "MTASTSPolicies returns all mtasts policies from the cache.", + "Params": [], + "Returns": [ + { + "Name": "records", + "Typewords": [ + "[]", + "PolicyRecord" + ] + } + ] + }, + { + "Name": "TLSReports", + "Docs": "TLSReports returns TLS reports overlapping with period start/end, for the given\ndomain (or all domains if empty). 
The reports are sorted first by period end\n(most recent first), then by domain.", + "Params": [ + { + "Name": "start", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "end", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "reports", + "Typewords": [ + "[]", + "TLSReportRecord" + ] + } + ] + }, + { + "Name": "TLSReportID", + "Docs": "TLSReportID returns a single TLS report.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + }, + { + "Name": "reportID", + "Typewords": [ + "int64" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "TLSReportRecord" + ] + } + ] + }, + { + "Name": "TLSRPTSummaries", + "Docs": "TLSRPTSummaries returns a summary of received TLS reports overlapping with\nperiod start/end for one or all domains (when domain is empty).\nThe returned summaries are ordered by domain name.", + "Params": [ + { + "Name": "start", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "end", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "domainSummaries", + "Typewords": [ + "[]", + "TLSRPTSummary" + ] + } + ] + }, + { + "Name": "DMARCReports", + "Docs": "DMARCReports returns DMARC reports overlapping with period start/end, for the\ngiven domain (or all domains if empty). 
The reports are sorted first by period\nend (most recent first), then by domain.", + "Params": [ + { + "Name": "start", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "end", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "reports", + "Typewords": [ + "[]", + "DomainFeedback" + ] + } + ] + }, + { + "Name": "DMARCReportID", + "Docs": "DMARCReportID returns a single DMARC report.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + }, + { + "Name": "reportID", + "Typewords": [ + "int64" + ] + } + ], + "Returns": [ + { + "Name": "report", + "Typewords": [ + "DomainFeedback" + ] + } + ] + }, + { + "Name": "DMARCSummaries", + "Docs": "DMARCSummaries returns a summary of received DMARC reports overlapping with\nperiod start/end for one or all domains (when domain is empty).\nThe returned summaries are ordered by domain name.", + "Params": [ + { + "Name": "start", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "end", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "domainSummaries", + "Typewords": [ + "[]", + "DMARCSummary" + ] + } + ] + }, + { + "Name": "LookupIP", + "Docs": "LookupIP does a reverse lookup of ip.", + "Params": [ + { + "Name": "ip", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "Reverse" + ] + } + ] + }, + { + "Name": "DNSBLStatus", + "Docs": "DNSBLStatus returns the IPs from which outgoing connections may be made and\ntheir current status in DNSBLs that are configured. The IPs are typically the\nconfigured listen IPs, or otherwise IPs on the machines network interfaces, with\ninternal/private IPs removed.\n\nThe returned value maps IPs to per DNSBL statuses, where \"pass\" means not listed and\nanything else is an error string, e.g. 
\"fail: ...\" or \"temperror: ...\".", + "Params": [], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "{}", + "{}", + "string" + ] + } + ] + }, + { + "Name": "DomainRecords", + "Docs": "DomainRecords returns lines describing DNS records that should exist for the\nconfigured domain.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "DomainAdd", + "Docs": "DomainAdd adds a new domain and reloads the configuration.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + }, + { + "Name": "accountName", + "Typewords": [ + "string" + ] + }, + { + "Name": "localpart", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "DomainRemove", + "Docs": "DomainRemove removes an existing domain and reloads the configuration.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "AccountAdd", + "Docs": "AccountAdd adds existing a new account, with an initial email address, and reloads the configuration.", + "Params": [ + { + "Name": "accountName", + "Typewords": [ + "string" + ] + }, + { + "Name": "address", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "AccountRemove", + "Docs": "AccountRemove removes an existing account and reloads the configuration.", + "Params": [ + { + "Name": "accountName", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "AddressAdd", + "Docs": "AddressAdd adds a new address to the account, which must already exist.", + "Params": [ + { + "Name": "address", + "Typewords": [ + "string" + ] + }, + { + "Name": "accountName", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "AddressRemove", + "Docs": "AddressRemove removes an existing address.", + "Params": [ + { + "Name": "address", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + 
}, + { + "Name": "SetPassword", + "Docs": "SetPassword saves a new password for an account, invalidating the previous password.\nSessions are not interrupted, and will keep working. New login attempts must use the new password.\nPassword must be at least 8 characters.", + "Params": [ + { + "Name": "accountName", + "Typewords": [ + "string" + ] + }, + { + "Name": "password", + "Typewords": [ + "string" + ] + } + ], + "Returns": [] + }, + { + "Name": "ClientConfigDomain", + "Docs": "ClientConfigDomain returns configurations for email clients, IMAP and\nSubmission (SMTP) for the domain.", + "Params": [ + { + "Name": "domain", + "Typewords": [ + "string" + ] + } + ], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "ClientConfig" + ] + } + ] + }, + { + "Name": "QueueList", + "Docs": "QueueList returns the messages currently in the outgoing queue.", + "Params": [], + "Returns": [ + { + "Name": "r0", + "Typewords": [ + "[]", + "Msg" + ] + } + ] + }, + { + "Name": "QueueKick", + "Docs": "QueueKick initiates delivery of a message from the queue.", + "Params": [ + { + "Name": "id", + "Typewords": [ + "int64" + ] + } + ], + "Returns": [] + }, + { + "Name": "QueueDrop", + "Docs": "QueueDrop removes a message from the queue.", + "Params": [ + { + "Name": "id", + "Typewords": [ + "int64" + ] + } + ], + "Returns": [] + } + ], + "Sections": [], + "Structs": [ + { + "Name": "CheckResult", + "Docs": "CheckResult is the analysis of a domain, its actual configuration (DNS, TLS,\nconnectivity) and the mox configuration. It includes configuration instructions\n(e.g. 
DNS records), and warnings and errors encountered.", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "MX", + "Docs": "", + "Typewords": [ + "MXCheckResult" + ] + }, + { + "Name": "TLS", + "Docs": "", + "Typewords": [ + "TLSCheckResult" + ] + }, + { + "Name": "SPF", + "Docs": "", + "Typewords": [ + "SPFCheckResult" + ] + }, + { + "Name": "DKIM", + "Docs": "", + "Typewords": [ + "DKIMCheckResult" + ] + }, + { + "Name": "DMARC", + "Docs": "", + "Typewords": [ + "DMARCCheckResult" + ] + }, + { + "Name": "TLSRPT", + "Docs": "", + "Typewords": [ + "TLSRPTCheckResult" + ] + }, + { + "Name": "MTASTS", + "Docs": "", + "Typewords": [ + "MTASTSCheckResult" + ] + }, + { + "Name": "SRVConf", + "Docs": "", + "Typewords": [ + "SRVConfCheckResult" + ] + }, + { + "Name": "Autoconf", + "Docs": "", + "Typewords": [ + "AutoconfCheckResult" + ] + }, + { + "Name": "Autodiscover", + "Docs": "", + "Typewords": [ + "AutodiscoverCheckResult" + ] + } + ] + }, + { + "Name": "MXCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "Records", + "Docs": "", + "Typewords": [ + "[]", + "MX" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "MX", + "Docs": "", + "Fields": [ + { + "Name": "Host", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Pref", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "IPs", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "TLSCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] 
+ } + ] + }, + { + "Name": "SPFCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "DomainTXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "DomainRecord", + "Docs": "", + "Typewords": [ + "nullable", + "SPFRecord" + ] + }, + { + "Name": "HostTXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "HostRecord", + "Docs": "", + "Typewords": [ + "nullable", + "SPFRecord" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "SPFRecord", + "Docs": "", + "Fields": [ + { + "Name": "Version", + "Docs": "Must be \"spf1\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Directives", + "Docs": "An IP is evaluated against each directive until a match is found.", + "Typewords": [ + "[]", + "Directive" + ] + }, + { + "Name": "Redirect", + "Docs": "Modifier that redirects SPF checks to other domain after directives did not match. Optional. For \"redirect=\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Explanation", + "Docs": "Modifier for creating a user-friendly error message when an IP results in status \"fail\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Other", + "Docs": "Other modifiers.", + "Typewords": [ + "[]", + "Modifier" + ] + } + ] + }, + { + "Name": "Directive", + "Docs": "Directive consists of a mechanism that describes how to check if an IP matches,\nan (optional) qualifier indicating the policy for a match, and optional\nparameters specific to the mechanism.", + "Fields": [ + { + "Name": "Qualifier", + "Docs": "Sets the result if this directive matches. 
\"\" and \"+\" are \"pass\", \"-\" is \"fail\", \"?\" is \"neutral\", \"~\" is \"softfail\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Mechanism", + "Docs": "\"all\", \"include\", \"a\", \"mx\", \"ptr\", \"ip4\", \"ip6\", \"exists\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "DomainSpec", + "Docs": "For include, a, mx, ptr, exists. Always in lower-case when parsed using ParseRecord.", + "Typewords": [ + "string" + ] + }, + { + "Name": "IPstr", + "Docs": "Original string for IP, always with /subnet.", + "Typewords": [ + "string" + ] + }, + { + "Name": "IP4CIDRLen", + "Docs": "For a, mx, ip4.", + "Typewords": [ + "nullable", + "int32" + ] + }, + { + "Name": "IP6CIDRLen", + "Docs": "For a, mx, ip6.", + "Typewords": [ + "nullable", + "int32" + ] + } + ] + }, + { + "Name": "Modifier", + "Docs": "Modifier provides additional information for a policy.\n\"redirect\" and \"exp\" are not represented as a Modifier but explicitly in a Record.", + "Fields": [ + { + "Name": "Key", + "Docs": "Key is case-insensitive.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Value", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "DKIMCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "Records", + "Docs": "", + "Typewords": [ + "[]", + "DKIMRecord" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "DKIMRecord", + "Docs": "", + "Fields": [ + { + "Name": "Selector", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "TXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Record", + "Docs": "", + "Typewords": [ + "nullable", + "Record" + ] + } + ] + }, + { + "Name": "Record", + "Docs": "Record is a DKIM DNS record, served on 
\u003cselector\u003e._domainkey.\u003cdomain\u003e for a\ngiven selector and domain (s= and d= in the DKIM-Signature).\n\nThe record is a semicolon-separated list of \"=\"-separated field value pairs.\nStrings should be compared case-insensitively, e.g. k=ed25519 is equivalent to k=ED25519.\n\nExample:\n\n\tv=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504=", + "Fields": [ + { + "Name": "Version", + "Docs": "Version, fixed \"DKIM1\" (case sensitive). Field \"v\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Hashes", + "Docs": "Acceptable hash algorithms, e.g. \"sha1\", \"sha256\". Optional, defaults to all algorithms. Field \"h\".", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Key", + "Docs": "Key type, \"rsa\" or \"ed25519\". Optional, default \"rsa\". Field \"k\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Notes", + "Docs": "Debug notes. Field \"n\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "Pubkey", + "Docs": "Public key, as base64 in record. If empty, the key has been revoked. Field \"p\".", + "Typewords": [ + "[]", + "uint8" + ] + }, + { + "Name": "Services", + "Docs": "Service types. Optional, default \"*\" for all services. Other values: \"email\". Field \"s\".", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Flags", + "Docs": "Flags, colon-separated. Optional, default is no flags. Other values: \"y\" for testing DKIM, \"s\" for \"i=\" must have same domain as \"d\" in signatures. 
Field \"t\".", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "DMARCCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "TXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Record", + "Docs": "", + "Typewords": [ + "nullable", + "DMARCRecord" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "DMARCRecord", + "Docs": "", + "Fields": [ + { + "Name": "Version", + "Docs": "\"v=DMARC1\"", + "Typewords": [ + "string" + ] + }, + { + "Name": "Policy", + "Docs": "Required, for \"p=\".", + "Typewords": [ + "DMARCPolicy" + ] + }, + { + "Name": "SubdomainPolicy", + "Docs": "Like policy but for subdomains. Optional, for \"sp=\".", + "Typewords": [ + "DMARCPolicy" + ] + }, + { + "Name": "AggregateReportAddresses", + "Docs": "Optional, for \"rua=\".", + "Typewords": [ + "[]", + "URI" + ] + }, + { + "Name": "FailureReportAddresses", + "Docs": "Optional, for \"ruf=\"", + "Typewords": [ + "[]", + "URI" + ] + }, + { + "Name": "ADKIM", + "Docs": "\"r\" (default) for relaxed or \"s\" for simple. For \"adkim=\".", + "Typewords": [ + "Align" + ] + }, + { + "Name": "ASPF", + "Docs": "\"r\" (default) for relaxed or \"s\" for simple. For \"aspf=\".", + "Typewords": [ + "Align" + ] + }, + { + "Name": "AggregateReportingInterval", + "Docs": "Default 86400. For \"ri=\"", + "Typewords": [ + "int32" + ] + }, + { + "Name": "FailureReportingOptions", + "Docs": "\"0\" (default), \"1\", \"d\", \"s\". For \"fo=\".", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "ReportingFormat", + "Docs": "\"afrf\" (default). 
Ffor \"rf=\".", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Percentage", + "Docs": "Between 0 and 100, default 100. For \"pct=\".", + "Typewords": [ + "int32" + ] + } + ] + }, + { + "Name": "URI", + "Docs": "URI is a destination address for reporting.", + "Fields": [ + { + "Name": "Address", + "Docs": "Should start with \"mailto:\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "MaxSize", + "Docs": "Optional maximum message size, subject to Unit.", + "Typewords": [ + "uint64" + ] + }, + { + "Name": "Unit", + "Docs": "\"\" (b), \"k\", \"g\", \"t\" (case insensitive), unit size, where k is 2^10 etc.", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "TLSRPTCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "TXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Record", + "Docs": "", + "Typewords": [ + "nullable", + "TLSRPTRecord" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "TLSRPTRecord", + "Docs": "", + "Fields": [ + { + "Name": "Version", + "Docs": "\"TLSRPTv1\", for \"v=\".", + "Typewords": [ + "string" + ] + }, + { + "Name": "RUAs", + "Docs": "Aggregate reporting URI, for \"rua=\". \"rua=\" can occur multiple times, each can be a list. 
Must be URL-encoded strings, with \",\", \"!\" and \";\" encoded.", + "Typewords": [ + "[]", + "[]", + "string" + ] + }, + { + "Name": "Extensions", + "Docs": "", + "Typewords": [ + "[]", + "Extension" + ] + } + ] + }, + { + "Name": "Extension", + "Docs": "Extension is an additional key/value pair for a TLSRPT record.", + "Fields": [ + { + "Name": "Key", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Value", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "MTASTSCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "CNAMEs", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "TXT", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Record", + "Docs": "", + "Typewords": [ + "nullable", + "MTASTSRecord" + ] + }, + { + "Name": "PolicyText", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Policy", + "Docs": "", + "Typewords": [ + "nullable", + "Policy" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "MTASTSRecord", + "Docs": "", + "Fields": [ + { + "Name": "Version", + "Docs": "\"STSv1\", for \"v=\". Required.", + "Typewords": [ + "string" + ] + }, + { + "Name": "ID", + "Docs": "Record version, for \"id=\". 
Required.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Extensions", + "Docs": "Optional extensions.", + "Typewords": [ + "[]", + "Pair" + ] + } + ] + }, + { + "Name": "Pair", + "Docs": "Pair is an extension key/value pair in a MTA-STS DNS record or policy.", + "Fields": [ + { + "Name": "Key", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Value", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "Policy", + "Docs": "Policy is an MTA-STS policy as served at \"https://mta-sts.\u003cdomain\u003e/.well-known/mta-sts.txt\".", + "Fields": [ + { + "Name": "Version", + "Docs": "\"STSv1\"", + "Typewords": [ + "string" + ] + }, + { + "Name": "Mode", + "Docs": "", + "Typewords": [ + "Mode" + ] + }, + { + "Name": "MX", + "Docs": "", + "Typewords": [ + "[]", + "STSMX" + ] + }, + { + "Name": "MaxAgeSeconds", + "Docs": "How long this policy can be cached. Suggested values are in weeks or more.", + "Typewords": [ + "int32" + ] + }, + { + "Name": "Extensions", + "Docs": "", + "Typewords": [ + "[]", + "Pair" + ] + } + ] + }, + { + "Name": "STSMX", + "Docs": "STSMX is an allowlisted MX host name/pattern.\ntodo: find a way to name this just STSMX without getting duplicate names for \"MX\" in the sherpa api.", + "Fields": [ + { + "Name": "Wildcard", + "Docs": "\"*.\" wildcard, e.g. if a subdomain matches. A wildcard must match exactly one label. *.example.com matches mail.example.com, but not example.com, and not foor.bar.example.com.", + "Typewords": [ + "bool" + ] + }, + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "Domain" + ] + } + ] + }, + { + "Name": "Domain", + "Docs": "Domain is a domain name, with one or more labels, with at least an ASCII\nrepresentation, and for IDNA non-ASCII domains a unicode representation.\nThe ASCII string must be used for DNS lookups.", + "Fields": [ + { + "Name": "ASCII", + "Docs": "A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved letters/digits/hyphens) labels. 
Always in lower case.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Unicode", + "Docs": "Name as U-labels. Empty if this is an ASCII-only domain.", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "SRVConfCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "SRVs", + "Docs": "Service (e.g. \"_imaps\") to records.", + "Typewords": [ + "{}", + "[]", + "nullable", + "SRV" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "SRV", + "Docs": "An SRV represents a single DNS SRV record.", + "Fields": [ + { + "Name": "Target", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Port", + "Docs": "", + "Typewords": [ + "uint16" + ] + }, + { + "Name": "Priority", + "Docs": "", + "Typewords": [ + "uint16" + ] + }, + { + "Name": "Weight", + "Docs": "", + "Typewords": [ + "uint16" + ] + } + ] + }, + { + "Name": "AutoconfCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "IPs", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "AutodiscoverCheckResult", + "Docs": "", + "Fields": [ + { + "Name": "Records", + "Docs": "", + "Typewords": [ + "[]", + "AutodiscoverSRV" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Warnings", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "Instructions", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "AutodiscoverSRV", + "Docs": "", + "Fields": [ + { + "Name": "Target", + "Docs": 
"", + "Typewords": [ + "string" + ] + }, + { + "Name": "Port", + "Docs": "", + "Typewords": [ + "uint16" + ] + }, + { + "Name": "Priority", + "Docs": "", + "Typewords": [ + "uint16" + ] + }, + { + "Name": "Weight", + "Docs": "", + "Typewords": [ + "uint16" + ] + }, + { + "Name": "IPs", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "PolicyRecord", + "Docs": "PolicyRecord is a cached policy or absence of a policy.", + "Fields": [ + { + "Name": "Domain", + "Docs": "Domain name, with unicode characters.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Inserted", + "Docs": "", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "ValidEnd", + "Docs": "", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "LastUpdate", + "Docs": "Policies are refreshed on use and periodically.", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "LastUse", + "Docs": "", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "Backoff", + "Docs": "", + "Typewords": [ + "bool" + ] + }, + { + "Name": "RecordID", + "Docs": "As retrieved from DNS.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Version", + "Docs": "\"STSv1\"", + "Typewords": [ + "string" + ] + }, + { + "Name": "Mode", + "Docs": "", + "Typewords": [ + "Mode" + ] + }, + { + "Name": "MX", + "Docs": "", + "Typewords": [ + "[]", + "STSMX" + ] + }, + { + "Name": "MaxAgeSeconds", + "Docs": "How long this policy can be cached. 
Suggested values are in weeks or more.", + "Typewords": [ + "int32" + ] + }, + { + "Name": "Extensions", + "Docs": "", + "Typewords": [ + "[]", + "Pair" + ] + } + ] + }, + { + "Name": "TLSReportRecord", + "Docs": "TLSReportRecord is a TLS report as a database record, including information\nabout the sender.\n\ntodo: should be named just Record, but it would cause a sherpa type name conflict.", + "Fields": [ + { + "Name": "ID", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "Domain", + "Docs": "Domain to which the TLS report applies.", + "Typewords": [ + "string" + ] + }, + { + "Name": "FromDomain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "MailFrom", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Report", + "Docs": "", + "Typewords": [ + "Report" + ] + } + ] + }, + { + "Name": "Report", + "Docs": "Report is a TLSRPT report, transmitted in JSON format.", + "Fields": [ + { + "Name": "organization-name", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "date-range", + "Docs": "", + "Typewords": [ + "TLSRPTDateRange" + ] + }, + { + "Name": "contact-info", + "Docs": "Email address.", + "Typewords": [ + "string" + ] + }, + { + "Name": "report-id", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "policies", + "Docs": "", + "Typewords": [ + "[]", + "Result" + ] + } + ] + }, + { + "Name": "TLSRPTDateRange", + "Docs": "note: with TLSRPT prefix to prevent clash in sherpadoc types.", + "Fields": [ + { + "Name": "start-datetime", + "Docs": "", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "end-datetime", + "Docs": "", + "Typewords": [ + "timestamp" + ] + } + ] + }, + { + "Name": "Result", + "Docs": "", + "Fields": [ + { + "Name": "policy", + "Docs": "", + "Typewords": [ + "ResultPolicy" + ] + }, + { + "Name": "summary", + "Docs": "", + "Typewords": [ + "Summary" + ] + }, + { + "Name": "failure-details", + "Docs": "", + "Typewords": [ + "[]", + "FailureDetails" + ] + } + ] + }, + 
{ + "Name": "ResultPolicy", + "Docs": "", + "Fields": [ + { + "Name": "policy-type", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "policy-string", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + }, + { + "Name": "policy-domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "mx-host", + "Docs": "Example in RFC has errata, it originally was a single string. ../rfc/8460-eid6241 ../rfc/8460:1779", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "Summary", + "Docs": "", + "Fields": [ + { + "Name": "total-successful-session-count", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "total-failure-session-count", + "Docs": "", + "Typewords": [ + "int64" + ] + } + ] + }, + { + "Name": "FailureDetails", + "Docs": "", + "Fields": [ + { + "Name": "result-type", + "Docs": "", + "Typewords": [ + "ResultType" + ] + }, + { + "Name": "sending-mta-ip", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "receiving-mx-hostname", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "receiving-mx-helo", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "receiving-ip", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "failed-session-count", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "additional-information", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "failure-reason-code", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "TLSRPTSummary", + "Docs": "TLSRPTSummary presents TLS reporting statistics for a single domain\nover a period.", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Success", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "Failure", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "ResultTypeCounts", + "Docs": "", + "Typewords": [ + "{}", + "int32" + ] + } + ] + }, + { + "Name": 
"DomainFeedback", + "Docs": "DomainFeedback is a single report stored in the database.", + "Fields": [ + { + "Name": "ID", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "Domain", + "Docs": "Domain where DMARC DNS record was found, could be organizational domain.", + "Typewords": [ + "string" + ] + }, + { + "Name": "FromDomain", + "Docs": "Domain in From-header.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Version", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "ReportMetadata", + "Docs": "", + "Typewords": [ + "ReportMetadata" + ] + }, + { + "Name": "PolicyPublished", + "Docs": "", + "Typewords": [ + "PolicyPublished" + ] + }, + { + "Name": "Records", + "Docs": "", + "Typewords": [ + "[]", + "ReportRecord" + ] + } + ] + }, + { + "Name": "ReportMetadata", + "Docs": "", + "Fields": [ + { + "Name": "OrgName", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Email", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "ExtraContactInfo", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "ReportID", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "DateRange", + "Docs": "", + "Typewords": [ + "DateRange" + ] + }, + { + "Name": "Errors", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "DateRange", + "Docs": "", + "Fields": [ + { + "Name": "Begin", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "End", + "Docs": "", + "Typewords": [ + "int64" + ] + } + ] + }, + { + "Name": "PolicyPublished", + "Docs": "PolicyPublished is the policy as found in DNS for the domain.", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "ADKIM", + "Docs": "", + "Typewords": [ + "Alignment" + ] + }, + { + "Name": "ASPF", + "Docs": "", + "Typewords": [ + "Alignment" + ] + }, + { + "Name": "Policy", + "Docs": "", + "Typewords": [ + "Disposition" + ] + }, + { + "Name": "SubdomainPolicy", + 
"Docs": "", + "Typewords": [ + "Disposition" + ] + }, + { + "Name": "Percentage", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "ReportingOptions", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "ReportRecord", + "Docs": "", + "Fields": [ + { + "Name": "Row", + "Docs": "", + "Typewords": [ + "Row" + ] + }, + { + "Name": "Identifiers", + "Docs": "", + "Typewords": [ + "Identifiers" + ] + }, + { + "Name": "AuthResults", + "Docs": "", + "Typewords": [ + "AuthResults" + ] + } + ] + }, + { + "Name": "Row", + "Docs": "", + "Fields": [ + { + "Name": "SourceIP", + "Docs": "SourceIP must match the pattern ((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5]).){3} (1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])| ([A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}", + "Typewords": [ + "string" + ] + }, + { + "Name": "Count", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "PolicyEvaluated", + "Docs": "", + "Typewords": [ + "PolicyEvaluated" + ] + } + ] + }, + { + "Name": "PolicyEvaluated", + "Docs": "", + "Fields": [ + { + "Name": "Disposition", + "Docs": "", + "Typewords": [ + "Disposition" + ] + }, + { + "Name": "DKIM", + "Docs": "", + "Typewords": [ + "DMARCResult" + ] + }, + { + "Name": "SPF", + "Docs": "", + "Typewords": [ + "DMARCResult" + ] + }, + { + "Name": "Reasons", + "Docs": "", + "Typewords": [ + "[]", + "PolicyOverrideReason" + ] + } + ] + }, + { + "Name": "PolicyOverrideReason", + "Docs": "", + "Fields": [ + { + "Name": "Type", + "Docs": "", + "Typewords": [ + "PolicyOverride" + ] + }, + { + "Name": "Comment", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "Identifiers", + "Docs": "", + "Fields": [ + { + "Name": "EnvelopeTo", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "EnvelopeFrom", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "HeaderFrom", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "AuthResults", + "Docs": "", + "Fields": [ + { + "Name": "DKIM", 
+ "Docs": "", + "Typewords": [ + "[]", + "DKIMAuthResult" + ] + }, + { + "Name": "SPF", + "Docs": "", + "Typewords": [ + "[]", + "SPFAuthResult" + ] + } + ] + }, + { + "Name": "DKIMAuthResult", + "Docs": "", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Selector", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Result", + "Docs": "", + "Typewords": [ + "DKIMResult" + ] + }, + { + "Name": "HumanResult", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "SPFAuthResult", + "Docs": "", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Scope", + "Docs": "", + "Typewords": [ + "SPFDomainScope" + ] + }, + { + "Name": "Result", + "Docs": "", + "Typewords": [ + "SPFResult" + ] + } + ] + }, + { + "Name": "DMARCSummary", + "Docs": "DMARCSummary presents DMARC aggregate reporting statistics for a single domain\nover a period.", + "Fields": [ + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Total", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "DispositionNone", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "DispositionQuarantine", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "DispositionReject", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "DKIMFail", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "SPFFail", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "PolicyOverrides", + "Docs": "", + "Typewords": [ + "{}", + "int32" + ] + } + ] + }, + { + "Name": "Reverse", + "Docs": "Reverse is the result of a reverse lookup.", + "Fields": [ + { + "Name": "Hostnames", + "Docs": "", + "Typewords": [ + "[]", + "string" + ] + } + ] + }, + { + "Name": "ClientConfig", + "Docs": "ClientConfig holds the client configuration for IMAP/Submission for a\ndomain.", + "Fields": [ + { + "Name": "Entries", 
+ "Docs": "", + "Typewords": [ + "[]", + "ClientConfigEntry" + ] + } + ] + }, + { + "Name": "ClientConfigEntry", + "Docs": "", + "Fields": [ + { + "Name": "Protocol", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Host", + "Docs": "", + "Typewords": [ + "Domain" + ] + }, + { + "Name": "Port", + "Docs": "", + "Typewords": [ + "int32" + ] + }, + { + "Name": "Listener", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Note", + "Docs": "", + "Typewords": [ + "string" + ] + } + ] + }, + { + "Name": "Msg", + "Docs": "Msg is a message in the queue.", + "Fields": [ + { + "Name": "ID", + "Docs": "", + "Typewords": [ + "int64" + ] + }, + { + "Name": "Queued", + "Docs": "", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "SenderAccount", + "Docs": "Failures are delivered back to this local account.", + "Typewords": [ + "string" + ] + }, + { + "Name": "SenderLocalpart", + "Docs": "Should be a local user and domain.", + "Typewords": [ + "Localpart" + ] + }, + { + "Name": "SenderDomain", + "Docs": "", + "Typewords": [ + "IPDomain" + ] + }, + { + "Name": "RecipientLocalpart", + "Docs": "Typically a remote user and domain.", + "Typewords": [ + "Localpart" + ] + }, + { + "Name": "RecipientDomain", + "Docs": "", + "Typewords": [ + "IPDomain" + ] + }, + { + "Name": "RecipientDomainStr", + "Docs": "For filtering.", + "Typewords": [ + "string" + ] + }, + { + "Name": "Attempts", + "Docs": "Next attempt is based on last attempt and exponential back off based on attempts.", + "Typewords": [ + "int32" + ] + }, + { + "Name": "DialedIPs", + "Docs": "For each host, the IPs that were dialed. 
Used for IP selection for later attempts.", + "Typewords": [ + "{}", + "[]", + "IP" + ] + }, + { + "Name": "NextAttempt", + "Docs": "For scheduling.", + "Typewords": [ + "timestamp" + ] + }, + { + "Name": "LastAttempt", + "Docs": "", + "Typewords": [ + "nullable", + "timestamp" + ] + }, + { + "Name": "LastError", + "Docs": "", + "Typewords": [ + "string" + ] + }, + { + "Name": "Has8bit", + "Docs": "Whether message contains bytes with high bit set, determines whether 8BITMIME SMTP extension is needed.", + "Typewords": [ + "bool" + ] + }, + { + "Name": "SMTPUTF8", + "Docs": "Whether message requires use of SMTPUTF8.", + "Typewords": [ + "bool" + ] + }, + { + "Name": "Size", + "Docs": "Full size of message, combined MsgPrefix with contents of message file.", + "Typewords": [ + "int64" + ] + }, + { + "Name": "MsgPrefix", + "Docs": "", + "Typewords": [ + "[]", + "uint8" + ] + }, + { + "Name": "DSNUTF8", + "Docs": "If set, this message is a DSN and this is a version using utf-8, for the case the remote MTA supports smtputf8. In this case, Size and MsgPrefix are not relevant.", + "Typewords": [ + "[]", + "uint8" + ] + } + ] + }, + { + "Name": "IPDomain", + "Docs": "IPDomain is an ip address, a domain, or empty.", + "Fields": [ + { + "Name": "IP", + "Docs": "", + "Typewords": [ + "IP" + ] + }, + { + "Name": "Domain", + "Docs": "", + "Typewords": [ + "Domain" + ] + } + ] + } + ], + "Ints": [], + "Strings": [ + { + "Name": "DMARCPolicy", + "Docs": "Policy as used in DMARC DNS record for \"p=\" or \"sp=\".", + "Values": [ + { + "Name": "PolicyEmpty", + "Value": "", + "Docs": "Only for the optional Record.SubdomainPolicy." 
+ }, + { + "Name": "PolicyNone", + "Value": "none", + "Docs": "" + }, + { + "Name": "PolicyQuarantine", + "Value": "quarantine", + "Docs": "" + }, + { + "Name": "PolicyReject", + "Value": "reject", + "Docs": "" + } + ] + }, + { + "Name": "Align", + "Docs": "Align specifies the required alignment of a domain name.", + "Values": [ + { + "Name": "AlignStrict", + "Value": "s", + "Docs": "Strict requires an exact domain name match." + }, + { + "Name": "AlignRelaxed", + "Value": "r", + "Docs": "Relaxed requires either an exact or subdomain name match." + } + ] + }, + { + "Name": "Mode", + "Docs": "Mode indicates how the policy should be interpreted.", + "Values": [ + { + "Name": "ModeEnforce", + "Value": "enforce", + "Docs": "Policy must be followed, i.e. deliveries must fail if a TLS connection cannot be made." + }, + { + "Name": "ModeTesting", + "Value": "testing", + "Docs": "In case TLS cannot be negotiated, plain SMTP can be used, but failures must be reported, e.g. with TLS-RPT." + }, + { + "Name": "ModeNone", + "Value": "none", + "Docs": "In case MTA-STS is not or no longer implemented." 
+ } + ] + }, + { + "Name": "Localpart", + "Docs": "Localpart is a decoded local part of an email address, before the \"@\".\nFor quoted strings, values do not hold the double quote or escaping backslashes.\nAn empty string can be a valid localpart.", + "Values": null + }, + { + "Name": "ResultType", + "Docs": "ResultType represents a TLS error.", + "Values": [ + { + "Name": "ResultSTARTTLSNotSupported", + "Value": "starttls-not-supported", + "Docs": "" + }, + { + "Name": "ResultCertificateHostMismatch", + "Value": "certificate-host-mismatch", + "Docs": "" + }, + { + "Name": "ResultCertificateExpired", + "Value": "certificate-expired", + "Docs": "" + }, + { + "Name": "ResultTLSAInvalid", + "Value": "tlsa-invalid", + "Docs": "" + }, + { + "Name": "ResultDNSSECInvalid", + "Value": "dnssec-invalid", + "Docs": "" + }, + { + "Name": "ResultDANERequired", + "Value": "dane-required", + "Docs": "" + }, + { + "Name": "ResultCertificateNotTrusted", + "Value": "certificate-not-trusted", + "Docs": "" + }, + { + "Name": "ResultSTSPolicyInvalid", + "Value": "sts-policy-invalid", + "Docs": "" + }, + { + "Name": "ResultSTSWebPKIInvalid", + "Value": "sts-webpki-invalid", + "Docs": "" + }, + { + "Name": "ResultValidationFailure", + "Value": "validation-failure", + "Docs": "Other error." + }, + { + "Name": "ResultSTSPolicyFetch", + "Value": "sts-policy-fetch-error", + "Docs": "" + } + ] + }, + { + "Name": "Alignment", + "Docs": "Alignment is the identifier alignment.", + "Values": [ + { + "Name": "AlignmentRelaxed", + "Value": "r", + "Docs": "Subdomains match the DMARC from-domain." + }, + { + "Name": "AlignmentStrict", + "Value": "s", + "Docs": "Only exact from-domain match." 
+ } + ] + }, + { + "Name": "Disposition", + "Docs": "Disposition is the requested action for a DMARC fail as specified in the\nDMARC policy in DNS.", + "Values": [ + { + "Name": "DispositionNone", + "Value": "none", + "Docs": "" + }, + { + "Name": "DispositionQuarantine", + "Value": "quarantine", + "Docs": "" + }, + { + "Name": "DispositionReject", + "Value": "reject", + "Docs": "" + } + ] + }, + { + "Name": "DMARCResult", + "Docs": "DMARCResult is the final validation and alignment verdict for SPF and DKIM.", + "Values": [ + { + "Name": "DMARCPass", + "Value": "pass", + "Docs": "" + }, + { + "Name": "DMARCFail", + "Value": "fail", + "Docs": "" + } + ] + }, + { + "Name": "PolicyOverride", + "Docs": "PolicyOverride is a reason the requested DMARC policy from the DNS record\nwas not applied.", + "Values": [ + { + "Name": "PolicyOverrideForwarded", + "Value": "forwarded", + "Docs": "" + }, + { + "Name": "PolicyOverrideSampledOut", + "Value": "sampled_out", + "Docs": "" + }, + { + "Name": "PolicyOverrideTrustedForwarder", + "Value": "trusted_forwarder", + "Docs": "" + }, + { + "Name": "PolicyOverrideMailingList", + "Value": "mailing_list", + "Docs": "" + }, + { + "Name": "PolicyOverrideLocalPolicy", + "Value": "local_policy", + "Docs": "" + }, + { + "Name": "PolicyOverrideOther", + "Value": "other", + "Docs": "" + } + ] + }, + { + "Name": "DKIMResult", + "Docs": "", + "Values": [ + { + "Name": "DKIMNone", + "Value": "none", + "Docs": "" + }, + { + "Name": "DKIMPass", + "Value": "pass", + "Docs": "" + }, + { + "Name": "DKIMFail", + "Value": "fail", + "Docs": "" + }, + { + "Name": "DKIMPolicy", + "Value": "policy", + "Docs": "" + }, + { + "Name": "DKIMNeutral", + "Value": "neutral", + "Docs": "" + }, + { + "Name": "DKIMTemperror", + "Value": "temperror", + "Docs": "" + }, + { + "Name": "DKIMPermerror", + "Value": "permerror", + "Docs": "" + } + ] + }, + { + "Name": "SPFDomainScope", + "Docs": "", + "Values": [ + { + "Name": "SPFDomainScopeHelo", + "Value": "helo", + 
"Docs": "SMTP EHLO" + }, + { + "Name": "SPFDomainScopeMailFrom", + "Value": "mfrom", + "Docs": "SMTP \"MAIL FROM\"." + } + ] + }, + { + "Name": "SPFResult", + "Docs": "", + "Values": [ + { + "Name": "SPFNone", + "Value": "none", + "Docs": "" + }, + { + "Name": "SPFNeutral", + "Value": "neutral", + "Docs": "" + }, + { + "Name": "SPFPass", + "Value": "pass", + "Docs": "" + }, + { + "Name": "SPFFail", + "Value": "fail", + "Docs": "" + }, + { + "Name": "SPFSoftfail", + "Value": "softfail", + "Docs": "" + }, + { + "Name": "SPFTemperror", + "Value": "temperror", + "Docs": "" + }, + { + "Name": "SPFPermerror", + "Value": "permerror", + "Docs": "" + } + ] + }, + { + "Name": "IP", + "Docs": "An IP is a single IP address, a slice of bytes.\nFunctions in this package accept either 4-byte (IPv4)\nor 16-byte (IPv6) slices as input.\n\nNote that in this documentation, referring to an\nIP address as an IPv4 address or an IPv6 address\nis a semantic property of the address, not just the\nlength of the byte slice: a 16-byte slice can still\nbe an IPv4 address.", + "Values": [] + } + ], + "SherpaVersion": 0, + "SherpadocVersion": 1 +} diff --git a/http/autoconf.go b/http/autoconf.go new file mode 100644 index 0000000..1e87b16 --- /dev/null +++ b/http/autoconf.go @@ -0,0 +1,344 @@ +package http + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" +) + +var ( + metricAutoconf = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_autoconf_request_total", + Help: "Number of autoconf requests.", + }, + []string{"domain"}, + ) + metricAutodiscover = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_autodiscover_request_total", + Help: "Number of autodiscover requests.", + }, + []string{"domain"}, + ) +) + +// 
Autoconfiguration/Autodiscovery: +// +// - Thunderbird will request an "autoconfig" xml file. +// - Microsoft tools will request an "autodiscovery" xml file. +// - In my tests on an internal domain, iOS mail only talks to Apple servers, then +// does not attempt autoconfiguration. Possibly due to them being private DNS names. +// +// DNS records seem optional, but autoconfig. and autodiscover. +// (both CNAME or A) are useful, and so is SRV _autodiscovery._tcp. 0 0 443 +// autodiscover. (or just directly). +// +// Autoconf/discovery only works with valid TLS certificates, not with self-signed +// certs. So use it on public endpoints with certs signed by common CA's, or run +// your own (internal) CA and import the CA cert on your devices. +// +// Also see https://roll.urown.net/server/mail/autoconfig.html + +// Autoconfiguration for Mozilla Thunderbird. +// User should create a DNS record: autoconfig. (CNAME or A). +// See https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat +func autoconfHandle(l config.Listener) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + log := xlog.WithContext(r.Context()) + + var addrDom string + defer func() { + metricAutoconf.WithLabelValues(addrDom).Inc() + }() + + email := r.FormValue("emailaddress") + log.Debug("autoconfig request", mlog.Field("email", email)) + addr, err := smtp.ParseAddress(email) + if err != nil { + http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest) + return + } + + if _, ok := mox.Conf.Domain(addr.Domain); !ok { + http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest) + return + } + addrDom = addr.Domain.Name() + + hostname := l.HostnameDomain + if hostname.IsZero() { + hostname = mox.Conf.Static.HostnameDomain + } + + // Thunderbird doesn't seem to allow U-labels, always return ASCII names. 
+ var resp autoconfigResponse + resp.Version = "1.1" + resp.EmailProvider.ID = addr.Domain.ASCII + resp.EmailProvider.Domain = addr.Domain.ASCII + resp.EmailProvider.DisplayName = email + resp.EmailProvider.DisplayShortName = addr.Domain.ASCII + + var imapPort int + var imapSocket string + if l.IMAPS.Enabled { + imapPort = config.Port(l.IMAPS.Port, 993) + imapSocket = "SSL" + } else if l.IMAP.Enabled { + imapPort = config.Port(l.IMAP.Port, 143) + if l.TLS != nil { + imapSocket = "STARTTLS" + } else { + imapSocket = "plain" + } + } else { + log.Error("autoconfig: no imap configured?") + } + + // todo: specify SCRAM-SHA256 once thunderbird and autoconfig supports it. we could implement CRAM-MD5 and use it. + + resp.EmailProvider.IncomingServer.Type = "imap" + resp.EmailProvider.IncomingServer.Hostname = hostname.ASCII + resp.EmailProvider.IncomingServer.Port = imapPort + resp.EmailProvider.IncomingServer.SocketType = imapSocket + resp.EmailProvider.IncomingServer.Username = email + resp.EmailProvider.IncomingServer.Authentication = "password-cleartext" + + var smtpPort int + var smtpSocket string + if l.Submissions.Enabled { + smtpPort = config.Port(l.Submissions.Port, 465) + smtpSocket = "SSL" + } else if l.Submission.Enabled { + smtpPort = config.Port(l.Submission.Port, 587) + if l.TLS != nil { + smtpSocket = "STARTTLS" + } else { + smtpSocket = "plain" + } + } else { + log.Error("autoconfig: no smtp submission configured?") + } + + resp.EmailProvider.OutgoingServer.Type = "smtp" + resp.EmailProvider.OutgoingServer.Hostname = hostname.ASCII + resp.EmailProvider.OutgoingServer.Port = smtpPort + resp.EmailProvider.OutgoingServer.SocketType = smtpSocket + resp.EmailProvider.OutgoingServer.Username = email + resp.EmailProvider.OutgoingServer.Authentication = "password-cleartext" + + // todo: should we put the email address in the URL? 
+ resp.ClientConfigUpdate.URL = fmt.Sprintf("https://%s/mail/config-v1.1.xml", hostname.ASCII) + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + enc := xml.NewEncoder(w) + enc.Indent("", "\t") + fmt.Fprint(w, xml.Header) + if err := enc.Encode(resp); err != nil { + log.Errorx("marshal autoconfig response", err) + } + } +} + +// Autodiscover from Microsoft, also used by Thunderbird. +// User should create a DNS record: _autodiscover._tcp. IN SRV 0 0 443 > +func autodiscoverHandle(l config.Listener) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + log := xlog.WithContext(r.Context()) + + var addrDom string + defer func() { + metricAutodiscover.WithLabelValues(addrDom).Inc() + }() + + if r.Method != "POST" { + http.Error(w, "405 - method not allowed - post required", http.StatusMethodNotAllowed) + return + } + + var req autodiscoverRequest + if err := xml.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "400 - bad request - parsing autodiscover request: "+err.Error(), http.StatusMethodNotAllowed) + return + } + + log.Debug("autodiscover request", mlog.Field("email", req.Request.EmailAddress)) + + addr, err := smtp.ParseAddress(req.Request.EmailAddress) + if err != nil { + http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest) + return + } + + if _, ok := mox.Conf.Domain(addr.Domain); !ok { + http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest) + return + } + addrDom = addr.Domain.Name() + + hostname := l.HostnameDomain + if hostname.IsZero() { + hostname = mox.Conf.Static.HostnameDomain + } + + // The docs are generated and fragmented in many tiny pages, hard to follow. 
+ // High-level starting point, https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/78530279-d042-4eb0-a1f4-03b18143cd19 + // Request: https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/2096fab2-9c3c-40b9-b123-edf6e8d55a9b + // Response, protocol: https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/f4238db6-a983-435c-807a-b4b4a624c65b + // It appears autodiscover does not allow specifying SCRAM-SHA256 as authentication method. See https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726 + + var imapPort int + imapSSL := "off" + var imapEncryption string + if l.IMAPS.Enabled { + imapPort = config.Port(l.IMAPS.Port, 993) + imapSSL = "on" + imapEncryption = "TLS" // Assuming this means direct TLS. + } else if l.IMAP.Enabled { + imapPort = config.Port(l.IMAP.Port, 143) + if l.TLS != nil { + imapSSL = "on" + } + } else { + log.Error("autoconfig: no imap configured?") + } + + var smtpPort int + smtpSSL := "off" + var smtpEncryption string + if l.Submissions.Enabled { + smtpPort = config.Port(l.Submissions.Port, 465) + smtpSSL = "on" + smtpEncryption = "TLS" // Assuming this means direct TLS. 
+ } else if l.Submission.Enabled { + smtpPort = config.Port(l.Submission.Port, 587) + if l.TLS != nil { + smtpSSL = "on" + } + } else { + log.Error("autoconfig: no smtp submission configured?") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + + resp := autodiscoverResponse{} + resp.XMLName.Local = "Autodiscover" + resp.XMLName.Space = "http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006" + resp.Response.XMLName.Local = "Response" + resp.Response.XMLName.Space = "http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a" + resp.Response.Account = autodiscoverAccount{ + AccountType: "email", + Action: "settings", + Protocol: []autodiscoverProtocol{ + { + Type: "IMAP", + Server: hostname.ASCII, + Port: imapPort, + LoginName: req.Request.EmailAddress, + SSL: imapSSL, + Encryption: imapEncryption, + SPA: "off", // Override default "on", this is Microsofts proprietary authentication protocol. + AuthRequired: "on", + }, + { + Type: "SMTP", + Server: hostname.ASCII, + Port: smtpPort, + LoginName: req.Request.EmailAddress, + SSL: smtpSSL, + Encryption: smtpEncryption, + SPA: "off", // Override default "on", this is Microsofts proprietary authentication protocol. 
+ AuthRequired: "on", + }, + }, + } + enc := xml.NewEncoder(w) + enc.Indent("", "\t") + fmt.Fprint(w, xml.Header) + if err := enc.Encode(resp); err != nil { + log.Errorx("marshal autodiscover response", err) + } + } +} + +// Thunderbird requests these URLs for autoconfig/autodiscover: +// https://autoconfig.example.org/mail/config-v1.1.xml?emailaddress=user%40example.org +// https://autodiscover.example.org/autodiscover/autodiscover.xml +// https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org +// https://example.org/autodiscover/autodiscover.xml +type autoconfigResponse struct { + XMLName xml.Name `xml:"clientConfig"` + Version string `xml:"version,attr"` + + EmailProvider struct { + ID string `xml:"id,attr"` + Domain string `xml:"domain"` + DisplayName string `xml:"displayName"` + DisplayShortName string `xml:"displayShortName"` + + IncomingServer struct { + Type string `xml:"type,attr"` + Hostname string `xml:"hostname"` + Port int `xml:"port"` + SocketType string `xml:"socketType"` + Username string `xml:"username"` + Authentication string `xml:"authentication"` + } `xml:"incomingServer"` + + OutgoingServer struct { + Type string `xml:"type,attr"` + Hostname string `xml:"hostname"` + Port int `xml:"port"` + SocketType string `xml:"socketType"` + Username string `xml:"username"` + Authentication string `xml:"authentication"` + } `xml:"outgoingServer"` + } `xml:"emailProvider"` + + ClientConfigUpdate struct { + URL string `xml:"url,attr"` + } `xml:"clientConfigUpdate"` +} + +type autodiscoverRequest struct { + XMLName xml.Name `xml:"Autodiscover"` + Request struct { + EmailAddress string `xml:"EMailAddress"` + AcceptableResponseSchema string `xml:"AcceptableResponseSchema"` + } +} + +type autodiscoverResponse struct { + XMLName xml.Name + Response struct { + XMLName xml.Name + Account autodiscoverAccount + } +} + +type autodiscoverAccount struct { + AccountType string + Action string + Protocol []autodiscoverProtocol +} + 
+type autodiscoverProtocol struct { + Type string + Server string + Port int + DirectoryPort int + ReferralPort int + LoginName string + SSL string + Encryption string `xml:",omitempty"` + SPA string + AuthRequired string +} diff --git a/http/autoconf_test.go b/http/autoconf_test.go new file mode 100644 index 0000000..c371c85 --- /dev/null +++ b/http/autoconf_test.go @@ -0,0 +1,26 @@ +package http + +import ( + "encoding/xml" + "testing" +) + +func TestAutodiscover(t *testing.T) { + // Request by Thunderbird. + const body = ` + + + test@example.org + http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a + + +` + var req autodiscoverRequest + if err := xml.Unmarshal([]byte(body), &req); err != nil { + t.Fatalf("unmarshal autodiscover request: %v", err) + } + + if req.Request.EmailAddress != "test@example.org" { + t.Fatalf("emailaddress: got %q, expected %q", req.Request.EmailAddress, "test@example.org") + } +} diff --git a/http/mtasts.go b/http/mtasts.go new file mode 100644 index 0000000..740b66e --- /dev/null +++ b/http/mtasts.go @@ -0,0 +1,64 @@ +package http + +import ( + "net/http" + "strings" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/mtasts" +) + +func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) { + log := xlog.WithCid(mox.Cid()) + + if !strings.HasPrefix(r.Host, "mta-sts.") { + http.NotFound(w, r) + return + } + domain, err := dns.ParseDomain(strings.TrimPrefix(r.Host, "mta-sts.")) + if err != nil { + log.Errorx("mtasts policy request: bad domain", err, mlog.Field("host", r.Host)) + http.NotFound(w, r) + return + } + + conf, _ := mox.Conf.Domain(domain) + sts := conf.MTASTS + if sts == nil { + http.NotFound(w, r) + return + } + + var mxs []mtasts.STSMX + for _, s := range sts.MX { + var mx mtasts.STSMX + if strings.HasPrefix(s, "*.") { + mx.Wildcard = true + s = s[2:] + } + d, err := dns.ParseDomain(s) + if err != nil { + log.Errorx("bad 
domain in mtasts config", err, mlog.Field("domain", s)) + http.Error(w, "500 - internal server error - invalid domain in configuration", http.StatusInternalServerError) + return + } + mx.Domain = d + mxs = append(mxs, mx) + } + if len(mxs) == 0 { + mxs = []mtasts.STSMX{{Domain: mox.Conf.Static.HostnameDomain}} + } + + policy := mtasts.Policy{ + Version: "STSv1", + Mode: sts.Mode, + MaxAgeSeconds: int(sts.MaxAge / time.Second), + MX: mxs, + } + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Cache-Control", "no-cache, max-age=0") + w.Write([]byte(policy.String())) +} diff --git a/http/mtasts_test.go b/http/mtasts_test.go new file mode 100644 index 0000000..8dc5b1c --- /dev/null +++ b/http/mtasts_test.go @@ -0,0 +1,3 @@ +package http + +// todo: write tests for mtasts handler diff --git a/http/web.go b/http/web.go new file mode 100644 index 0000000..5d9da03 --- /dev/null +++ b/http/web.go @@ -0,0 +1,240 @@ +// Package http provides HTTP listeners/servers, for +// autoconfiguration/autodiscovery, the account and admin web interface and +// MTA-STS policies. +package http + +import ( + "crypto/tls" + "fmt" + golog "log" + "net" + "net/http" + "strings" + "time" + + _ "net/http/pprof" + + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" +) + +var xlog = mlog.New("http") + +// Set some http headers that should prevent potential abuse. Better safe than sorry. +func safeHeaders(fn http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + h := w.Header() + h.Set("X-Frame-Options", "deny") + h.Set("X-Content-Type-Options", "nosniff") + h.Set("Content-Security-Policy", "default-src 'self' 'unsafe-inline' data:") + h.Set("Referrer-Policy", "same-origin") + fn(w, r) + } +} + +// ListenAndServe starts listeners for HTTP, including those required for ACME to +// generate TLS certificates. 
+func ListenAndServe() { + type serve struct { + kinds []string + tlsConfig *tls.Config + mux *http.ServeMux + } + + for name, l := range mox.Conf.Static.Listeners { + portServe := map[int]serve{} + + var ensureServe func(https bool, port int, kind string) serve + ensureServe = func(https bool, port int, kind string) serve { + s, ok := portServe[port] + if !ok { + s = serve{nil, nil, &http.ServeMux{}} + } + s.kinds = append(s.kinds, kind) + if https && port == 443 && l.TLS.ACME != "" { + s.tlsConfig = l.TLS.ACMEConfig + } else if https { + s.tlsConfig = l.TLS.Config + if l.TLS.ACME != "" { + ensureServe(true, 443, "acme-tls-alpn-01") + } + } + portServe[port] = s + return s + } + + if l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.IMAPS.Enabled { + ensureServe(true, 443, "acme-tls-alpn01") + } + + if l.AdminHTTP.Enabled { + srv := ensureServe(false, config.Port(l.AdminHTTP.Port, 80), "admin-http") + srv.mux.HandleFunc("/", safeHeaders(adminIndex)) + srv.mux.HandleFunc("/admin/", safeHeaders(adminHandle)) + srv.mux.HandleFunc("/account/", safeHeaders(accountHandle)) + } + if l.AdminHTTPS.Enabled { + srv := ensureServe(true, config.Port(l.AdminHTTPS.Port, 443), "admin-https") + srv.mux.HandleFunc("/", safeHeaders(adminIndex)) + srv.mux.HandleFunc("/admin/", safeHeaders(adminHandle)) + srv.mux.HandleFunc("/account/", safeHeaders(accountHandle)) + } + if l.MetricsHTTP.Enabled { + srv := ensureServe(false, config.Port(l.MetricsHTTP.Port, 8010), "metrics-http") + srv.mux.Handle("/metrics", safeHeaders(promhttp.Handler().ServeHTTP)) + srv.mux.HandleFunc("/", safeHeaders(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } else if r.Method != "GET" { + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, `see /metrics`) + })) + } + if l.AutoconfigHTTPS.Enabled { + srv := ensureServe(true, 
443, "autoconfig-https") + srv.mux.HandleFunc("/mail/config-v1.1.xml", safeHeaders(autoconfHandle(l))) + srv.mux.HandleFunc("/autodiscover/autodiscover.xml", safeHeaders(autodiscoverHandle(l))) + } + if l.MTASTSHTTPS.Enabled { + srv := ensureServe(true, 443, "mtasts-https") + srv.mux.HandleFunc("/.well-known/mta-sts.txt", safeHeaders(mtastsPolicyHandle)) + } + if l.PprofHTTP.Enabled { + // Importing net/http/pprof registers handlers on the default serve mux. + port := config.Port(l.PprofHTTP.Port, 8011) + if _, ok := portServe[port]; ok { + xlog.Fatal("cannot serve pprof on same endpoint as other http services") + } + portServe[port] = serve{[]string{"pprof-http"}, nil, http.DefaultServeMux} + } + + // We'll explicitly ensure these TLS certs exist (e.g. are created with ACME) + // immediately after startup. We only do so for our explicitly hostnames, not for + // autoconfig or mta-sts DNS records, they can be requested on demand (perhaps + // never). + ensureHosts := map[dns.Domain]struct{}{} + + if l.TLS != nil && l.TLS.ACME != "" { + m := mox.Conf.Static.ACME[l.TLS.ACME].Manager + + m.AllowHostname(mox.Conf.Static.HostnameDomain) + ensureHosts[mox.Conf.Static.HostnameDomain] = struct{}{} + if l.HostnameDomain.ASCII != "" { + m.AllowHostname(l.HostnameDomain) + ensureHosts[l.HostnameDomain] = struct{}{} + } + + go func() { + // Just in case someone adds quite some domains to their config. We don't want to + // hit any ACME rate limits. + if len(ensureHosts) > 10 { + return + } + + time.Sleep(1 * time.Second) + i := 0 + for hostname := range ensureHosts { + if i > 0 { + // Sleep just a little. We don't want to hammer our ACME provider, e.g. Let's Encrypt. + time.Sleep(10 * time.Second) + } + i++ + + hello := &tls.ClientHelloInfo{ + ServerName: hostname.ASCII, + + // Make us fetch an ECDSA P256 cert. + // We add TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 to get around the ecDSA check in autocert. 
+ CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_AES_128_GCM_SHA256}, + SupportedCurves: []tls.CurveID{tls.CurveP256}, + SignatureSchemes: []tls.SignatureScheme{tls.ECDSAWithP256AndSHA256}, + SupportedVersions: []uint16{tls.VersionTLS13}, + } + xlog.Print("ensuring certificate availability", mlog.Field("hostname", hostname)) + if _, err := m.Manager.GetCertificate(hello); err != nil { + xlog.Errorx("requesting automatic certificate", err, mlog.Field("hostname", hostname)) + } + } + }() + } + + for port, srv := range portServe { + for _, ip := range l.IPs { + listenAndServe(ip, port, srv.tlsConfig, name, srv.kinds, srv.mux) + } + } + } +} + +func adminIndex(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + if r.Method != "GET" { + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + return + } + + const html = ` + + + mox + + + + +

mox

+
/account/, for regular login
+
/admin/, for administrators
+ +` + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Write([]byte(html)) +} + +func listenAndServe(ip string, port int, tlsConfig *tls.Config, name string, kinds []string, mux *http.ServeMux) { + addr := net.JoinHostPort(ip, fmt.Sprintf("%d", port)) + + var protocol string + var ln net.Listener + var err error + if tlsConfig == nil { + protocol = "http" + xlog.Print("http listener", mlog.Field("name", name), mlog.Field("kinds", strings.Join(kinds, ",")), mlog.Field("address", addr)) + ln, err = net.Listen(mox.Network(ip), addr) + if err != nil { + xlog.Fatalx("http: listen"+mox.LinuxSetcapHint(err), err, mlog.Field("addr", addr)) + } + } else { + protocol = "https" + xlog.Print("https listener", mlog.Field("name", name), mlog.Field("kinds", strings.Join(kinds, ",")), mlog.Field("address", addr)) + ln, err = tls.Listen(mox.Network(ip), addr, tlsConfig) + if err != nil { + xlog.Fatalx("https: listen"+mox.LinuxSetcapHint(err), err, mlog.Field("addr", addr)) + } + } + + server := &http.Server{ + Handler: mux, + TLSConfig: tlsConfig, + ErrorLog: golog.New(mlog.ErrWriter(xlog.Fields(mlog.Field("pkg", "net/http")), mlog.LevelInfo, protocol+" error"), "", 0), + } + go func() { + err := server.Serve(ln) + xlog.Fatalx(protocol+": serve", err) + }() +} diff --git a/imapclient/client.go b/imapclient/client.go new file mode 100644 index 0000000..91bfe09 --- /dev/null +++ b/imapclient/client.go @@ -0,0 +1,293 @@ +/* +Package imapclient provides an IMAP4 client, primarily for testing the IMAP4 server. + +Commands can be sent to the server free-form, but responses are parsed strictly. +Behaviour that may not be required by the IMAP4 specification may be expected by +this client. +*/ +package imapclient + +/* +- Try to keep the parsing method names and the types similar to the ABNF names in the RFCs. + +- todo: have mode for imap4rev1 vs imap4rev2, refusing what is not allowed. we are accepting too much now. +- todo: stricter parsing. 
xnonspace() and xword() should be replaced by proper parsers. +*/ + +import ( + "bufio" + "fmt" + "net" + "reflect" + "strings" +) + +// Conn is an IMAP connection to a server. +type Conn struct { + conn net.Conn + r *bufio.Reader + panic bool + tagGen int + record bool // If true, bytes read are added to recordBuf. recorded() resets. + recordBuf []byte + + LastTag string + CapAvailable map[Capability]struct{} // Capabilities available at server, from CAPABILITY command or response code. + CapEnabled map[Capability]struct{} // Capabilities enabled through ENABLE command. +} + +// Error is a parse or other protocol error. +type Error struct{ err error } + +func (e Error) Error() string { + return e.err.Error() +} + +func (e Error) Unwrap() error { + return e.err +} + +// New creates a new client on conn. +// +// If xpanic is true, functions that would return an error instead panic. For parse +// errors, the resulting stack traces show typically show what was being parsed. +// +// The initial untagged greeting response is read and must be "OK". 
+func New(conn net.Conn, xpanic bool) (client *Conn, rerr error) { + c := Conn{ + conn: conn, + r: bufio.NewReader(conn), + panic: xpanic, + CapAvailable: map[Capability]struct{}{}, + CapEnabled: map[Capability]struct{}{}, + } + + defer c.recover(&rerr) + tag := c.xnonspace() + if tag != "*" { + c.xerrorf("expected untagged *, got %q", tag) + } + c.xspace() + ut := c.xuntagged() + switch x := ut.(type) { + case UntaggedResult: + if x.Status != OK { + c.xerrorf("greeting, got status %q, expected OK", x.Status) + } + return &c, nil + case UntaggedPreauth: + c.xerrorf("greeting: unexpected preauth") + case UntaggedBye: + c.xerrorf("greeting: server sent bye") + default: + c.xerrorf("unexpected untagged %v", ut) + } + panic("not reached") +} + +func (c *Conn) recover(rerr *error) { + if c.panic { + return + } + + x := recover() + if x == nil { + return + } + err, ok := x.(Error) + if !ok { + panic(x) + } + *rerr = err +} + +func (c *Conn) xerrorf(format string, args ...any) { + panic(Error{fmt.Errorf(format, args...)}) +} + +func (c *Conn) xcheckf(err error, format string, args ...any) { + if err != nil { + c.xerrorf("%s: %w", fmt.Sprintf(format, args...), err) + } +} + +func (c *Conn) xcheck(err error) { + if err != nil { + panic(err) + } +} + +// Commandf writes a free-form IMAP command to the server. +// If tag is empty, a next unique tag is assigned. +func (c *Conn) Commandf(tag string, format string, args ...any) (rerr error) { + defer c.recover(&rerr) + + if tag == "" { + tag = c.nextTag() + } + c.LastTag = tag + + _, err := fmt.Fprintf(c.conn, "%s %s\r\n", tag, fmt.Sprintf(format, args...)) + c.xcheckf(err, "write command") + return +} + +func (c *Conn) nextTag() string { + c.tagGen++ + return fmt.Sprintf("x%03d", c.tagGen) +} + +// Response reads from the IMAP server until a tagged response line is found. +// The tag must be the same as the tag for the last written command. +// Result holds the status of the command. 
The caller must check if this the status is OK. +func (c *Conn) Response() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + + for { + tag := c.xnonspace() + c.xspace() + if tag == "*" { + untagged = append(untagged, c.xuntagged()) + continue + } + + if tag != c.LastTag { + c.xerrorf("got tag %q, expected %q", tag, c.LastTag) + } + + status := c.xstatus() + c.xspace() + result = c.xresult(status) + c.xcrlf() + return + } +} + +// ReadUntagged reads a single untagged response line. +// Useful for reading lines from IDLE. +func (c *Conn) ReadUntagged() (untagged Untagged, rerr error) { + defer c.recover(&rerr) + + tag := c.xnonspace() + if tag != "*" { + c.xerrorf("got tag %q, expected untagged", tag) + } + c.xspace() + ut := c.xuntagged() + return ut, nil +} + +// Readline reads a line, including CRLF. +// Used with IDLE and synchronous literals. +func (c *Conn) Readline() (line string, rerr error) { + defer c.recover(&rerr) + + line, err := c.r.ReadString('\n') + c.xcheckf(err, "read line") + return line, nil +} + +// ReadContinuation reads a line. If it is a continuation, i.e. starts with a +, it +// is returned without leading "+ " and without trailing crlf. Otherwise, a command +// response is returned. A successfully read continuation can return an empty line. +// Callers should check rerr and result.Status being empty to check if a +// continuation was read. +func (c *Conn) ReadContinuation() (line string, untagged []Untagged, result Result, rerr error) { + if !c.peek('+') { + untagged, result, rerr = c.Response() + c.xcheckf(rerr, "reading non-continuation response") + c.xerrorf("response status %q, expected OK", result.Status) + } + c.xtake("+ ") + line, err := c.Readline() + c.xcheckf(err, "read line") + line = strings.TrimSuffix(line, "\r\n") + return +} + +// Writelinef writes the formatted format and args as a single line, adding CRLF. +// Used with IDLE and synchronous literals. 
+func (c *Conn) Writelinef(format string, args ...any) (rerr error) { + defer c.recover(&rerr) + + s := fmt.Sprintf(format, args...) + _, err := fmt.Fprintf(c.conn, "%s\r\n", s) + c.xcheckf(err, "writeline") + return nil +} + +// Write writes directly to the connection. Write errors do take the connections +// panic mode into account, i.e. Write can panic. +func (c *Conn) Write(buf []byte) (n int, rerr error) { + defer c.recover(&rerr) + + n, rerr = c.conn.Write(buf) + c.xcheckf(rerr, "write") + return n, nil +} + +// WriteSyncLiteral first writes the synchronous literal size, then read the +// continuation "+" and finally writes the data. +func (c *Conn) WriteSyncLiteral(s string) (rerr error) { + defer c.recover(&rerr) + + _, err := fmt.Fprintf(c.conn, "{%d}\r\n", len(s)) + c.xcheckf(err, "write sync literal size") + line, err := c.Readline() + c.xcheckf(err, "read line") + if !strings.HasPrefix(line, "+") { + c.xerrorf("no continuation received for sync literal") + } + _, err = c.conn.Write([]byte(s)) + c.xcheckf(err, "write literal data") + return nil +} + +// Transactf writes format and args as an IMAP command, using Commandf with an +// empty tag. I.e. format must not contain a tag. Transactf then reads a response +// using ReadResponse and checks the result status is OK. +func (c *Conn) Transactf(format string, args ...any) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + + err := c.Commandf("", format, args...) 
+ if err != nil { + return nil, Result{}, err + } + return c.ResponseOK() +} + +func (c *Conn) ResponseOK() (untagged []Untagged, result Result, rerr error) { + untagged, result, rerr = c.Response() + if rerr != nil { + return nil, Result{}, rerr + } + if result.Status != OK { + c.xerrorf("response status %q, expected OK", result.Status) + } + return untagged, result, rerr +} + +func (c *Conn) xgetUntagged(l []Untagged, dst any) { + if len(l) != 1 { + c.xerrorf("got %d untagged, expected 1: %v", len(l), l) + } + got := l[0] + gotv := reflect.ValueOf(got) + dstv := reflect.ValueOf(dst) + if gotv.Type() != dstv.Type().Elem() { + c.xerrorf("got %v, expected %v", gotv.Type(), dstv.Type().Elem()) + } + dstv.Elem().Set(gotv) +} + +// Close closes the connection without writing anything to the server. +// You may want to call Logout. Closing a connection with a mailbox with deleted +// message not yet expunged will not expunge those messages. +func (c *Conn) Close() error { + var err error + if c.conn != nil { + err = c.conn.Close() + c.conn = nil + } + return err +} diff --git a/imapclient/cmds.go b/imapclient/cmds.go new file mode 100644 index 0000000..9b6025d --- /dev/null +++ b/imapclient/cmds.go @@ -0,0 +1,292 @@ +package imapclient + +import ( + "bufio" + "crypto/tls" + "encoding/base64" + "fmt" + "strings" + "time" + + "github.com/mjl-/mox/scram" +) + +// Capability requests a list of capabilities from the server. They are returned in +// an UntaggedCapability response. The server also sends capabilities in initial +// server greeting, in the response code. +func (c *Conn) Capability() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("capability") +} + +// Noop does nothing on its own, but a server will return any pending untagged +// responses for new message delivery and changes to mailboxes. 
+func (c *Conn) Noop() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("capability") +} + +// Logout ends the IMAP session by writing a LOGOUT command. Close must still be +// closed on this client to close the socket. +func (c *Conn) Logout() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("logout") +} + +// Starttls enables TLS on the connection with the STARTTLS command. +func (c *Conn) Starttls(config *tls.Config) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + untagged, result, rerr = c.Transactf("starttls") + c.xcheckf(rerr, "starttls command") + conn := tls.Client(c.conn, config) + err := conn.Handshake() + c.xcheckf(err, "tls handshake") + c.conn = conn + c.r = bufio.NewReader(conn) + return untagged, result, nil +} + +// Login authenticates with username and password +func (c *Conn) Login(username, password string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("login %s %s", astring(username), astring(password)) +} + +// Authenticate with plaintext password using AUTHENTICATE PLAIN. +func (c *Conn) AuthenticatePlain(username, password string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + + untagged, result, rerr = c.Transactf("authenticate plain %s", base64.StdEncoding.EncodeToString(fmt.Appendf(nil, "\u0000%s\u0000%s", username, password))) + return +} + +// Authenticate with SCRAM-SHA-256, where the password is not exchanged in original +// plaintext form, but only derived hashes are exchanged by both parties as proof +// of knowledge of password. 
+func (c *Conn) AuthenticateSCRAMSHA256(username, password string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + + sc := scram.NewClient(username, "") + clientFirst, err := sc.ClientFirst() + c.xcheckf(err, "scram clientFirst") + c.LastTag = c.nextTag() + err = c.Writelinef("%s authenticate scram-sha-256 %s", c.LastTag, base64.StdEncoding.EncodeToString([]byte(clientFirst))) + c.xcheckf(err, "writing command line") + + xreadContinuation := func() []byte { + var line string + line, untagged, result, rerr = c.ReadContinuation() + c.xcheckf(err, "read continuation") + if result.Status != "" { + c.xerrorf("unexpected status %q", result.Status) + } + buf, err := base64.StdEncoding.DecodeString(line) + c.xcheckf(err, "parsing base64 from remote") + return buf + } + + serverFirst := xreadContinuation() + clientFinal, err := sc.ServerFirst(serverFirst, password) + c.xcheckf(err, "scram clientFinal") + err = c.Writelinef("%s", base64.StdEncoding.EncodeToString([]byte(clientFinal))) + c.xcheckf(err, "write scram clientFinal") + + serverFinal := xreadContinuation() + err = sc.ServerFinal(serverFinal) + c.xcheckf(err, "scram serverFinal") + + // We must send a response to the server continuation line, but we have nothing to say. ../rfc/9051:6221 + err = c.Writelinef("%s", base64.StdEncoding.EncodeToString(nil)) + c.xcheckf(err, "scram client end") + + return c.ResponseOK() +} + +// Enable enables capabilities for use with the connection, verifying the server has indeed enabled them. 
+func (c *Conn) Enable(capabilities ...string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + + untagged, result, rerr = c.Transactf("enable %s", strings.Join(capabilities, " ")) + c.xcheck(rerr) + var enabled UntaggedEnabled + c.xgetUntagged(untagged, &enabled) + got := map[string]struct{}{} + for _, cap := range enabled { + got[cap] = struct{}{} + } + for _, cap := range capabilities { + if _, ok := got[cap]; !ok { + c.xerrorf("capability %q not enabled by server", cap) + } + } + return +} + +// Select opens mailbox as active mailbox. +func (c *Conn) Select(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("select %s", astring(mailbox)) +} + +// Examine opens mailbox as active mailbox read-only. +func (c *Conn) Examine(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("examine %s", astring(mailbox)) +} + +// Create makes a new mailbox on the server. +func (c *Conn) Create(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("create %s", astring(mailbox)) +} + +// Delete removes an entire mailbox and its messages. +func (c *Conn) Delete(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("delete %s", astring(mailbox)) +} + +// Rename changes the name of a mailbox and all its child mailboxes. +func (c *Conn) Rename(omailbox, nmailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("rename %s %s", astring(omailbox), astring(nmailbox)) +} + +// Subscribe marks a mailbox as subscribed. The mailbox does not have to exist. It +// is not an error if the mailbox is already subscribed. 
+func (c *Conn) Subscribe(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("subscribe %s", astring(mailbox)) +} + +// Unsubscribe marks a mailbox as unsubscribed. +func (c *Conn) Unsubscribe(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("unsubscribe %s", astring(mailbox)) +} + +// List lists mailboxes with the basic LIST syntax. +// Pattern can contain * (match any) or % (match any except hierarchy delimiter). +func (c *Conn) List(pattern string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf(`list "" %s`, astring(pattern)) +} + +// ListFull lists mailboxes with the extended LIST syntax requesting all supported data. +// Pattern can contain * (match any) or % (match any except hierarchy delimiter). +func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + var subscribedStr string + if subscribedOnly { + subscribedStr = "subscribed recursivematch" + } + for i, s := range patterns { + patterns[i] = astring(s) + } + return c.Transactf(`list (%s) "" (%s) return (subscribed children special-use status (messages uidnext uidvalidity unseen deleted size recent appendlimit))`, subscribedStr, strings.Join(patterns, " ")) +} + +// Namespace returns the hiearchy separator in an UntaggedNamespace response with personal/shared/other namespaces if present. +func (c *Conn) Namespace() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("namespace") +} + +// Status requests information about a mailbox, such as number of messages, size, etc. 
+func (c *Conn) Status(mailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("status %s", astring(mailbox)) +} + +// Append adds message to mailbox with flags and optional receive time. +func (c *Conn) Append(mailbox string, flags []string, received *time.Time, message []byte) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + var date string + if received != nil { + date = ` "` + received.Format("_2-Jan-2006 15:04:05 -0700") + `"` + } + return c.Transactf("append %s (%s)%s {%d+}\r\n%s", astring(mailbox), strings.Join(flags, " "), date, len(message), message) +} + +// note: No idle command. Idle is better implemented by writing the request and reading and handling the responses as they come in. + +// CloseMailbox closes the currently selected/active mailbox, permanently removing +// any messages marked with \Deleted. +func (c *Conn) CloseMailbox() (untagged []Untagged, result Result, rerr error) { + return c.Transactf("close") +} + +// Unselect closes the currently selected/active mailbox, but unlike CloseMailbox +// does not permanently remove any messages marked with \Deleted. +func (c *Conn) Unselect() (untagged []Untagged, result Result, rerr error) { + return c.Transactf("unselect") +} + +// Expunge removes messages marked as deleted for the selected mailbox. +func (c *Conn) Expunge() (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("expunge") +} + +// UIDExpunge is like expunge, but only removes messages matched uidSet. +func (c *Conn) UIDExpunge(uidSet NumSet) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("uid expunge %s", uidSet.String()) +} + +// Note: No search, fetch command yet due to its large syntax. + +// StoreFlagsSet stores a new set of flags for messages from seqset with the STORE command. 
+// If silent, no untagged responses with the updated flags will be sent by the server. +func (c *Conn) StoreFlagsSet(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + item := "flags" + if silent { + item += ".silent" + } + return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " ")) +} + +// StoreFlagsAdd is like StoreFlagsSet, but only adds flags, leaving current flags on the message intact. +func (c *Conn) StoreFlagsAdd(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + item := "+flags" + if silent { + item += ".silent" + } + return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " ")) +} + +// StoreFlagsClear is like StoreFlagsSet, but only removes flags, leaving other flags on the message intact. +func (c *Conn) StoreFlagsClear(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + item := "-flags" + if silent { + item += ".silent" + } + return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " ")) +} + +// Copy adds messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox. +func (c *Conn) Copy(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("copy %s %s", seqSet.String(), astring(dstMailbox)) +} + +// UIDCopy is like copy, but operates on UIDs. +func (c *Conn) UIDCopy(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("uid copy %s %s", uidSet.String(), astring(dstMailbox)) +} + +// Move moves messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox. 
+func (c *Conn) Move(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("move %s %s", seqSet.String(), astring(dstMailbox)) +} + +// UIDMove is like move, but operates on UIDs. +func (c *Conn) UIDMove(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) { + defer c.recover(&rerr) + return c.Transactf("uid move %s %s", uidSet.String(), astring(dstMailbox)) +} diff --git a/imapclient/parse.go b/imapclient/parse.go new file mode 100644 index 0000000..fea71c8 --- /dev/null +++ b/imapclient/parse.go @@ -0,0 +1,1223 @@ +package imapclient + +import ( + "fmt" + "io" + "strconv" + "strings" +) + +func (c *Conn) recorded() string { + s := string(c.recordBuf) + c.recordBuf = nil + c.record = false + return s +} + +func (c *Conn) recordAdd(buf []byte) { + if c.record { + c.recordBuf = append(c.recordBuf, buf...) + } +} + +func (c *Conn) xtake(s string) { + buf := make([]byte, len(s)) + _, err := io.ReadFull(c.r, buf) + c.xcheckf(err, "taking %q", s) + if !strings.EqualFold(string(buf), s) { + c.xerrorf("got %q, expected %q", buf, s) + } + c.recordAdd(buf) +} + +func (c *Conn) readbyte() (byte, error) { + b, err := c.r.ReadByte() + if err == nil { + c.recordAdd([]byte{b}) + } + return b, err +} + +func (c *Conn) unreadbyte() { + if c.record { + c.recordBuf = c.recordBuf[:len(c.recordBuf)-1] + } + err := c.r.UnreadByte() + c.xcheckf(err, "unread byte") +} + +func (c *Conn) readrune() (rune, error) { + x, _, err := c.r.ReadRune() + if err == nil { + c.recordAdd([]byte(string(x))) + } + return x, err +} + +func (c *Conn) xspace() { + c.xtake(" ") +} + +func (c *Conn) xcrlf() { + c.xtake("\r\n") +} + +func (c *Conn) peek(exp byte) bool { + b, err := c.readbyte() + if err == nil { + c.unreadbyte() + } + return err == nil && strings.EqualFold(string(rune(b)), string(rune(exp))) +} + +func (c *Conn) take(exp byte) bool { + if c.peek(exp) { + c.readbyte() + return true + } + 
return false +} + +func (c *Conn) xstatus() Status { + w := c.xword() + W := strings.ToUpper(w) + switch W { + case "OK": + return OK + case "NO": + return NO + case "BAD": + return BAD + } + c.xerrorf("expected status, got %q", w) + panic("not reached") +} + +// Already consumed: tag SP status SP +func (c *Conn) xresult(status Status) Result { + respText := c.xrespText() + return Result{status, respText} +} + +func (c *Conn) xrespText() RespText { + var code string + var codeArg CodeArg + if c.take('[') { + code, codeArg = c.xrespCode() + c.xtake("]") + c.xspace() + } + more := "" + for !c.peek('\r') { + more += string(rune(c.xbyte())) + } + return RespText{code, codeArg, more} +} + +var knownCodes = stringMap( + // Without parameters. + "ALERT", "PARSE", "READ-ONLY", "READ-WRITE", "TRYCREATE", "UIDNOTSTICKY", "UNAVAILABLE", "AUTHENTICATIONFAILED", "AUTHORIZATIONFAILED", "EXPIRED", "PRIVACYREQUIRED", "CONTACTADMIN", "NOPERM", "INUSE", "EXPUNGEISSUED", "CORRUPTION", "SERVERBUG", "CLIENTBUG", "CANNOT", "LIMIT", "OVERQUOTA", "ALREADYEXISTS", "NONEXISTENT", "NOTSAVED", "HASCHILDREN", "CLOSED", "UNKNOWN-CTE", + // With parameters. + "BADCHARSET", "CAPABILITY", "PERMANENTFLAGS", "UIDNEXT", "UIDVALIDITY", "UNSEEN", "APPENDUID", "COPYUID", +) + +func stringMap(l ...string) map[string]struct{} { + r := map[string]struct{}{} + for _, s := range l { + r[s] = struct{}{} + } + return r +} + +// ../rfc/9051:6895 +func (c *Conn) xrespCode() (string, CodeArg) { + w := "" + for !c.peek(' ') && !c.peek(']') { + w += string(rune(c.xbyte())) + } + W := strings.ToUpper(w) + + if _, ok := knownCodes[W]; !ok { + var args []string + for c.take(' ') { + arg := "" + for !c.peek(' ') && !c.peek(']') { + arg += string(rune(c.xbyte())) + } + args = append(args, arg) + } + return W, CodeOther{W, args} + } + + var codeArg CodeArg + switch W { + case "BADCHARSET": + var l []string // Must be nil initially. 
+ if c.take(' ') { + c.xtake("(") + l = []string{c.xcharset()} + for c.take(' ') { + l = append(l, c.xcharset()) + } + c.xtake(")") + } + codeArg = CodeList{W, l} + case "CAPABILITY": + c.xtake(" ") + caps := []string{c.xatom()} + for c.take(' ') { + caps = append(caps, c.xatom()) + } + c.CapAvailable = map[Capability]struct{}{} + for _, cap := range caps { + c.CapAvailable[Capability(cap)] = struct{}{} + } + codeArg = CodeWords{W, caps} + + case "PERMANENTFLAGS": + l := []string{} // Must be non-nil. + if c.take(' ') { + c.xtake("(") + l = []string{c.xflag()} + for c.take(' ') { + l = append(l, c.xflag()) + } + c.xtake(")") + } + codeArg = CodeList{W, l} + case "UIDNEXT", "UIDVALIDITY", "UNSEEN": + c.xspace() + codeArg = CodeUint{W, c.xnzuint32()} + case "APPENDUID": + c.xspace() + destUIDValidity := c.xnzuint32() + c.xspace() + uid := c.xnzuint32() + codeArg = CodeAppendUID{destUIDValidity, uid} + case "COPYUID": + c.xspace() + destUIDValidity := c.xnzuint32() + c.xspace() + from := c.xuidset() + c.xspace() + to := c.xuidset() + codeArg = CodeCopyUID{destUIDValidity, from, to} + } + return W, codeArg +} + +func (c *Conn) xbyte() byte { + b, err := c.readbyte() + c.xcheckf(err, "read byte") + return b +} + +// take until b is seen. don't take b itself. 
+func (c *Conn) xtakeuntil(b byte) string { + var s string + for { + x, err := c.readbyte() + c.xcheckf(err, "read byte") + if x == b { + c.unreadbyte() + return s + } + s += string(rune(x)) + } +} + +func (c *Conn) xdigits() string { + var s string + for { + b, err := c.readbyte() + if err == nil && (b >= '0' && b <= '9') { + s += string(rune(b)) + continue + } + c.unreadbyte() + return s + } +} + +func (c *Conn) xint32() int32 { + s := c.xdigits() + num, err := strconv.ParseInt(s, 10, 32) + c.xcheckf(err, "parsing int32") + return int32(num) +} + +func (c *Conn) xint64() int64 { + s := c.xdigits() + num, err := strconv.ParseInt(s, 10, 64) + c.xcheckf(err, "parsing int64") + return num +} + +func (c *Conn) xuint32() uint32 { + s := c.xdigits() + num, err := strconv.ParseUint(s, 10, 32) + c.xcheckf(err, "parsing uint32") + return uint32(num) +} + +func (c *Conn) xnzuint32() uint32 { + v := c.xuint32() + if v == 0 { + c.xerrorf("got 0, expected nonzero uint") + } + return v +} + +// todo: replace with proper parsing. 
+func (c *Conn) xnonspace() string { + var s string + for !c.peek(' ') && !c.peek('\r') && !c.peek('\n') { + s += string(rune(c.xbyte())) + } + if s == "" { + c.xerrorf("expected non-space") + } + return s +} + +// todo: replace with proper parsing +func (c *Conn) xword() string { + return c.xatom() +} + +// "*" SP is already consumed +// ../rfc/9051:6868 +func (c *Conn) xuntagged() Untagged { + w := c.xnonspace() + W := strings.ToUpper(w) + switch W { + case "PREAUTH": + c.xspace() + r := UntaggedPreauth(c.xrespText()) + c.xcrlf() + return r + + case "BYE": + c.xspace() + r := UntaggedBye(c.xrespText()) + c.xcrlf() + return r + + case "OK", "NO", "BAD": + c.xspace() + r := UntaggedResult(c.xresult(Status(W))) + c.xcrlf() + return r + + case "CAPABILITY": + // ../rfc/9051:6427 + var caps []string + for c.take(' ') { + caps = append(caps, c.xnonspace()) + } + c.CapAvailable = map[Capability]struct{}{} + for _, cap := range caps { + c.CapAvailable[Capability(cap)] = struct{}{} + } + r := UntaggedCapability(caps) + c.xcrlf() + return r + + case "ENABLED": + // ../rfc/9051:6520 + var caps []string + for c.take(' ') { + caps = append(caps, c.xnonspace()) + } + for _, cap := range caps { + c.CapEnabled[Capability(cap)] = struct{}{} + } + r := UntaggedEnabled(caps) + c.xcrlf() + return r + + case "FLAGS": + c.xspace() + r := UntaggedFlags(c.xflagList()) + c.xcrlf() + return r + + case "LIST": + c.xspace() + r := c.xmailboxList() + c.xcrlf() + return r + + case "STATUS": + // ../rfc/9051:6681 + c.xspace() + mailbox := c.xastring() + c.xspace() + c.xtake("(") + attrs := map[string]int64{} + for !c.take(')') { + if len(attrs) > 0 { + c.xspace() + } + s := c.xword() + c.xspace() + S := strings.ToUpper(s) + var num int64 + // ../rfc/9051:7059 + switch S { + case "MESSAGES": + num = int64(c.xuint32()) + case "UIDNEXT": + num = int64(c.xnzuint32()) + case "UIDVALIDITY": + num = int64(c.xnzuint32()) + case "UNSEEN": + num = int64(c.xuint32()) + case "DELETED": + num = 
int64(c.xuint32()) + case "SIZE": + num = c.xint64() + case "RECENT": + c.xneedDisabled("RECENT status flag", CapIMAP4rev2) + num = int64(c.xuint32()) + case "APPENDLIMIT": + if c.peek('n') || c.peek('N') { + c.xtake("nil") + } else { + num = c.xint64() + } + default: + c.xerrorf("status: unknown attribute %q", s) + } + if _, ok := attrs[S]; ok { + c.xerrorf("status: duplicate attribute %q", s) + } + attrs[S] = num + } + r := UntaggedStatus{mailbox, attrs} + c.xcrlf() + return r + + case "NAMESPACE": + // ../rfc/9051:6778 + c.xspace() + personal := c.xnamespace() + c.xspace() + other := c.xnamespace() + c.xspace() + shared := c.xnamespace() + r := UntaggedNamespace{personal, other, shared} + c.xcrlf() + return r + + case "SEARCH": + // ../rfc/9051:6809 + c.xneedDisabled("untagged SEARCH response", CapIMAP4rev2) + var nums []uint32 + for c.take(' ') { + nums = append(nums, c.xnzuint32()) + } + r := UntaggedSearch(nums) + c.xcrlf() + return r + + case "ESEARCH": + r := c.xesearchResponse() + c.xcrlf() + return r + + case "LSUB": + c.xneedDisabled("untagged LSUB response", CapIMAP4rev2) + r := c.xlsub() + c.xcrlf() + return r + + case "ID": + // ../rfc/2971:243 + c.xspace() + var params map[string]string + if c.take('(') { + params = map[string]string{} + for !c.take(')') { + if len(params) > 0 { + c.xspace() + } + k := c.xstring() + c.xspace() + v := c.xnilString() + if _, ok := params[k]; ok { + c.xerrorf("duplicate key %q", k) + } + params[k] = v + } + } else { + c.xtake("NIL") + } + c.xcrlf() + return UntaggedID(params) + + default: + v, err := strconv.ParseUint(w, 10, 32) + if err == nil { + num := uint32(v) + c.xspace() + w = c.xword() + W = strings.ToUpper(w) + switch W { + case "FETCH": + if num == 0 { + c.xerrorf("invalid zero number for untagged fetch response") + } + c.xspace() + r := c.xfetch(num) + c.xcrlf() + return r + + case "EXPUNGE": + if num == 0 { + c.xerrorf("invalid zero number for untagged expunge response") + } + c.xcrlf() + return 
UntaggedExpunge(num) + + case "EXISTS": + c.xcrlf() + return UntaggedExists(num) + + case "RECENT": + c.xneedDisabled("should not send RECENT in IMAP4rev2", CapIMAP4rev2) + c.xcrlf() + return UntaggedRecent(num) + + default: + c.xerrorf("unknown untagged numbered response %q", w) + panic("not reached") + } + } + c.xerrorf("unknown untagged response %q", w) + } + panic("not reached") +} + +// ../rfc/3501:4864 ../rfc/9051:6742 +// Already parsed: "*" SP nznumber SP "FETCH" SP +func (c *Conn) xfetch(num uint32) UntaggedFetch { + c.xtake("(") + attrs := []FetchAttr{c.xmsgatt1()} + for c.take(' ') { + attrs = append(attrs, c.xmsgatt1()) + } + c.xtake(")") + return UntaggedFetch{num, attrs} +} + +// ../rfc/9051:6746 +func (c *Conn) xmsgatt1() FetchAttr { + f := "" + for { + b := c.xbyte() + if b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' || b >= '0' && b <= '9' || b == '.' { + f += string(rune(b)) + continue + } + c.unreadbyte() + break + } + + F := strings.ToUpper(f) + switch F { + case "FLAGS": + c.xspace() + c.xtake("(") + var flags []string + if !c.take(')') { + flags = []string{c.xflag()} + for c.take(' ') { + flags = append(flags, c.xflag()) + } + c.xtake(")") + } + return FetchFlags(flags) + + case "ENVELOPE": + c.xspace() + return FetchEnvelope(c.xenvelope()) + + case "INTERNALDATE": + c.xspace() + return FetchInternalDate(c.xquoted()) // todo: parsed time + + case "RFC822.SIZE": + c.xspace() + return FetchRFC822Size(c.xint64()) + + case "RFC822": + c.xspace() + s := c.xnilString() + return FetchRFC822(s) + + case "RFC822.HEADER": + c.xspace() + s := c.xnilString() + return FetchRFC822Header(s) + + case "RFC822.TEXT": + c.xspace() + s := c.xnilString() + return FetchRFC822Text(s) + + case "BODY": + if c.take(' ') { + return FetchBodystructure{F, c.xbodystructure()} + } + c.record = true + section := c.xsection() + var offset int32 + if c.take('<') { + offset = c.xint32() + c.xtake(">") + } + F += c.recorded() + c.xspace() + body := c.xnilString() + return 
FetchBody{F, section, offset, body} + + case "BODYSTRUCTURE": + c.xspace() + return FetchBodystructure{F, c.xbodystructure()} + + case "BINARY": + c.record = true + nums := c.xsectionBinary() + F += c.recorded() + c.xspace() + buf := c.xnilStringLiteral8() + return FetchBinary{F, nums, string(buf)} + + case "BINARY.SIZE": + c.record = true + nums := c.xsectionBinary() + F += c.recorded() + c.xspace() + size := c.xint64() + return FetchBinarySize{F, nums, size} + + case "UID": + c.xspace() + return FetchUID(c.xuint32()) + } + c.xerrorf("unknown fetch attribute %q", f) + panic("not reached") +} + +func (c *Conn) xnilString() string { + if c.peek('"') { + return c.xquoted() + } else if c.peek('{') { + return string(c.xliteral()) + } else { + c.xtake("NIL") + return "" + } +} + +func (c *Conn) xstring() string { + if c.peek('"') { + return c.xquoted() + } + return string(c.xliteral()) +} + +func (c *Conn) xastring() string { + if c.peek('"') { + return c.xquoted() + } else if c.peek('{') { + return string(c.xliteral()) + } + return c.xatom() +} + +func (c *Conn) xatom() string { + var s string + for { + b, err := c.readbyte() + c.xcheckf(err, "read byte for flag") + if b <= ' ' || strings.IndexByte("(){%*\"\\]", b) >= 0 { + c.r.UnreadByte() + if s == "" { + c.xerrorf("expected atom") + } + return s + } + s += string(rune(b)) + } +} + +// ../rfc/9051:6856 ../rfc/6855:153 +func (c *Conn) xquoted() string { + c.xtake(`"`) + s := "" + for !c.take('"') { + r, err := c.readrune() + c.xcheckf(err, "reading rune in quoted string") + if r == '\\' { + r, err = c.readrune() + c.xcheckf(err, "reading escaped char in quoted string") + if r != '\\' && r != '"' { + c.xerrorf("quoted char not backslash or dquote: %c", r) + } + } + // todo: probably refuse some more chars. like \0 and all ctl and backspace. 
+ s += string(r) + } + return s +} + +func (c *Conn) xliteral() []byte { + c.xtake("{") + size := c.xint64() + sync := c.take('+') + c.xtake("}") + c.xcrlf() + if size > 1<<20 { + c.xerrorf("refusing to read more than 1MB: %d", size) + } + if sync { + _, err := fmt.Fprintf(c.conn, "+ ok\r\n") + c.xcheckf(err, "write continuation") + } + buf := make([]byte, int(size)) + _, err := io.ReadFull(c.r, buf) + c.xcheckf(err, "reading data for literal") + return buf +} + +// ../rfc/9051:6565 +// todo: stricter +func (c *Conn) xflag() string { + s := "" + if c.take('\\') { + s = "\\" + } else if c.take('$') { + s = "$" + } + s += c.xatom() + return s +} + +func (c *Conn) xsection() string { + c.xtake("[") + s := c.xtakeuntil(']') + c.xtake("]") + return s +} + +func (c *Conn) xsectionBinary() []uint32 { + c.xtake("[") + var nums []uint32 + for !c.take(']') { + if len(nums) > 0 { + c.xtake(".") + } + nums = append(nums, c.xnzuint32()) + } + return nums +} + +func (c *Conn) xnilStringLiteral8() []byte { + // todo: should make difference for literal8 and literal from string, which bytes are allowed + if c.take('~') || c.peek('{') { + return c.xliteral() + } + return []byte(c.xnilString()) +} + +// ../rfc/9051:6355 +func (c *Conn) xbodystructure() any { + c.xtake("(") + if c.peek('(') { + // ../rfc/9051:6411 + parts := []any{c.xbodystructure()} + for c.peek('(') { + parts = append(parts, c.xbodystructure()) + } + c.xspace() + mediaSubtype := c.xstring() + // todo: parse optional body-ext-mpart + c.xtake(")") + return BodyTypeMpart{parts, mediaSubtype} + } + + mediaType := c.xstring() + c.xspace() + mediaSubtype := c.xstring() + c.xspace() + bodyFields := c.xbodyFields() + if c.take(' ') { + if c.peek('(') { + // ../rfc/9051:6415 + envelope := c.xenvelope() + c.xspace() + bodyStructure := c.xbodystructure() + c.xspace() + lines := c.xint64() + c.xtake(")") + return BodyTypeMsg{mediaType, mediaSubtype, bodyFields, envelope, bodyStructure, lines} + } + // ../rfc/9051:6418 + lines 
:= c.xint64() + c.xtake(")") + return BodyTypeText{mediaType, mediaSubtype, bodyFields, lines} + } + // ../rfc/9051:6407 + c.xtake(")") + return BodyTypeBasic{mediaType, mediaSubtype, bodyFields} + + // todo: verify the media(sub)type is valid for returned data. +} + +// ../rfc/9051:6376 +func (c *Conn) xbodyFields() BodyFields { + params := c.xbodyFldParam() + c.xspace() + contentID := c.xnilString() + c.xspace() + contentDescr := c.xnilString() + c.xspace() + cte := c.xnilString() + c.xspace() + octets := c.xint32() + return BodyFields{params, contentID, contentDescr, cte, octets} +} + +// ../rfc/9051:6401 +func (c *Conn) xbodyFldParam() [][2]string { + if c.take('(') { + k := c.xstring() + c.xspace() + v := c.xstring() + l := [][2]string{{k, v}} + for c.take(' ') { + k = c.xstring() + c.xspace() + v = c.xstring() + l = append(l, [2]string{k, v}) + } + c.xtake(")") + return l + } + c.xtake("NIL") + return nil +} + +// ../rfc/9051:6522 +func (c *Conn) xenvelope() Envelope { + c.xtake("(") + date := c.xnilString() + c.xspace() + subject := c.xnilString() + c.xspace() + from := c.xaddresses() + c.xspace() + sender := c.xaddresses() + c.xspace() + replyTo := c.xaddresses() + c.xspace() + to := c.xaddresses() + c.xspace() + cc := c.xaddresses() + c.xspace() + bcc := c.xaddresses() + c.xspace() + inReplyTo := c.xnilString() + c.xspace() + messageID := c.xnilString() + c.xtake(")") + return Envelope{date, subject, from, sender, replyTo, to, cc, bcc, inReplyTo, messageID} +} + +// ../rfc/9051:6526 +func (c *Conn) xaddresses() []Address { + if !c.take('(') { + c.xtake("NIL") + return nil + } + l := []Address{c.xaddress()} + for !c.take(')') { + l = append(l, c.xaddress()) + } + return l +} + +// ../rfc/9051:6303 +func (c *Conn) xaddress() Address { + c.xtake("(") + name := c.xnilString() + c.xspace() + adl := c.xnilString() + c.xspace() + mailbox := c.xnilString() + c.xspace() + host := c.xnilString() + c.xtake(")") + return Address{name, adl, mailbox, host} +} + +// 
../rfc/9051:6584 +func (c *Conn) xflagList() []string { + c.xtake("(") + var l []string + if !c.take(')') { + l = []string{c.xflag()} + for c.take(' ') { + l = append(l, c.xflag()) + } + c.xtake(")") + } + return l +} + +// ../rfc/9051:6690 +func (c *Conn) xmailboxList() UntaggedList { + c.xtake("(") + var flags []string + if !c.peek(')') { + flags = append(flags, c.xflag()) + for c.take(' ') { + flags = append(flags, c.xflag()) + } + } + c.xtake(")") + c.xspace() + var quoted string + var b byte + if c.peek('"') { + quoted = c.xquoted() + if len(quoted) != 1 { + c.xerrorf("mailbox-list has multichar quoted part: %q", quoted) + } + b = byte(quoted[0]) + } else if !c.peek(' ') { + c.xtake("NIL") + } + c.xspace() + mailbox := c.xastring() + ul := UntaggedList{flags, b, mailbox, nil, ""} + if c.take(' ') { + c.xtake("(") + if !c.peek(')') { + c.xmboxListExtendedItem(&ul) + for c.take(' ') { + c.xmboxListExtendedItem(&ul) + } + } + c.xtake(")") + } + return ul +} + +// ../rfc/9051:6699 +func (c *Conn) xmboxListExtendedItem(ul *UntaggedList) { + tag := c.xastring() + c.xspace() + if strings.ToUpper(tag) == "OLDNAME" { + // ../rfc/9051:6811 + c.xtake("(") + name := c.xastring() + c.xtake(")") + ul.OldName = name + return + } + val := c.xtaggedExtVal() + ul.Extended = append(ul.Extended, MboxListExtendedItem{tag, val}) +} + +// ../rfc/9051:7111 +func (c *Conn) xtaggedExtVal() TaggedExtVal { + if c.take('(') { + var r TaggedExtVal + if !c.take(')') { + comp := c.xtaggedExtComp() + r.Comp = &comp + c.xtake(")") + } + return r + } + // We cannot just parse sequence-set, because we also have to accept number/number64. So first look for a number. If it is not, we continue parsing the rest of the sequence set. 
+ b, err := c.readbyte() + c.xcheckf(err, "read byte for tagged-ext-val") + if b < '0' || b > '9' { + c.unreadbyte() + ss := c.xsequenceSet() + return TaggedExtVal{SeqSet: &ss} + } + s := c.xdigits() + num, err := strconv.ParseInt(s, 10, 64) + c.xcheckf(err, "parsing int") + if !c.peek(':') && !c.peek(',') { + // not a larger sequence-set + return TaggedExtVal{Number: &num} + } + var sr NumRange + sr.First = uint32(num) + if c.take(':') { + var num uint32 + if !c.take('*') { + num = c.xnzuint32() + } + sr.Last = &num + } + ss := c.xsequenceSet() + ss.Ranges = append([]NumRange{sr}, ss.Ranges...) + return TaggedExtVal{SeqSet: &ss} +} + +// ../rfc/9051:7034 +func (c *Conn) xsequenceSet() NumSet { + if c.take('$') { + return NumSet{SearchResult: true} + } + var ss NumSet + for { + var sr NumRange + if !c.take('*') { + sr.First = c.xnzuint32() + } + if c.take(':') { + var num uint32 + if !c.take('*') { + num = c.xnzuint32() + } + sr.Last = &num + } + ss.Ranges = append(ss.Ranges, sr) + if !c.take(',') { + break + } + } + return ss +} + +// ../rfc/9051:7097 +func (c *Conn) xtaggedExtComp() TaggedExtComp { + if c.take('(') { + r := c.xtaggedExtComp() + c.xtake(")") + return TaggedExtComp{Comps: []TaggedExtComp{r}} + } + s := c.xastring() + if !c.peek(' ') { + return TaggedExtComp{String: s} + } + l := []TaggedExtComp{{String: s}} + for c.take(' ') { + l = append(l, c.xtaggedExtComp()) + } + return TaggedExtComp{Comps: l} +} + +// ../rfc/9051:6765 +func (c *Conn) xnamespace() []NamespaceDescr { + if !c.take('(') { + c.xtake("NIL") + return nil + } + + l := []NamespaceDescr{c.xnamespaceDescr()} + for !c.take(')') { + l = append(l, c.xnamespaceDescr()) + } + return l +} + +// ../rfc/9051:6769 +func (c *Conn) xnamespaceDescr() NamespaceDescr { + c.xtake("(") + prefix := c.xstring() + c.xspace() + var b byte + if c.peek('"') { + s := c.xquoted() + if len(s) != 1 { + c.xerrorf("namespace-descr: expected single char, got %q", s) + } + b = byte(s[0]) + } else { + c.xtake("NIL") 
+ } + var exts []NamespaceExtension + for !c.take(')') { + c.xspace() + key := c.xstring() + c.xspace() + c.xtake("(") + values := []string{c.xstring()} + for c.take(' ') { + values = append(values, c.xstring()) + } + c.xtake(")") + exts = append(exts, NamespaceExtension{key, values}) + } + return NamespaceDescr{prefix, b, exts} +} + +// require one of caps to be enabled. +func (c *Conn) xneedEnabled(msg string, caps ...Capability) { + for _, cap := range caps { + if _, ok := c.CapEnabled[cap]; ok { + return + } + } + c.xerrorf("%s: need one of following enabled capabilities: %v", msg, caps) +} + +// require all of caps to be disabled. +func (c *Conn) xneedDisabled(msg string, caps ...Capability) { + for _, cap := range caps { + if _, ok := c.CapEnabled[cap]; ok { + c.xerrorf("%s: invalid because of enabled capability %q", msg, cap) + } + } +} + +// ../rfc/9051:6546 +// Already consumed: "ESEARCH" +func (c *Conn) xesearchResponse() (r UntaggedEsearch) { + + if !c.take(' ') { + return + } + if c.take('(') { + // ../rfc/9051:6921 + c.xtake("TAG") + c.xspace() + r.Correlator = c.xastring() + c.xtake(")") + } + if !c.take(' ') { + return + } + w := c.xnonspace() + W := strings.ToUpper(w) + if W == "UID" { + r.UID = true + if !c.take(' ') { + return + } + w = c.xnonspace() + W = strings.ToUpper(w) + } + for { + // ../rfc/9051:6957 + switch W { + case "MIN": + if r.Min != 0 { + c.xerrorf("duplicate MIN in ESEARCH") + } + c.xspace() + num := c.xnzuint32() + r.Min = num + + case "MAX": + if r.Max != 0 { + c.xerrorf("duplicate MAX in ESEARCH") + } + c.xspace() + num := c.xnzuint32() + r.Max = num + + case "ALL": + if !r.All.IsZero() { + c.xerrorf("duplicate ALL in ESEARCH") + } + c.xspace() + ss := c.xsequenceSet() + if ss.SearchResult { + c.xerrorf("$ for last not valid in ESEARCH") + } + r.All = ss + + case "COUNT": + if r.Count != nil { + c.xerrorf("duplicate COUNT in ESEARCH") + } + c.xspace() + num := c.xuint32() + r.Count = &num + + default: + // Validate 
../rfc/9051:7090 + for i, b := range []byte(w) { + if !(b >= 'A' && b <= 'Z' || strings.IndexByte("-_.", b) >= 0 || i > 0 && strings.IndexByte("0123456789:", b) >= 0) { + c.xerrorf("invalid tag %q", w) + } + } + c.xspace() + ext := EsearchDataExt{w, c.xtaggedExtVal()} + r.Exts = append(r.Exts, ext) + } + + if !c.take(' ') { + break + } + w = c.xnonspace() // todo: this is too loose + W = strings.ToUpper(w) + } + return +} + +// ../rfc/9051:6441 +func (c *Conn) xcharset() string { + if c.peek('"') { + return c.xquoted() + } + return c.xatom() +} + +// ../rfc/9051:7133 +func (c *Conn) xuidset() []NumRange { + ranges := []NumRange{c.xuidrange()} + for c.take(',') { + ranges = append(ranges, c.xuidrange()) + } + return ranges +} + +func (c *Conn) xuidrange() NumRange { + uid := c.xnzuint32() + var end *uint32 + if c.take(':') { + x := c.xnzuint32() + end = &x + } + return NumRange{uid, end} +} + +// ../rfc/3501:4833 +func (c *Conn) xlsub() UntaggedLsub { + c.xspace() + c.xtake("(") + r := UntaggedLsub{} + for !c.take(')') { + if len(r.Flags) > 0 { + c.xspace() + } + r.Flags = append(r.Flags, c.xflag()) + } + c.xspace() + if c.peek('"') { + s := c.xquoted() + if !c.peek(' ') { + r.Mailbox = s + return r + } + if len(s) != 1 { + // todo: check valid char + c.xerrorf("invalid separator %q", s) + } + r.Separator = byte(s[0]) + } + c.xspace() + r.Mailbox = c.xastring() + return r +} diff --git a/imapclient/protocol.go b/imapclient/protocol.go new file mode 100644 index 0000000..ab3b4f1 --- /dev/null +++ b/imapclient/protocol.go @@ -0,0 +1,452 @@ +package imapclient + +import ( + "fmt" + "strings" +) + +// Capability is a known string for with the ENABLED and CAPABILITY command. 
+type Capability string + +const ( + CapIMAP4rev1 Capability = "IMAP4rev1" + CapIMAP4rev2 Capability = "IMAP4rev2" + CapLoginDisabled Capability = "LOGINDISABLED" + CapStarttls Capability = "STARTTLS" + CapAuthPlain Capability = "AUTH=PLAIN" + CapLiteralPlus Capability = "LITERAL+" + CapLiteralMinus Capability = "LITERAL-" + CapIdle Capability = "IDLE" + CapNamespace Capability = "NAMESPACE" + CapBinary Capability = "BINARY" + CapUnselect Capability = "UNSELECT" + CapUidplus Capability = "UIDPLUS" + CapEsearch Capability = "ESEARCH" + CapEnable Capability = "ENABLE" + CapSave Capability = "SAVE" + CapListExtended Capability = "LIST-EXTENDED" + CapSpecialUse Capability = "SPECIAL-USE" + CapMove Capability = "MOVE" + CapUTF8Only Capability = "UTF8=ONLY" + CapUTF8Accept Capability = "UTF8=ACCEPT" + CapID Capability = "ID" // ../rfc/2971:80 +) + +// Status is the tagged final result of a command. +type Status string + +const ( + BAD Status = "BAD" // Syntax error. + NO Status = "NO" // Command failed. + OK Status = "OK" // Command succeeded. +) + +// Result is the final response for a command, indicating success or failure. +type Result struct { + Status Status + RespText +} + +// CodeArg represents a response code with arguments, i.e. the data between [] in the response line. +type CodeArg interface { + CodeString() string +} + +// CodeOther is a valid but unrecognized response code. +type CodeOther struct { + Code string + Args []string +} + +func (c CodeOther) CodeString() string { + return c.Code + " " + strings.Join(c.Args, " ") +} + +// CodeWords is a code with space-separated string parameters. E.g. CAPABILITY. +type CodeWords struct { + Code string + Args []string +} + +func (c CodeWords) CodeString() string { + s := c.Code + for _, w := range c.Args { + s += " " + w + } + return s +} + +// CodeList is a code with a list with space-separated strings as parameters. E.g. BADCHARSET, PERMANENTFLAGS. 
+type CodeList struct { + Code string + Args []string // If nil, no list was present. List can also be empty. +} + +func (c CodeList) CodeString() string { + s := c.Code + if c.Args == nil { + return s + } + return s + "(" + strings.Join(c.Args, " ") + ")" +} + +// CodeUint is a code with a uint32 parameter, e.g. UIDNEXT and UIDVALIDITY. +type CodeUint struct { + Code string + Num uint32 +} + +func (c CodeUint) CodeString() string { + return fmt.Sprintf("%s %d", c.Code, c.Num) +} + +// "APPENDUID" response code. +type CodeAppendUID struct { + UIDValidity uint32 + UID uint32 +} + +func (c CodeAppendUID) CodeString() string { + return fmt.Sprintf("APPENDUID %d %d", c.UIDValidity, c.UID) +} + +// "COPYUID" response code. +type CodeCopyUID struct { + DestUIDValidity uint32 + From []NumRange + To []NumRange +} + +func (c CodeCopyUID) CodeString() string { + str := func(l []NumRange) string { + s := "" + for i, e := range l { + if i > 0 { + s += "," + } + s += fmt.Sprintf("%d", e.First) + if e.Last != nil { + s += fmt.Sprintf(":%d", *e.Last) + } + } + return s + } + return fmt.Sprintf("COPYUID %d %s %s", c.DestUIDValidity, str(c.From), str(c.To)) +} + +// RespText represents a response line minus the leading tag. +type RespText struct { + Code string // The first word between [] after the status. + CodeArg CodeArg // Set if code has a parameter. + More string // Any remaining text. +} + +// atom or string. +func astring(s string) string { + if len(s) == 0 { + return stringx(s) + } + for _, c := range s { + if c <= ' ' || c >= 0x7f || c == '(' || c == ')' || c == '{' || c == '%' || c == '*' || c == '"' || c == '\\' { + stringx(s) + } + } + return s +} + +// imap "string", i.e. double-quoted string or syncliteral. +func stringx(s string) string { + r := `"` + for _, c := range s { + if c == '\x00' || c == '\r' || c == '\n' { + return syncliteral(s) + } + if c == '\\' || c == '"' { + r += `\` + } + r += string(c) + } + r += `"` + return r +} + +// sync literal, i.e. {}\r\n. 
+func syncliteral(s string) string { + return fmt.Sprintf("{%d}\r\n", len(s)) + s +} + +// Untagged is a parsed untagged response. See types starting with Untagged. +// todo: make an interface that the untagged responses implement? +type Untagged any + +type UntaggedBye RespText +type UntaggedPreauth RespText +type UntaggedExpunge uint32 +type UntaggedExists uint32 +type UntaggedRecent uint32 +type UntaggedCapability []string +type UntaggedEnabled []string +type UntaggedResult Result +type UntaggedFlags []string +type UntaggedList struct { + // ../rfc/9051:6690 + Flags []string + Separator byte // 0 for NIL + Mailbox string + Extended []MboxListExtendedItem + OldName string // If present, taken out of Extended. +} +type UntaggedFetch struct { + Seq uint32 + Attrs []FetchAttr +} +type UntaggedSearch []uint32 +type UntaggedStatus struct { + Mailbox string + Attrs map[string]int64 // Upper case status attributes. ../rfc/9051:7059 +} +type UntaggedNamespace struct { + Personal, Other, Shared []NamespaceDescr +} +type UntaggedLsub struct { + // ../rfc/3501:4833 + Flags []string + Separator byte + Mailbox string +} + +// Fields are optional and zero if absent. +type UntaggedEsearch struct { + // ../rfc/9051:6546 + Correlator string + UID bool + Min uint32 + Max uint32 + All NumSet + Count *uint32 + Exts []EsearchDataExt +} + +// ../rfc/2971:184 + +type UntaggedID map[string]string + +// Extended data in an ESEARCH response. +type EsearchDataExt struct { + Tag string + Value TaggedExtVal +} + +type NamespaceDescr struct { + // ../rfc/9051:6769 + Prefix string + Separator byte // If 0 then separator was absent. + Exts []NamespaceExtension +} + +type NamespaceExtension struct { + // ../rfc/9051:6773 + Key string + Values []string +} + +// FetchAttr represents a FETCH response attribute. +type FetchAttr interface { + Attr() string // Name of attribute. +} + +type NumSet struct { + SearchResult bool // True if "$", in which case Ranges is irrelevant. 
+ Ranges []NumRange +} + +func (ns NumSet) IsZero() bool { + return !ns.SearchResult && ns.Ranges == nil +} + +func (ns NumSet) String() string { + if ns.SearchResult { + return "$" + } + var r string + for i, x := range ns.Ranges { + if i > 0 { + r += "," + } + r += x.String() + } + return r +} + +// NumRange is a single number or range. +type NumRange struct { + First uint32 // 0 for "*". + Last *uint32 // Nil if absent, 0 for "*". +} + +func (nr NumRange) String() string { + var r string + if nr.First == 0 { + r += "*" + } else { + r += fmt.Sprintf("%d", nr.First) + } + if nr.Last == nil { + return r + } + r += ":" + v := *nr.Last + if v == 0 { + r += "*" + } else { + r += fmt.Sprintf("%d", v) + } + return r +} + +type TaggedExtComp struct { + String string + Comps []TaggedExtComp // Used for both space-separated and (). +} + +type TaggedExtVal struct { + // ../rfc/9051:7111 + Number *int64 + SeqSet *NumSet + Comp *TaggedExtComp // If SimpleNumber and SimpleSeqSet is nil, this is a Comp. But Comp is optional and can also be nil. Not great. +} + +type MboxListExtendedItem struct { + // ../rfc/9051:6699 + Tag string + Val TaggedExtVal +} + +// "FLAGS" fetch response. +type FetchFlags []string + +func (f FetchFlags) Attr() string { return "FLAGS" } + +// "ENVELOPE" fetch response. +type FetchEnvelope Envelope + +func (f FetchEnvelope) Attr() string { return "ENVELOPE" } + +// Envelope holds the basic email message fields. +type Envelope struct { + Date string + Subject string + From, Sender, ReplyTo, To, CC, BCC []Address + InReplyTo, MessageID string +} + +// Address is an address field in an email message, e.g. To. +type Address struct { + Name, Adl, Mailbox, Host string +} + +// "INTERNALDATE" fetch response. +type FetchInternalDate string // todo: parsed time +func (f FetchInternalDate) Attr() string { return "INTERNALDATE" } + +// "RFC822.SIZE" fetch response. 
type FetchRFC822Size int64

func (f FetchRFC822Size) Attr() string { return "RFC822.SIZE" }

// "RFC822" fetch response.
type FetchRFC822 string

func (f FetchRFC822) Attr() string { return "RFC822" }

// "RFC822.HEADER" fetch response.
type FetchRFC822Header string

func (f FetchRFC822Header) Attr() string { return "RFC822.HEADER" }

// "RFC822.TEXT" fetch response.
type FetchRFC822Text string

func (f FetchRFC822Text) Attr() string { return "RFC822.TEXT" }

// "BODYSTRUCTURE" fetch response.
type FetchBodystructure struct {
	// ../rfc/9051:6355
	RespAttr string
	Body     any // BodyType*
}

func (f FetchBodystructure) Attr() string { return f.RespAttr }

// "BODY" fetch response.
type FetchBody struct {
	// ../rfc/9051:6756 ../rfc/9051:6985
	RespAttr string
	Section  string // todo: parse more ../rfc/9051:6985
	Offset   int32
	Body     string
}

func (f FetchBody) Attr() string { return f.RespAttr }

// BodyFields is part of a FETCH BODY[] response.
type BodyFields struct {
	Params                       [][2]string
	ContentID, ContentDescr, CTE string
	Octets                       int32
}

// BodyTypeMpart represents the body structure of a multipart message, with
// subparts and the multipart media subtype. Used in a FETCH response.
type BodyTypeMpart struct {
	// ../rfc/9051:6411
	Bodies       []any // BodyTypeBasic, BodyTypeMsg, BodyTypeText
	MediaSubtype string
}

// BodyTypeBasic represents basic information about a part, used in a FETCH response.
type BodyTypeBasic struct {
	// ../rfc/9051:6407
	MediaType, MediaSubtype string
	BodyFields              BodyFields
}

// BodyTypeMsg represents an email message as a body structure, used in a FETCH response.
type BodyTypeMsg struct {
	// ../rfc/9051:6415
	MediaType, MediaSubtype string
	BodyFields              BodyFields
	Envelope                Envelope
	Bodystructure           any // One of the BodyType*
	Lines                   int64
}

// BodyTypeText represents a text part as a body structure, used in a FETCH response.
+type BodyTypeText struct { + // ../rfc/9051:6418 + MediaType, MediaSubtype string + BodyFields BodyFields + Lines int64 +} + +// "BINARY" fetch response. +type FetchBinary struct { + RespAttr string + Parts []uint32 // Can be nil. + Data string +} + +func (f FetchBinary) Attr() string { return f.RespAttr } + +// "BINARY.SIZE" fetch response. +type FetchBinarySize struct { + RespAttr string + Parts []uint32 + Size int64 +} + +func (f FetchBinarySize) Attr() string { return f.RespAttr } + +// "UID" fetch response. +type FetchUID uint32 + +func (f FetchUID) Attr() string { return "UID" } diff --git a/imapserver/append_test.go b/imapserver/append_test.go new file mode 100644 index 0000000..16445a1 --- /dev/null +++ b/imapserver/append_test.go @@ -0,0 +1,77 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestAppend(t *testing.T) { + defer mockUIDValidity()() + + tc := start(t) // note: with switchboard because this connection stays alive unlike tc2. + defer tc.close() + + tc2 := startNoSwitchboard(t) // note: without switchboard because this connection will break during tests. + defer tc2.close() + + tc3 := startNoSwitchboard(t) + defer tc3.close() + + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("inbox") + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + tc3.client.Login("mjl@mox.example", "testtest") + + tc2.transactf("bad", "append") // Missing params. + tc2.transactf("bad", `append inbox`) // Missing message. + tc2.transactf("bad", `append inbox "test"`) // Message must be literal. + + // Syntax error for line ending in literal causes connection abort. + tc2.transactf("bad", "append inbox (\\Badflag) {1+}\r\nx") // Unknown flag. + tc2 = startNoSwitchboard(t) + defer tc2.close() + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("inbox") + + tc2.transactf("bad", "append inbox () \"bad time\" {1+}\r\nx") // Bad time. 
+ tc2 = startNoSwitchboard(t) + defer tc2.close() + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("inbox") + + tc2.transactf("no", "append nobox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1}") + tc2.xcode("TRYCREATE") + + tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx") + tc2.xuntagged(imapclient.UntaggedExists(1)) + tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 1}) + + tc.transactf("ok", "noop") + uid1 := imapclient.FetchUID(1) + flagsSeen := imapclient.FetchFlags{`\Seen`} + tc.xuntagged(imapclient.UntaggedExists(1), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, flagsSeen}}) + tc3.transactf("ok", "noop") + tc3.xuntagged() // Inbox is not selected, nothing to report. + + tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 ({34+}\r\ncontent-type: text/plain;;\r\n\r\ntest)") + tc2.xuntagged(imapclient.UntaggedExists(2)) + tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 2}) + + // Messages that we cannot parse are marked as application/octet-stream. Perhaps + // the imap client knows how to deal with them. 
+ tc2.transactf("ok", "uid fetch 2 body") + uid2 := imapclient.FetchUID(2) + xbs := imapclient.FetchBodystructure{ + RespAttr: "BODY", + Body: imapclient.BodyTypeBasic{ + MediaType: "APPLICATION", + MediaSubtype: "OCTET-STREAM", + BodyFields: imapclient.BodyFields{ + Octets: 4, + }, + }, + } + tc2.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, xbs}}) +} diff --git a/imapserver/authenticate_test.go b/imapserver/authenticate_test.go new file mode 100644 index 0000000..427cdc4 --- /dev/null +++ b/imapserver/authenticate_test.go @@ -0,0 +1,110 @@ +package imapserver + +import ( + "encoding/base64" + "errors" + "strings" + "testing" + + "github.com/mjl-/mox/scram" +) + +func TestAuthenticatePlain(t *testing.T) { + tc := start(t) + + tc.transactf("no", "authenticate bogus ") + tc.transactf("bad", "authenticate plain not base64...") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000baduser\u0000badpass"))) + tc.xcode("AUTHENTICATIONFAILED") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000badpass"))) + tc.xcode("AUTHENTICATIONFAILED") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl\u0000badpass"))) // Need email, not account. 
+ tc.xcode("AUTHENTICATIONFAILED") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000test"))) + tc.xcode("AUTHENTICATIONFAILED") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000testtesttest"))) + tc.xcode("AUTHENTICATIONFAILED") + tc.transactf("bad", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000"))) + tc.xcode("") + tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("other\u0000mjl@mox.example\u0000testtest"))) + tc.xcode("AUTHORIZATIONFAILED") + tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000testtest"))) + tc.close() + + tc = start(t) + tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("mjl@mox.example\u0000mjl@mox.example\u0000testtest"))) + tc.close() + + tc = start(t) + tc.client.AuthenticatePlain("mjl@mox.example", "testtest") + tc.close() + + tc = start(t) + defer tc.close() + + tc.cmdf("", "authenticate plain") + tc.readprefixline("+ ") + tc.writelinef("*") // Aborts. 
+ tc.readstatus("bad") + + tc.cmdf("", "authenticate plain") + tc.readprefixline("+") + tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000testtest"))) + tc.readstatus("ok") +} + +func TestAuthenticateSCRAMSHA256(t *testing.T) { + tc := start(t) + tc.client.AuthenticateSCRAMSHA256("mjl@mox.example", "testtest") + tc.close() + + auth := func(status string, serverFinalError error, username, password string) { + t.Helper() + + sc := scram.NewClient(username, "") + clientFirst, err := sc.ClientFirst() + tc.check(err, "scram clientFirst") + tc.client.LastTag = "x001" + tc.writelinef("%s authenticate scram-sha-256 %s", tc.client.LastTag, base64.StdEncoding.EncodeToString([]byte(clientFirst))) + + xreadContinuation := func() []byte { + line, _, result, rerr := tc.client.ReadContinuation() + tc.check(rerr, "read continuation") + if result.Status != "" { + tc.t.Fatalf("expected continuation") + } + buf, err := base64.StdEncoding.DecodeString(line) + tc.check(err, "parsing base64 from remote") + return buf + } + + serverFirst := xreadContinuation() + clientFinal, err := sc.ServerFirst(serverFirst, password) + tc.check(err, "scram clientFinal") + tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte(clientFinal))) + + serverFinal := xreadContinuation() + err = sc.ServerFinal(serverFinal) + if serverFinalError == nil { + tc.check(err, "scram serverFinal") + } else if err == nil || !errors.Is(err, serverFinalError) { + t.Fatalf("server final, got err %#v, expected %#v", err, serverFinalError) + } + _, result, err := tc.client.Response() + tc.check(err, "read response") + if string(result.Status) != strings.ToUpper(status) { + tc.t.Fatalf("got status %q, expected %q", result.Status, strings.ToUpper(status)) + } + } + + tc = start(t) + auth("no", scram.ErrInvalidProof, "mjl@mox.example", "badpass") + auth("no", scram.ErrInvalidProof, "mjl@mox.example", "") + // todo: server aborts due to invalid username. 
we should probably make client continue with fake determinisitically generated salt and result in error in the end. + // auth("no", nil, "other@mox.example", "testtest") + + tc.transactf("no", "authenticate bogus ") + tc.transactf("bad", "authenticate scram-sha-256 not base64...") + tc.transactf("bad", "authenticate scram-sha-256 %s", base64.StdEncoding.EncodeToString([]byte("bad data"))) + tc.close() +} diff --git a/imapserver/copy_test.go b/imapserver/copy_test.go new file mode 100644 index 0000000..f5339c1 --- /dev/null +++ b/imapserver/copy_test.go @@ -0,0 +1,53 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestCopy(t *testing.T) { + defer mockUIDValidity()() + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("Trash") + + tc.transactf("bad", "copy") // Missing params. + tc.transactf("bad", "copy 1") // Missing params. + tc.transactf("bad", "copy 1 inbox ") // Leftover. + + // Seqs 1,2 and UIDs 3,4. + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.StoreFlagsSet("1:2", true, `\Deleted`) + tc.client.Expunge() + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + + tc.transactf("no", "copy 1 nonexistent") + tc.xcode("TRYCREATE") + + tc.transactf("no", "copy 1 inbox") // Cannot copy to same mailbox. + + tc2.transactf("ok", "noop") // Drain. 
+ + tc.transactf("ok", "copy 1:* Trash") + ptr := func(v uint32) *uint32 { return &v } + tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}}) + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedExists(2), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}}) + + tc.transactf("no", "uid copy 1,2 Trash") // No match. + tc.transactf("ok", "uid copy 4,3 Trash") + tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}}) + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedExists(4), imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}}, imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}}) +} diff --git a/imapserver/create_test.go b/imapserver/create_test.go new file mode 100644 index 0000000..eeda056 --- /dev/null +++ b/imapserver/create_test.go @@ -0,0 +1,69 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestCreate(t *testing.T) { + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc2.client.Login("mjl@mox.example", "testtest") + + tc.transactf("no", "create inbox") // Already exists and not allowed. ../rfc/9051:1913 + tc.transactf("no", "create Inbox") // Idem. 
+ + // ../rfc/9051:1937 + tc.transactf("ok", "create inbox/a/c") + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"}) + + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"}) + + tc.transactf("no", "create inbox/a/c") // Exists. + + tc.transactf("ok", "create inbox/a/x") + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/x"}) + + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/x"}) + + // ../rfc/9051:1934 + tc.transactf("ok", "create mailbox/") + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox", OldName: "mailbox/"}) + + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox"}) + + // If we are already subscribed, create should still work, and we still want to see the subscribed flag. + tc.transactf("ok", "subscribe newbox") + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "newbox"}) + + tc.transactf("ok", "create newbox") + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "newbox"}) + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "newbox"}) + + // todo: test create+delete+create of a name results in a higher uidvalidity. + + tc.transactf("no", "create /bad/root") + tc.transactf("no", "create bad//root") // Cannot have internal duplicate slashes. 
+ tc.transactf("no", `create ""`) // Refuse empty mailbox name. + // We are not allowing special characters. + tc.transactf("bad", `create "\n"`) + tc.transactf("bad", `create "\x7f"`) + tc.transactf("bad", `create "\x9f"`) + tc.transactf("bad", `create "\u2028"`) + tc.transactf("bad", `create "\u2029"`) + tc.transactf("no", `create "%%"`) + tc.transactf("no", `create "*"`) + tc.transactf("no", `create "#"`) + tc.transactf("no", `create "&"`) +} diff --git a/imapserver/delete_test.go b/imapserver/delete_test.go new file mode 100644 index 0000000..b655cb8 --- /dev/null +++ b/imapserver/delete_test.go @@ -0,0 +1,56 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestDelete(t *testing.T) { + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc2.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "delete") // Missing mailbox. + tc.transactf("no", "delete inbox") // Cannot delete inbox. + tc.transactf("no", "delete nonexistent") // Cannot delete mailbox that does not exist. + tc.transactf("no", `delete "nonexistent"`) // Again, with quoted string syntax. + + tc.client.Subscribe("x") + tc.transactf("no", "delete x") // Subscription does not mean there is a mailbox that can be deleted. + + tc.client.Create("a/b") + tc2.transactf("ok", "noop") // Drain changes. + + // ../rfc/9051:2000 + tc.transactf("no", "delete a") // Still has child. + tc.xcode("HASCHILDREN") + + tc.transactf("ok", "delete a/b") + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\NonExistent`}, Separator: '/', Mailbox: "a/b"}) + + tc.transactf("no", "delete a/b") // Already removed. + tc.transactf("ok", "delete a") // Parent can now be removed. + tc.transactf("ok", `list (subscribed) "" (a/b a) return (subscribed)`) + // Subscriptions still exist. 
+ tc.xuntagged( + imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "a"}, + imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "a/b"}, + ) + + // Let's try again with a message present. + tc.client.Create("msgs") + tc.client.Append("msgs", nil, nil, []byte(exampleMsg)) + tc.transactf("ok", "delete msgs") + + // Delete for inbox/* is allowed. + tc.client.Create("inbox/a") + tc.transactf("ok", "delete inbox/a") + +} diff --git a/imapserver/error.go b/imapserver/error.go new file mode 100644 index 0000000..3f8e399 --- /dev/null +++ b/imapserver/error.go @@ -0,0 +1,55 @@ +package imapserver + +import ( + "fmt" +) + +func xcheckf(err error, format string, args ...any) { + if err != nil { + xserverErrorf("%s: %w", fmt.Sprintf(format, args...), err) + } +} + +type userError struct { + code string // Optional response code in brackets. + err error +} + +func (e userError) Error() string { return e.err.Error() } +func (e userError) Unwrap() error { return e.err } + +func xuserErrorf(format string, args ...any) { + panic(userError{err: fmt.Errorf(format, args...)}) +} + +func xusercodeErrorf(code, format string, args ...any) { + panic(userError{code: code, err: fmt.Errorf(format, args...)}) +} + +type serverError struct{ err error } + +func (e serverError) Error() string { return e.err.Error() } +func (e serverError) Unwrap() error { return e.err } + +func xserverErrorf(format string, args ...any) { + panic(serverError{fmt.Errorf(format, args...)}) +} + +type syntaxError struct { + line string // Optional line to write before BAD result. For untagged response. CRLF will be added. + code string // Optional result code (between []) to write in BAD result. + err error // BAD response message. 
+} + +func (e syntaxError) Error() string { + s := "bad syntax: " + e.err.Error() + if e.code != "" { + s += " [" + e.code + "]" + } + return s +} +func (e syntaxError) Unwrap() error { return e.err } + +func xsyntaxErrorf(format string, args ...any) { + panic(syntaxError{"", "", fmt.Errorf(format, args...)}) +} diff --git a/imapserver/expunge_test.go b/imapserver/expunge_test.go new file mode 100644 index 0000000..97d04d8 --- /dev/null +++ b/imapserver/expunge_test.go @@ -0,0 +1,74 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestExpunge(t *testing.T) { + defer mockUIDValidity()() + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("inbox") + + tc.transactf("bad", "expunge leftover") // Leftover data. + tc.transactf("ok", "expunge") // Nothing to remove though. + tc.xuntagged() + + tc.client.Unselect() + tc.client.Examine("inbox") + tc.transactf("no", "expunge") // Read-only. + tc.transactf("no", "uid expunge 1") // Read-only. + + tc.client.Unselect() + tc.client.Select("inbox") + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.transactf("ok", "expunge") // Still nothing to remove. + tc.xuntagged() + + tc.client.StoreFlagsAdd("1,3", true, `\Deleted`) + + tc2.transactf("ok", "noop") // Drain. + + tc.transactf("ok", "expunge") + tc.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2)) + + tc2.transactf("ok", "noop") // Drain. + tc2.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2)) + + tc.transactf("ok", "expunge") // Nothing to remove anymore. + tc.xuntagged() + + // Only UID 2 is still left. We'll add 3 more. Getting us to UIDs 2,4,5,6. 
+ tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + + tc.transactf("bad", "uid expunge") // Missing uid set. + tc.transactf("bad", "uid expunge 1 leftover") // Leftover data. + tc.transactf("bad", "uid expunge 1 leftover") // Leftover data. + + tc.client.StoreFlagsAdd("1,2,4", true, `\Deleted`) // Marks UID 2,4,6 as deleted. + + tc.transactf("ok", "uid expunge 1") + tc.xuntagged() // No match. + + tc2.transactf("ok", "noop") // Drain. + + tc.transactf("ok", "uid expunge 4:6") // Removes UID 4,6 at seqs 2,4. + tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3)) + + tc2.transactf("ok", "noop") + tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3)) +} diff --git a/imapserver/fetch.go b/imapserver/fetch.go new file mode 100644 index 0000000..39d4359 --- /dev/null +++ b/imapserver/fetch.go @@ -0,0 +1,738 @@ +package imapserver + +// todo: if fetch fails part-way through the command, we wouldn't be storing the messages that were parsed. should we try harder to get parsed form of messages stored in db? + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/textproto" + "sort" + "strings" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/store" +) + +// functions to handle fetch attribute requests are defined on fetchCmd. +type fetchCmd struct { + conn *conn + mailboxID int64 + uid store.UID + tx *bstore.Tx // Writable tx, for storing message when first parsed as mime parts. + changes []store.Change // For updated Seen flag. + markSeen bool + needFlags bool + expungeIssued bool // Set if a message cannot be read. Can happen for expunged messages. + + // Loaded when first needed, closed when message was processed. + m *store.Message // Message currently being processed. 
+ msgr *store.MsgReader + part *message.Part +} + +// error when processing an attribute. we typically just don't respond with requested attributes that encounter a failure. +type attrError struct{ err error } + +func (e attrError) Error() string { + return e.err.Error() +} + +// raise error processing an attribute. +func (cmd *fetchCmd) xerrorf(format string, args ...any) { + panic(attrError{fmt.Errorf(format, args...)}) +} + +func (cmd *fetchCmd) xcheckf(err error, format string, args ...any) { + if err != nil { + msg := fmt.Sprintf(format, args...) + cmd.xerrorf("%s: %w", msg, err) + } +} + +// Fetch returns information about messages, be it email envelopes, headers, +// bodies, full messages, flags. +// +// State: Selected +func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) { + // Command: ../rfc/9051:4330 ../rfc/3501:2992 + // Examples: ../rfc/9051:4463 ../rfc/9051:4520 + // Response syntax: ../rfc/9051:6742 ../rfc/3501:4864 + + // Request syntax: ../rfc/9051:6553 ../rfc/3501:4748 + p.xspace() + nums := p.xnumSet() + p.xspace() + atts := p.xfetchAtts() + p.xempty() + + // We don't use c.account.WithRLock because we write to the client while reading messages. + // We get the rlock, then we check the mailbox, release the lock and read the messages. + // The db transaction still locks out any changes to the database... + c.account.RLock() + runlock := c.account.RUnlock + // Note: we call runlock in a closure because we replace it below. + defer func() { + runlock() + }() + + cmd := &fetchCmd{conn: c, mailboxID: c.mailboxID} + c.xdbwrite(func(tx *bstore.Tx) { + cmd.tx = tx + + // Ensure the mailbox still exists. + c.xmailboxID(tx, c.mailboxID) + + uids := c.xnumSetUIDs(isUID, nums) + + // Release the account lock. + runlock() + runlock = func() {} // Prevent defer from unlocking again. + + for _, uid := range uids { + cmd.uid = uid + cmd.process(atts) + } + }) + + if len(cmd.changes) > 0 { + // Broadcast seen updates to other connections. 
+ c.broadcast(cmd.changes) + } + + if cmd.expungeIssued { + // ../rfc/2180:343 + c.writeresultf("%s NO [EXPUNGEISSUED] at least one message was expunged", tag) + } else { + c.ok(tag, cmdstr) + } +} + +func (cmd *fetchCmd) xensureMessage() *store.Message { + if cmd.m != nil { + return cmd.m + } + + q := bstore.QueryTx[store.Message](cmd.tx) + q.FilterNonzero(store.Message{MailboxID: cmd.mailboxID, UID: cmd.uid}) + m, err := q.Get() + cmd.xcheckf(err, "get message for uid %d", cmd.uid) + cmd.m = &m + return cmd.m +} + +func (cmd *fetchCmd) xensureParsed() (*store.MsgReader, *message.Part) { + if cmd.msgr != nil { + return cmd.msgr, cmd.part + } + + m := cmd.xensureMessage() + + cmd.msgr = cmd.conn.account.MessageReader(*m) + defer func() { + if cmd.part == nil { + err := cmd.msgr.Close() + cmd.conn.xsanity(err, "closing messagereader") + cmd.msgr = nil + } + }() + + p, err := m.LoadPart(cmd.msgr) + xcheckf(err, "load parsed message") + cmd.part = &p + return cmd.msgr, cmd.part +} + +func (cmd *fetchCmd) process(atts []fetchAtt) { + defer func() { + cmd.m = nil + cmd.part = nil + if cmd.msgr != nil { + err := cmd.msgr.Close() + cmd.conn.xsanity(err, "closing messagereader") + cmd.msgr = nil + } + + x := recover() + if x == nil { + return + } + err, ok := x.(attrError) + if !ok { + panic(x) + } + if errors.Is(err, bstore.ErrAbsent) { + cmd.expungeIssued = true + return + } + cmd.conn.log.Infox("processing fetch attribute", err, mlog.Field("uid", cmd.uid)) + xuserErrorf("processing fetch attribute: %v", err) + }() + + data := listspace{bare("UID"), number(cmd.uid)} + + cmd.markSeen = false + cmd.needFlags = false + + for _, a := range atts { + data = append(data, cmd.xprocessAtt(a)...) 
+ } + + if cmd.markSeen { + m := cmd.xensureMessage() + m.Seen = true + err := cmd.tx.Update(m) + xcheckf(err, "marking message as seen") + + cmd.changes = append(cmd.changes, store.ChangeFlags{MailboxID: cmd.mailboxID, UID: cmd.uid, Mask: store.Flags{Seen: true}, Flags: m.Flags}) + } + + if cmd.needFlags { + m := cmd.xensureMessage() + data = append(data, bare("FLAGS"), flaglist(m.Flags)) + } + + // Write errors are turned into panics because we write through c. + fmt.Fprintf(cmd.conn.bw, "* %d FETCH ", cmd.conn.xsequence(cmd.uid)) + data.writeTo(cmd.conn, cmd.conn.bw) + cmd.conn.bw.Write([]byte("\r\n")) +} + +// result for one attribute. if processing fails, e.g. because data was requested +// that doesn't exist and cannot be represented in imap, the attribute is simply +// not returned to the user. in this case, the returned value is a nil list. +func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token { + switch a.field { + case "UID": + // Always present. + return nil + case "ENVELOPE": + _, part := cmd.xensureParsed() + envelope := xenvelope(part) + return []token{bare("ENVELOPE"), envelope} + + case "INTERNALDATE": + // ../rfc/9051:6753 ../rfc/9051:6502 + m := cmd.xensureMessage() + return []token{bare("INTERNALDATE"), dquote(m.Received.Format("_2-Jan-2006 15:04:05 -0700"))} + + case "BODYSTRUCTURE": + _, part := cmd.xensureParsed() + bs := xbodystructure(part) + return []token{bare("BODYSTRUCTURE"), bs} + + case "BODY": + respField, t := cmd.xbody(a) + if respField == "" { + return nil + } + return []token{bare(respField), t} + + case "BINARY.SIZE": + _, p := cmd.xensureParsed() + if len(a.sectionBinary) == 0 { + // Must return the size of the entire message but with decoded body. + // todo: make this less expensive and/or cache the result? 
+ n, err := io.Copy(io.Discard, cmd.xbinaryMessageReader(p)) + cmd.xcheckf(err, "reading message as binary for its size") + return []token{bare(cmd.sectionRespField(a)), number(uint32(n))} + } + p = cmd.xpartnumsDeref(a.sectionBinary, p) + if len(p.Parts) > 0 || p.Message != nil { + // ../rfc/9051:4385 + cmd.xerrorf("binary only allowed on leaf parts, not multipart/* or message/rfc822 or message/global") + } + return []token{bare(cmd.sectionRespField(a)), number(p.DecodedSize)} + + case "BINARY": + respField, t := cmd.xbinary(a) + if respField == "" { + return nil + } + return []token{bare(respField), t} + + case "RFC822.SIZE": + m := cmd.xensureMessage() + return []token{bare("RFC822.SIZE"), number(m.Size)} + + case "RFC822.HEADER": + ba := fetchAtt{ + field: "BODY", + peek: true, + section: §ionSpec{ + msgtext: §ionMsgtext{s: "HEADER"}, + }, + } + respField, t := cmd.xbody(ba) + if respField == "" { + return nil + } + return []token{bare(a.field), t} + + case "RFC822": + ba := fetchAtt{ + field: "BODY", + section: §ionSpec{}, + } + respField, t := cmd.xbody(ba) + if respField == "" { + return nil + } + return []token{bare(a.field), t} + + case "RFC822.TEXT": + ba := fetchAtt{ + field: "BODY", + section: §ionSpec{ + msgtext: §ionMsgtext{s: "TEXT"}, + }, + } + respField, t := cmd.xbody(ba) + if respField == "" { + return nil + } + return []token{bare(a.field), t} + + case "FLAGS": + cmd.needFlags = true + + default: + xserverErrorf("field %q not yet implemented", a.field) + } + return nil +} + +// ../rfc/9051:6522 +func xenvelope(p *message.Part) token { + var env message.Envelope + if p.Envelope != nil { + env = *p.Envelope + } + var date token = nilt + if !env.Date.IsZero() { + // ../rfc/5322:791 + date = string0(env.Date.Format("Mon, 2 Jan 2006 15:04:05 -0700")) + } + var subject token = nilt + if env.Subject != "" { + subject = string0(env.Subject) + } + var inReplyTo token = nilt + if env.InReplyTo != "" { + inReplyTo = string0(env.InReplyTo) + } + var 
messageID token = nilt + if env.MessageID != "" { + messageID = string0(env.MessageID) + } + + addresses := func(l []message.Address) token { + if len(l) == 0 { + return nilt + } + r := listspace{} + for _, a := range l { + var name token = nilt + if a.Name != "" { + name = string0(a.Name) + } + user := string0(a.User) + var host token = nilt + if a.Host != "" { + host = string0(a.Host) + } + r = append(r, listspace{name, nilt, user, host}) + } + return r + } + + // Empty sender or reply-to result in fall-back to from. ../rfc/9051:6140 + sender := env.Sender + if len(sender) == 0 { + sender = env.From + } + replyTo := env.ReplyTo + if len(replyTo) == 0 { + replyTo = env.From + } + + return listspace{ + date, + subject, + addresses(env.From), + addresses(sender), + addresses(replyTo), + addresses(env.To), + addresses(env.CC), + addresses(env.BCC), + inReplyTo, + messageID, + } +} + +func (cmd *fetchCmd) peekOrSeen(peek bool) { + if cmd.conn.readonly || peek { + return + } + m := cmd.xensureMessage() + if !m.Seen { + cmd.markSeen = true + cmd.needFlags = true + } +} + +// reader that returns the message, but with header Content-Transfer-Encoding left out. +func (cmd *fetchCmd) xbinaryMessageReader(p *message.Part) io.Reader { + hr := cmd.xmodifiedHeader(p, []string{"Content-Transfer-Encoding"}, true) + return io.MultiReader(hr, p.Reader()) +} + +// return header with only fields, or with everything except fields if "not" is set. 
+func (cmd *fetchCmd) xmodifiedHeader(p *message.Part, fields []string, not bool) io.Reader { + h, err := io.ReadAll(p.HeaderReader()) + cmd.xcheckf(err, "reading header") + + matchesFields := func(line []byte) bool { + k := bytes.TrimRight(bytes.SplitN(line, []byte(":"), 2)[0], " \t") + for _, f := range fields { + if bytes.EqualFold(k, []byte(f)) { + return true + } + } + return false + } + + var match bool + hb := &bytes.Buffer{} + for len(h) > 0 { + line := h + i := bytes.Index(line, []byte("\r\n")) + if i >= 0 { + line = line[:i+2] + } + h = h[len(line):] + + match = matchesFields(line) || match && (bytes.HasPrefix(line, []byte(" ")) || bytes.HasPrefix(line, []byte("\t"))) + if match != not || len(line) == 2 { + hb.Write(line) + } + } + return hb +} + +func (cmd *fetchCmd) xbinary(a fetchAtt) (string, token) { + _, part := cmd.xensureParsed() + + cmd.peekOrSeen(a.peek) + if len(a.sectionBinary) == 0 { + r := cmd.xbinaryMessageReader(part) + if a.partial != nil { + r = cmd.xpartialReader(a.partial, r) + } + return cmd.sectionRespField(a), readerSyncliteral{r} + } + + p := part + if len(a.sectionBinary) > 0 { + p = cmd.xpartnumsDeref(a.sectionBinary, p) + } + if len(p.Parts) != 0 || p.Message != nil { + // ../rfc/9051:4385 + cmd.xerrorf("binary only allowed on leaf parts, not multipart/* or message/rfc822 or message/global") + } + + switch p.ContentTransferEncoding { + case "", "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE": + default: + // ../rfc/9051:5913 + xusercodeErrorf("UNKNOWN-CTE", "unknown Content-Transfer-Encoding %q", p.ContentTransferEncoding) + } + + r := p.Reader() + if a.partial != nil { + r = cmd.xpartialReader(a.partial, r) + } + return cmd.sectionRespField(a), readerSyncliteral{r} +} + +func (cmd *fetchCmd) xpartialReader(partial *partial, r io.Reader) io.Reader { + n, err := io.Copy(io.Discard, io.LimitReader(r, int64(partial.offset))) + cmd.xcheckf(err, "skipping to offset for partial") + if n != int64(partial.offset) { + return 
strings.NewReader("") // ../rfc/3501:3143 ../rfc/9051:4418 + } + return io.LimitReader(r, int64(partial.count)) +} + +func (cmd *fetchCmd) xbody(a fetchAtt) (string, token) { + msgr, part := cmd.xensureParsed() + + if a.section == nil { + // Non-extensible form of BODYSTRUCTURE. + return a.field, xbodystructure(part) + } + + cmd.peekOrSeen(a.peek) + + respField := cmd.sectionRespField(a) + + if a.section.msgtext == nil && a.section.part == nil { + m := cmd.xensureMessage() + var offset int64 + count := m.Size + if a.partial != nil { + offset = int64(a.partial.offset) + if offset > m.Size { + offset = m.Size + } + count = int64(a.partial.count) + if offset+count > m.Size { + count = m.Size - offset + } + } + return respField, readerSizeSyncliteral{&moxio.AtReader{R: msgr, Offset: offset}, count} + } + + sr := cmd.xsection(a.section, part) + + if a.partial != nil { + n, err := io.Copy(io.Discard, io.LimitReader(sr, int64(a.partial.offset))) + cmd.xcheckf(err, "skipping to offset for partial") + if n != int64(a.partial.offset) { + return respField, syncliteral("") // ../rfc/3501:3143 ../rfc/9051:4418 + } + return respField, readerSyncliteral{io.LimitReader(sr, int64(a.partial.count))} + } + return respField, readerSyncliteral{sr} +} + +func (cmd *fetchCmd) xpartnumsDeref(nums []uint32, p *message.Part) *message.Part { + // ../rfc/9051:4481 + if (len(p.Parts) == 0 && p.Message == nil) && len(nums) == 1 && nums[0] == 1 { + return p + } + + // ../rfc/9051:4485 + for i, num := range nums { + index := int(num - 1) + if p.Message != nil { + err := p.SetMessageReaderAt() + cmd.xcheckf(err, "preparing submessage") + return cmd.xpartnumsDeref(nums[i:], p.Message) + } + if index < 0 || index >= len(p.Parts) { + cmd.xerrorf("requested part does not exist") + } + p = &p.Parts[index] + } + return p +} + +func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader { + if section.part == nil { + return cmd.xsectionMsgtext(section.msgtext, p) + } + + p = 
cmd.xpartnumsDeref(section.part.part, p) + + if section.part.text == nil { + return p.RawReader() + } + + // ../rfc/9051:4535 + if p.Message != nil { + err := p.SetMessageReaderAt() + cmd.xcheckf(err, "preparing submessage") + p = p.Message + } + + if !section.part.text.mime { + return cmd.xsectionMsgtext(section.part.text.msgtext, p) + } + + // MIME header, see ../rfc/9051:4534 ../rfc/2045:1645 + h, err := io.ReadAll(p.HeaderReader()) + cmd.xcheckf(err, "reading header") + + matchesFields := func(line []byte) bool { + k := textproto.CanonicalMIMEHeaderKey(string(bytes.TrimRight(bytes.SplitN(line, []byte(":"), 2)[0], " \t"))) + // Only add MIME-Version and additional CRLF for messages, not other parts. ../rfc/2045:1645 ../rfc/2045:1652 + return (p.Envelope != nil && k == "Mime-Version") || strings.HasPrefix(k, "Content-") + } + + var match bool + hb := &bytes.Buffer{} + for len(h) > 0 { + line := h + i := bytes.Index(line, []byte("\r\n")) + if i >= 0 { + line = line[:i+2] + } + h = h[len(line):] + + match = matchesFields(line) || match && (bytes.HasPrefix(line, []byte(" ")) || bytes.HasPrefix(line, []byte("\t"))) + if match || len(line) == 2 { + hb.Write(line) + } + } + return hb +} + +func (cmd *fetchCmd) xsectionMsgtext(smt *sectionMsgtext, p *message.Part) io.Reader { + if smt.s == "HEADER" { + return p.HeaderReader() + } + + switch smt.s { + case "HEADER.FIELDS": + return cmd.xmodifiedHeader(p, smt.headers, false) + + case "HEADER.FIELDS.NOT": + return cmd.xmodifiedHeader(p, smt.headers, true) + + case "TEXT": + // It appears imap clients expect to get the body of the message, not a "text body" + // which sounds like it means a text/* part of a message. ../rfc/9051:4517 + return p.RawReader() + } + panic(serverError{fmt.Errorf("missing case")}) +} + +func (cmd *fetchCmd) sectionRespField(a fetchAtt) string { + s := a.field + "[" + if len(a.sectionBinary) > 0 { + s += fmt.Sprintf("%d", a.sectionBinary[0]) + for _, v := range a.sectionBinary[1:] { + s += "." 
+ fmt.Sprintf("%d", v) + } + } else if a.section != nil { + if a.section.part != nil { + p := a.section.part + s += fmt.Sprintf("%d", p.part[0]) + for _, v := range p.part[1:] { + s += "." + fmt.Sprintf("%d", v) + } + if p.text != nil { + if p.text.mime { + s += ".MIME" + } else { + s += "." + cmd.sectionMsgtextName(p.text.msgtext) + } + } + } else if a.section.msgtext != nil { + s += cmd.sectionMsgtextName(a.section.msgtext) + } + } + s += "]" + // binary does not have partial in field, unlike BODY ../rfc/9051:6757 + if a.field != "BINARY" && a.partial != nil { + s += fmt.Sprintf("<%d>", a.partial.offset) + } + return s +} + +func (cmd *fetchCmd) sectionMsgtextName(smt *sectionMsgtext) string { + s := smt.s + if strings.HasPrefix(smt.s, "HEADER.FIELDS") { + l := listspace{} + for _, h := range smt.headers { + l = append(l, astring(h)) + } + s += " " + l.pack(cmd.conn) + } + return s +} + +func bodyFldParams(params map[string]string) token { + if len(params) == 0 { + return nilt + } + // Ensure same ordering, easier for testing. + var keys []string + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + l := make(listspace, 2*len(keys)) + i := 0 + for _, k := range keys { + l[i] = string0(strings.ToUpper(k)) + l[i+1] = string0(params[k]) + i += 2 + } + return l +} + +func bodyFldEnc(s string) token { + up := strings.ToUpper(s) + switch up { + case "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE": + return dquote(up) + } + return string0(s) +} + +// xbodystructure returns a "body". +// calls itself for multipart messages and message/{rfc822,global}. 
+func xbodystructure(p *message.Part) token { + if p.MediaType == "MULTIPART" { + // Multipart, ../rfc/9051:6355 ../rfc/9051:6411 + var bodies concat + for i := range p.Parts { + bodies = append(bodies, xbodystructure(&p.Parts[i])) + } + return listspace{bodies, string0(p.MediaSubType)} + } + + // ../rfc/9051:6355 + if p.MediaType == "TEXT" { + // ../rfc/9051:6404 ../rfc/9051:6418 + return listspace{ + dquote("TEXT"), string0(p.MediaSubType), // ../rfc/9051:6739 + // ../rfc/9051:6376 + bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401 + nilOrString(p.ContentID), + nilOrString(p.ContentDescription), + bodyFldEnc(p.ContentTransferEncoding), + number(p.EndOffset - p.BodyOffset), + number(p.RawLineCount), + } + } else if p.MediaType == "MESSAGE" && (p.MediaSubType == "RFC822" || p.MediaSubType == "GLOBAL") { + // ../rfc/9051:6415 + // note: we don't have to prepare p.Message for reading, because we aren't going to read from it. + return listspace{ + dquote("MESSAGE"), dquote(p.MediaSubType), // ../rfc/9051:6732 + // ../rfc/9051:6376 + bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401 + nilOrString(p.ContentID), + nilOrString(p.ContentDescription), + bodyFldEnc(p.ContentTransferEncoding), + number(p.EndOffset - p.BodyOffset), + xenvelope(p.Message), + xbodystructure(p.Message), + number(p.RawLineCount), // todo: or mp.RawLineCount? 
+ } + } + var media token + switch p.MediaType { + case "APPLICATION", "AUDIO", "IMAGE", "FONT", "MESSAGE", "MODEL", "VIDEO": + media = dquote(p.MediaType) + default: + media = string0(p.MediaType) + } + // ../rfc/9051:6404 ../rfc/9051:6407 + return listspace{ + media, string0(p.MediaSubType), // ../rfc/9051:6723 + // ../rfc/9051:6376 + bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401 + nilOrString(p.ContentID), + nilOrString(p.ContentDescription), + bodyFldEnc(p.ContentTransferEncoding), + number(p.EndOffset - p.BodyOffset), + } +} diff --git a/imapserver/fetch_test.go b/imapserver/fetch_test.go new file mode 100644 index 0000000..9f52418 --- /dev/null +++ b/imapserver/fetch_test.go @@ -0,0 +1,403 @@ +package imapserver + +import ( + "strings" + "testing" + "time" + + "github.com/mjl-/mox/imapclient" +) + +func TestFetch(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Enable("imap4rev2") + received, err := time.Parse(time.RFC3339, "2022-11-16T10:01:00+01:00") + tc.check(err, "parse time") + tc.client.Append("inbox", nil, &received, []byte(exampleMsg)) + tc.client.Select("inbox") + + uid1 := imapclient.FetchUID(1) + date1 := imapclient.FetchInternalDate("16-Nov-2022 10:01:00 +0100") + rfcsize1 := imapclient.FetchRFC822Size(len(exampleMsg)) + env1 := imapclient.FetchEnvelope{ + Date: "Mon, 7 Feb 1994 21:52:25 -0800", + Subject: "afternoon meeting", + From: []imapclient.Address{{Name: "Fred Foobar", Mailbox: "foobar", Host: "blurdybloop.example"}}, + Sender: []imapclient.Address{{Name: "Fred Foobar", Mailbox: "foobar", Host: "blurdybloop.example"}}, + ReplyTo: []imapclient.Address{{Name: "Fred Foobar", Mailbox: "foobar", Host: "blurdybloop.example"}}, + To: []imapclient.Address{{Mailbox: "mooch", Host: "owatagu.siam.edu.example"}}, + MessageID: "", + } + noflags := imapclient.FetchFlags(nil) + bodyxstructure1 := imapclient.FetchBodystructure{ + RespAttr: "BODY", + Body: imapclient.BodyTypeText{ 
+ MediaType: "TEXT", + MediaSubtype: "PLAIN", + BodyFields: imapclient.BodyFields{ + Params: [][2]string{[...]string{"CHARSET", "US-ASCII"}}, + Octets: 57, + }, + Lines: 2, + }, + } + bodystructure1 := bodyxstructure1 + bodystructure1.RespAttr = "BODYSTRUCTURE" + + split := strings.SplitN(exampleMsg, "\r\n\r\n", 2) + exampleMsgHeader := split[0] + "\r\n\r\n" + exampleMsgBody := split[1] + + binary1 := imapclient.FetchBinary{RespAttr: "BINARY[]", Data: exampleMsg} + binarypart1 := imapclient.FetchBinary{RespAttr: "BINARY[1]", Parts: []uint32{1}, Data: exampleMsgBody} + binarypartial1 := imapclient.FetchBinary{RespAttr: "BINARY[]", Data: exampleMsg[1:2]} + binarypartpartial1 := imapclient.FetchBinary{RespAttr: "BINARY[1]", Parts: []uint32{1}, Data: exampleMsgBody[1:2]} + binaryend1 := imapclient.FetchBinary{RespAttr: "BINARY[]", Data: ""} + binarypartend1 := imapclient.FetchBinary{RespAttr: "BINARY[1]", Parts: []uint32{1}, Data: ""} + binarysize1 := imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[]", Size: int64(len(exampleMsg))} + binarysizepart1 := imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[1]", Parts: []uint32{1}, Size: int64(len(exampleMsgBody))} + bodyheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER]", Section: "HEADER", Body: exampleMsgHeader} + bodytext1 := imapclient.FetchBody{RespAttr: "BODY[TEXT]", Section: "TEXT", Body: exampleMsgBody} + body1 := imapclient.FetchBody{RespAttr: "BODY[]", Body: exampleMsg} + bodypart1 := imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: exampleMsgBody} + bodyoff1 := imapclient.FetchBody{RespAttr: "BODY[]<1>", Section: "", Offset: 1, Body: exampleMsg[1:3]} + body1off1 := imapclient.FetchBody{RespAttr: "BODY[1]<1>", Section: "1", Offset: 1, Body: exampleMsgBody[1:3]} + bodyend1 := imapclient.FetchBody{RespAttr: "BODY[1]<100000>", Section: "1", Offset: 100000, Body: ""} // todo: should offset be what was requested, or the size of the message? 
+ rfcheader1 := imapclient.FetchRFC822Header(exampleMsgHeader) + rfctext1 := imapclient.FetchRFC822Text(exampleMsgBody) + rfc1 := imapclient.FetchRFC822(exampleMsg) + headerSplit := strings.SplitN(exampleMsgHeader, "\r\n", 2) + dateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS (Date)]", Section: "HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"} + nodateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS.NOT (Date)]", Section: "HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]} + date1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS (Date)]", Section: "1.HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"} + nodate1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS.NOT (Date)]", Section: "1.HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]} + mime1 := imapclient.FetchBody{RespAttr: "BODY[1.MIME]", Section: "1.MIME", Body: "MIME-Version: 1.0\r\nContent-Type: TEXT/PLAIN; CHARSET=US-ASCII\r\n\r\n"} + + flagsSeen := imapclient.FetchFlags{`\Seen`} + + tc.transactf("ok", "fetch 1 all") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, noflags}}) + + tc.transactf("ok", "fetch 1 fast") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, noflags}}) + + tc.transactf("ok", "fetch 1 full") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, bodyxstructure1, noflags}}) + + tc.transactf("ok", "fetch 1 flags") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + + tc.transactf("ok", "fetch 1 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}) + + // Should be returned unmodified, because there is no content-transfer-encoding. 
+ tc.transactf("ok", "fetch 1 binary[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1, flagsSeen}}) + + tc.transactf("ok", "fetch 1 binary[1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypart1}}) // Seen flag not changed. + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 binary[]<1.1>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartial1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 binary[1]<1.1>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartpartial1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 binary[]<10000.10001>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binaryend1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 binary[1]<10000.10001>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartend1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 binary.size[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysize1}}) + + tc.transactf("ok", "fetch 1 binary.size[1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysizepart1}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}}) + tc.transactf("ok", "fetch 1 body[]<1.2>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyoff1}}) // Already seen. 
+ + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodypart1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[1]<1.2>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1off1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[1]<100000.100000>") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyend1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[header]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyheader1, flagsSeen}}) + + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body[text]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodytext1, flagsSeen}}) + + // equivalent to body.peek[header], ../rfc/3501:3183 + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 rfc822.header") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfcheader1}}) + + // equivalent to body[text], ../rfc/3501:3199 + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 rfc822.text") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1, flagsSeen}}) + + // equivalent to body[], ../rfc/3501:3179 + tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 rfc822") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1, flagsSeen}}) + + // With PEEK, we should not get the \Seen flag. 
+ tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.transactf("ok", "fetch 1 body.peek[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}}) + + tc.transactf("ok", "fetch 1 binary.peek[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}}) + + // HEADER.FIELDS and .NOT + tc.transactf("ok", "fetch 1 body.peek[header.fields (date)]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, dateheader1}}) + tc.transactf("ok", "fetch 1 body.peek[header.fields.not (date)]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodateheader1}}) + // For non-multipart messages, 1 means the whole message. ../rfc/9051:4481 + tc.transactf("ok", "fetch 1 body.peek[1.header.fields (date)]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1header1}}) + tc.transactf("ok", "fetch 1 body.peek[1.header.fields.not (date)]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodate1header1}}) + + // MIME, part 1 for non-multipart messages is the message itself. ../rfc/9051:4481 + tc.transactf("ok", "fetch 1 body.peek[1.mime]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, mime1}}) + + // Missing sequence number. ../rfc/9051:7018 + tc.transactf("bad", "fetch 2 body[]") + + tc.transactf("ok", "fetch 1:1 body[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}}) + + // UID fetch + tc.transactf("ok", "uid fetch 1 body[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}}) + + // UID fetch + tc.transactf("ok", "uid fetch 2 body[]") + tc.xuntagged() + + // Test some invalid syntax. 
+ tc.transactf("bad", "fetch") + tc.transactf("bad", "fetch ") + tc.transactf("bad", "fetch ") + tc.transactf("bad", "fetch 1") // At least one requested item required. + tc.transactf("bad", "fetch 1 ()") // Empty list not allowed + tc.transactf("bad", "fetch 1 unknown") + tc.transactf("bad", "fetch 1 (unknown)") + tc.transactf("bad", "fetch 1 (all)") // Macro's not allowed in list. + tc.transactf("bad", "fetch 1 binary") // [] required + tc.transactf("bad", "fetch 1 binary[text]") // Text/header etc only allowed for body[]. + tc.transactf("bad", "fetch 1 binary[]<1>") // Count required. + tc.transactf("bad", "fetch 1 binary[]<1.0>") // Count must be > 0. + tc.transactf("bad", "fetch 1 binary[]<1..1>") // Single dot. + tc.transactf("bad", "fetch 1 body[]<1>") // Count required. + tc.transactf("bad", "fetch 1 body[]<1.0>") // Count must be > 0. + tc.transactf("bad", "fetch 1 body[]<1..1>") // Single dot. + tc.transactf("bad", "fetch 1 body[header.fields]") // List of headers required. + tc.transactf("bad", "fetch 1 body[header.fields ()]") // List must be non-empty. + tc.transactf("bad", "fetch 1 body[header.fields.not]") // List of headers required. + tc.transactf("bad", "fetch 1 body[header.fields.not ()]") // List must be non-empty. + tc.transactf("bad", "fetch 1 body[mime]") // MIME must be prefixed with a number. ../rfc/9051:4497 + + tc.transactf("no", "fetch 1 body[2]") // No such part. + + // Add more complex message. 
+ + uid2 := imapclient.FetchUID(2) + bodystructure2 := imapclient.FetchBodystructure{ + RespAttr: "BODYSTRUCTURE", + Body: imapclient.BodyTypeMpart{ + Bodies: []any{ + imapclient.BodyTypeBasic{BodyFields: imapclient.BodyFields{Octets: 275}}, + imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "US-ASCII"}}, Octets: 114}, Lines: 3}, + imapclient.BodyTypeMpart{ + Bodies: []any{ + imapclient.BodyTypeBasic{MediaType: "AUDIO", MediaSubtype: "BASIC", BodyFields: imapclient.BodyFields{CTE: "BASE64", Octets: 22}}, + imapclient.BodyTypeBasic{MediaType: "IMAGE", MediaSubtype: "JPEG", BodyFields: imapclient.BodyFields{CTE: "BASE64"}}, + }, + MediaSubtype: "PARALLEL", + }, + imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "ENRICHED", BodyFields: imapclient.BodyFields{Octets: 145}, Lines: 5}, + imapclient.BodyTypeMsg{ + MediaType: "MESSAGE", + MediaSubtype: "RFC822", + BodyFields: imapclient.BodyFields{Octets: 228}, + Envelope: imapclient.Envelope{ + Subject: "(subject in US-ASCII)", + From: []imapclient.Address{{Name: "", Adl: "", Mailbox: "info", Host: "mox.example"}}, + Sender: []imapclient.Address{{Name: "", Adl: "", Mailbox: "info", Host: "mox.example"}}, + ReplyTo: []imapclient.Address{{Name: "", Adl: "", Mailbox: "info", Host: "mox.example"}}, + To: []imapclient.Address{{Name: "mox", Adl: "", Mailbox: "info", Host: "mox.example"}}, + }, + Bodystructure: imapclient.BodyTypeText{ + MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "ISO-8859-1"}}, CTE: "QUOTED-PRINTABLE", Octets: 51}, Lines: 1}, + Lines: 7, + }, + }, + MediaSubtype: "MIXED", + }, + } + tc.client.Append("inbox", nil, &received, []byte(nestedMessage)) + tc.transactf("ok", "fetch 2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + // Multiple responses. 
+ tc.transactf("ok", "fetch 1:2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + tc.transactf("ok", "fetch 1,2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + tc.transactf("ok", "fetch 2:1 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + tc.transactf("ok", "fetch 1:* bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + tc.transactf("ok", "fetch *:1 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + tc.transactf("ok", "fetch *:2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + tc.transactf("ok", "fetch * bodystructure") // Highest msgseq. 
+ tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + tc.transactf("ok", "uid fetch 1:* bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + tc.transactf("ok", "uid fetch 1:2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + tc.transactf("ok", "uid fetch 1,2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + tc.transactf("ok", "uid fetch 2:2 bodystructure") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}}) + + // todo: read the bodies/headers of the parts, and of the nested message. + tc.transactf("ok", "fetch 2 body.peek[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[]", Body: nestedMessage}}}) + + part1 := tocrlf(` ... Some text appears here ... + +[Note that the blank between the boundary and the start + of the text in this part means no header fields were + given and this is text in the US-ASCII character set. + It could have been done with explicit typing as in the + next part.] +`) + tc.transactf("ok", "fetch 2 body.peek[1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: part1}}}) + + tc.transactf("no", "fetch 2 binary.peek[3]") // Only allowed on leaf parts, not multiparts. + tc.transactf("no", "fetch 2 binary.peek[5]") // Only allowed on leaf parts, not messages. 
+ + part31 := "aGVsbG8NCndvcmxkDQo=\r\n" + part31dec := "hello\r\nworld\r\n" + tc.transactf("ok", "fetch 2 binary.size[3.1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[3.1]", Parts: []uint32{3, 1}, Size: int64(len(part31dec))}}}) + + tc.transactf("ok", "fetch 2 body.peek[3.1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3.1]", Section: "3.1", Body: part31}}}) + + tc.transactf("ok", "fetch 2 binary.peek[3.1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinary{RespAttr: "BINARY[3.1]", Parts: []uint32{3, 1}, Data: part31dec}}}) + + part3 := tocrlf(`--unique-boundary-2 +Content-Type: audio/basic +Content-Transfer-Encoding: base64 + +aGVsbG8NCndvcmxkDQo= + +--unique-boundary-2 +Content-Type: image/jpeg +Content-Transfer-Encoding: base64 + + +--unique-boundary-2-- + +`) + tc.transactf("ok", "fetch 2 body.peek[3]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3]", Section: "3", Body: part3}}}) + + part2mime := tocrlf(`Content-type: text/plain; charset=US-ASCII + +`) + tc.transactf("ok", "fetch 2 body.peek[2.mime]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[2.MIME]", Section: "2.MIME", Body: part2mime}}}) + + part5 := tocrlf(`From: info@mox.example +To: mox +Subject: (subject in US-ASCII) +Content-Type: Text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: Quoted-printable + + ... Additional text in ISO-8859-1 goes here ... 
+`) + tc.transactf("ok", "fetch 2 body.peek[5]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5]", Section: "5", Body: part5}}}) + + part5header := tocrlf(`From: info@mox.example +To: mox +Subject: (subject in US-ASCII) +Content-Type: Text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: Quoted-printable + +`) + tc.transactf("ok", "fetch 2 body.peek[5.header]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.HEADER]", Section: "5.HEADER", Body: part5header}}}) + + part5mime := tocrlf(`Content-Type: Text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: Quoted-printable + +`) + tc.transactf("ok", "fetch 2 body.peek[5.mime]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.MIME]", Section: "5.MIME", Body: part5mime}}}) + + part5text := " ... Additional text in ISO-8859-1 goes here ...\r\n" + tc.transactf("ok", "fetch 2 body.peek[5.text]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.TEXT]", Section: "5.TEXT", Body: part5text}}}) + + tc.transactf("ok", "fetch 2 body.peek[5.1]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.1]", Section: "5.1", Body: part5text}}}) + + // In case of EXAMINE instead of SELECT, we should not be seeing any changed \Seen flags for non-peek commands. 
+ tc.client.StoreFlagsClear("1", true, `\Seen`) + tc.client.Unselect() + tc.client.Examine("inbox") + + tc.transactf("ok", "fetch 1 binary[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}}) + + tc.transactf("ok", "fetch 1 body[]") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}}) + + tc.transactf("ok", "fetch 1 rfc822.text") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1}}) + + tc.transactf("ok", "fetch 1 rfc822") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1}}) + + tc.client.Logout() +} diff --git a/imapserver/fuzz_test.go b/imapserver/fuzz_test.go new file mode 100644 index 0000000..590e317 --- /dev/null +++ b/imapserver/fuzz_test.go @@ -0,0 +1,140 @@ +package imapserver + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net" + "os" + "testing" + "time" + + "github.com/mjl-/mox/imapclient" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/store" +) + +// Fuzz the server. For each fuzz string, we set up servers in various connection states, and write the string as command. 
+func FuzzServer(f *testing.F) { + seed := []string{ + fmt.Sprintf("authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000testtest"))), + "*", + "capability", + "noop", + "logout", + "select inbox", + "examine inbox", + "unselect", + "close", + "expunge", + "subscribe inbox", + "unsubscribe inbox", + `lsub "" "*"`, + `list "" ""`, + `namespace`, + "enable utf8=accept", + "create inbox", + "create tmpbox", + "rename tmpbox ntmpbox", + "delete ntmpbox", + "status inbox (uidnext messages uidvalidity deleted size unseen recent)", + "append inbox (\\seen) {2+}\r\nhi", + "fetch 1 all", + "fetch 1 body", + "fetch 1 (bodystructure)", + `store 1 flags (\seen \answered)`, + `store 1 +flags ($junk)`, + `store 1 -flags ($junk)`, + "noop", + "copy 1Trash", + "copy 1 Trash", + "move 1 Trash", + "search 1 all", + } + for _, cmd := range seed { + const tag = "x " + f.Add(tag + cmd) + } + + mox.Context = context.Background() + mox.ConfigStaticPath = "../testdata/imap/mox.conf" + mox.MustLoadConfig() + dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir) + os.RemoveAll(dataDir) + acc, err := store.OpenAccount("mjl") + if err != nil { + f.Fatalf("open account: %v", err) + } + defer acc.Close() + err = acc.SetPassword("testtest") + if err != nil { + f.Fatalf("set password: %v", err) + } + done := store.Switchboard() + defer close(done) + + comm := store.RegisterComm(acc) + defer comm.Unregister() + + var cid int64 = 1 + + var fl *os.File + if false { + fl, err = os.Create("fuzz.log") + if err != nil { + f.Fatalf("fuzz log") + } + defer fl.Close() + } + flog := func(err error, msg string) { + if fl != nil && err != nil { + fmt.Fprintf(fl, "%s: %v\n", msg, err) + } + } + + f.Fuzz(func(t *testing.T, s string) { + run := func(cmds []string) { + serverConn, clientConn := net.Pipe() + defer serverConn.Close() + + go func() { + defer func() { + x := recover() + // Protocol can become botched, when fuzzer sends literals. 
+ if x == nil { + return + } + err, ok := x.(error) + if !ok || !errors.Is(err, os.ErrDeadlineExceeded) { + panic(x) + } + }() + + defer clientConn.Close() + + err := clientConn.SetDeadline(time.Now().Add(time.Second)) + flog(err, "set client deadline") + client, _ := imapclient.New(clientConn, true) + + for _, cmd := range cmds { + client.Commandf("", "%s", cmd) + client.Response() + } + client.Commandf("", "%s", s) + client.Response() + }() + + err = serverConn.SetDeadline(time.Now().Add(time.Second)) + flog(err, "set server deadline") + serve("test", cid, nil, serverConn, false, true) + cid++ + } + + run([]string{}) + run([]string{"login mjl@mox.example testtest"}) + run([]string{"login mjl@mox.example testtest", "select inbox"}) + xappend := fmt.Sprintf("append inbox () {%d+}\r\n%s", len(exampleMsg), exampleMsg) + run([]string{"login mjl@mox.example testtest", "select inbox", xappend}) + }) +} diff --git a/imapserver/idle_test.go b/imapserver/idle_test.go new file mode 100644 index 0000000..6c16ee9 --- /dev/null +++ b/imapserver/idle_test.go @@ -0,0 +1,52 @@ +package imapserver + +import ( + "fmt" + "testing" + "time" + + "github.com/mjl-/mox/imapclient" +) + +func TestIdle(t *testing.T) { + tc1 := start(t) + defer tc1.close() + tc1.transactf("ok", "login mjl@mox.example testtest") + + tc2 := startNoSwitchboard(t) + defer tc2.close() + tc2.transactf("ok", "login mjl@mox.example testtest") + + tc1.transactf("ok", "select inbox") + tc2.transactf("ok", "select inbox") + + // todo: test with delivery through smtp + + tc2.cmdf("", "idle") + tc2.readprefixline("+") + done := make(chan error) + go func() { + defer func() { + x := recover() + if x != nil { + done <- fmt.Errorf("%v", x) + } + }() + untagged, _ := tc2.client.ReadUntagged() + var exists imapclient.UntaggedExists + tuntagged(tc2.t, untagged, &exists) + // todo: validate the data we got back. 
+ tc2.writelinef("done") + done <- nil + }() + + tc1.transactf("ok", "append inbox () {%d+}\r\n%s", len(exampleMsg), exampleMsg) + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case err := <-done: + tc1.check(err, "idle") + case <-timer.C: + t.Fatalf("idle did not finish") + } +} diff --git a/imapserver/list.go b/imapserver/list.go new file mode 100644 index 0000000..10f06a8 --- /dev/null +++ b/imapserver/list.go @@ -0,0 +1,228 @@ +package imapserver + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + + "github.com/mjl-/bstore" + "github.com/mjl-/mox/store" +) + +// LIST command, for listing mailboxes with various attributes, including about subscriptions and children. +// We don't have flags Marked, Unmarked, NoSelect and NoInferiors and we don't have REMOTE mailboxes. +// +// State: Authenticated and selected. +func (c *conn) cmdList(tag, cmd string, p *parser) { + // Command: ../rfc/9051:2224 ../rfc/6154:144 ../rfc/5258:193 ../rfc/3501:2191 + // Examples: ../rfc/9051:2755 ../rfc/6154:347 ../rfc/5258:679 ../rfc/3501:2359 + + // Request syntax: ../rfc/9051:6600 ../rfc/6154:478 ../rfc/5258:1095 ../rfc/3501:4793 + p.xspace() + var isExtended bool + var listSubscribed bool + var listRecursive bool + if p.take("(") { + // ../rfc/9051:6633 + isExtended = true + selectOptions := map[string]bool{} + var nbase int + for !p.take(")") { + if len(selectOptions) > 0 { + p.xspace() + } + w := p.xatom() + W := strings.ToUpper(w) + switch W { + case "REMOTE": + case "RECURSIVEMATCH": + listRecursive = true + case "SUBSCRIBED": + nbase++ + listSubscribed = true + default: + // ../rfc/9051:2398 + xsyntaxErrorf("bad list selection option %q", w) + } + // Duplicates must be accepted. 
../rfc/9051:2399 + selectOptions[W] = true + } + if listRecursive && nbase == 0 { + // ../rfc/9051:6640 + xsyntaxErrorf("cannot have RECURSIVEMATCH selection option without other (base) selection option") + } + p.xspace() + } + reference := p.xmailbox() + p.xspace() + patterns, isList := p.xmboxOrPat() + isExtended = isExtended || isList + var retSubscribed, retChildren, retSpecialUse bool + var retStatusAttrs []string + if p.take(" RETURN (") { + isExtended = true + // ../rfc/9051:6613 ../rfc/9051:6915 ../rfc/9051:7072 ../rfc/9051:6821 ../rfc/5819:95 + n := 0 + for !p.take(")") { + if n > 0 { + p.xspace() + } + n++ + w := p.xatom() + W := strings.ToUpper(w) + switch W { + case "SUBSCRIBED": + retSubscribed = true + case "CHILDREN": + // ../rfc/3348:44 + retChildren = true + case "SPECIAL-USE": + // ../rfc/6154:478 + retSpecialUse = true + case "STATUS": + // ../rfc/9051:7072 ../rfc/5819:181 + p.xspace() + p.xtake("(") + retStatusAttrs = []string{p.xstatusAtt()} + for p.take(" ") { + retStatusAttrs = append(retStatusAttrs, p.xstatusAtt()) + } + p.xtake(")") + default: + // ../rfc/9051:2398 + xsyntaxErrorf("bad list return option %q", w) + } + } + } + p.xempty() + + if !isExtended && reference == "" && patterns[0] == "" { + // ../rfc/9051:2277 ../rfc/3501:2221 + c.bwritelinef(`* LIST () "/" ""`) + c.ok(tag, cmd) + return + } + + if isExtended { + // ../rfc/9051:2286 + n := make([]string, 0, len(patterns)) + for _, p := range patterns { + if p != "" { + n = append(n, p) + } + } + patterns = n + } + re := xmailboxPatternMatcher(reference, patterns) + var responseLines []string + + c.account.WithRLock(func() { + c.xdbread(func(tx *bstore.Tx) { + type info struct { + mailbox *store.Mailbox + subscribed bool + } + names := map[string]info{} + hasSubscribedChild := map[string]bool{} + hasChild := map[string]bool{} + var nameList []string + + q := bstore.QueryTx[store.Mailbox](tx) + err := q.ForEach(func(mb store.Mailbox) error { + names[mb.Name] = info{mailbox: &mb} + 
nameList = append(nameList, mb.Name) + for p := filepath.Dir(mb.Name); p != "."; p = filepath.Dir(p) { + hasChild[p] = true + } + return nil + }) + xcheckf(err, "listing mailboxes") + + qs := bstore.QueryTx[store.Subscription](tx) + err = qs.ForEach(func(sub store.Subscription) error { + info, ok := names[sub.Name] + info.subscribed = true + names[sub.Name] = info + if !ok { + nameList = append(nameList, sub.Name) + } + for p := filepath.Dir(sub.Name); p != "."; p = filepath.Dir(p) { + hasSubscribedChild[p] = true + } + return nil + }) + xcheckf(err, "listing subscriptions") + + sort.Strings(nameList) // For predictable order in tests. + + for _, name := range nameList { + if !re.MatchString(name) { + continue + } + info := names[name] + + var flags listspace + var extended listspace + if listRecursive && hasSubscribedChild[name] { + extended = listspace{bare("CHILDINFO"), listspace{dquote("SUBSCRIBED")}} + } + if listSubscribed && info.subscribed { + flags = append(flags, bare(`\Subscribed`)) + if info.mailbox == nil { + flags = append(flags, bare(`\NonExistent`)) + } + } + if (info.mailbox == nil || listSubscribed) && flags == nil && extended == nil { + continue + } + + if retChildren { + var f string + if hasChild[name] { + f = `\HasChildren` + } else { + f = `\HasNoChildren` + } + flags = append(flags, bare(f)) + } + if !listSubscribed && retSubscribed && info.subscribed { + flags = append(flags, bare(`\Subscribed`)) + } + if retSpecialUse && info.mailbox != nil { + if info.mailbox.Archive { + flags = append(flags, bare(`\Archive`)) + } + if info.mailbox.Draft { + flags = append(flags, bare(`\Draft`)) + } + if info.mailbox.Junk { + flags = append(flags, bare(`\Junk`)) + } + if info.mailbox.Sent { + flags = append(flags, bare(`\Sent`)) + } + if info.mailbox.Trash { + flags = append(flags, bare(`\Trash`)) + } + } + + var extStr string + if extended != nil { + extStr = " " + extended.pack(c) + } + line := fmt.Sprintf(`* LIST %s "/" %s%s`, flags.pack(c), 
astring(name).pack(c), extStr) + responseLines = append(responseLines, line) + + if retStatusAttrs != nil && info.mailbox != nil { + responseLines = append(responseLines, c.xstatusLine(tx, *info.mailbox, retStatusAttrs)) + } + } + }) + }) + + for _, line := range responseLines { + c.bwritelinef("%s", line) + } + c.ok(tag, cmd) +} diff --git a/imapserver/list_test.go b/imapserver/list_test.go new file mode 100644 index 0000000..4fe1f6b --- /dev/null +++ b/imapserver/list_test.go @@ -0,0 +1,215 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" + "github.com/mjl-/mox/store" +) + +func TestListBasic(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + ulist := func(name string, flags ...string) imapclient.UntaggedList { + if len(flags) == 0 { + flags = nil + } + return imapclient.UntaggedList{Flags: flags, Separator: '/', Mailbox: name} + } + + tc.last(tc.client.List("INBOX")) + tc.xuntagged(ulist("Inbox")) + + tc.last(tc.client.List("Inbox")) + tc.xuntagged(ulist("Inbox")) + + tc.last(tc.client.List("%")) + tc.xuntagged(ulist("Archive"), ulist("Drafts"), ulist("Inbox"), ulist("Junk"), ulist("Sent"), ulist("Trash")) + + tc.last(tc.client.List("*")) + tc.xuntagged(ulist("Archive"), ulist("Drafts"), ulist("Inbox"), ulist("Junk"), ulist("Sent"), ulist("Trash")) + + tc.last(tc.client.List("A*")) + tc.xuntagged(ulist("Archive")) + + tc.client.Create("Inbox/todo") + + tc.last(tc.client.List("Inbox*")) + tc.xuntagged(ulist("Inbox"), ulist("Inbox/todo")) + + tc.last(tc.client.List("Inbox/%")) + tc.xuntagged(ulist("Inbox/todo")) + + tc.last(tc.client.List("Inbox/*")) + tc.xuntagged(ulist("Inbox/todo")) + + // Leading full INBOX is turned into Inbox, so mailbox matches. + tc.last(tc.client.List("INBOX/*")) + tc.xuntagged(ulist("Inbox/todo")) + + // No match because we are only touching various casings of the full "INBOX". 
+ tc.last(tc.client.List("INBO*")) + tc.xuntagged() +} + +func TestListExtended(t *testing.T) { + defer mockUIDValidity()() + + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + ulist := func(name string, flags ...string) imapclient.UntaggedList { + if len(flags) == 0 { + flags = nil + } + return imapclient.UntaggedList{Flags: flags, Separator: '/', Mailbox: name} + } + + uidvals := map[string]uint32{} + for _, name := range store.InitialMailboxes { + uidvals[name] = 1 + } + var uidvalnext uint32 = 2 + uidval := func(name string) uint32 { + v, ok := uidvals[name] + if !ok { + v = uidvalnext + uidvals[name] = v + uidvalnext++ + } + return v + } + + ustatus := func(name string) imapclient.UntaggedStatus { + attrs := map[string]int64{ + "MESSAGES": 0, + "UIDNEXT": 1, + "UIDVALIDITY": int64(uidval(name)), + "UNSEEN": 0, + "DELETED": 0, + "SIZE": 0, + "RECENT": 0, + "APPENDLIMIT": 0, + } + return imapclient.UntaggedStatus{Mailbox: name, Attrs: attrs} + } + + const ( + Fsubscribed = `\Subscribed` + Fhaschildren = `\HasChildren` + Fhasnochildren = `\HasNoChildren` + Fnonexistent = `\NonExistent` + Farchive = `\Archive` + Fdraft = `\Draft` + Fjunk = `\Junk` + Fsent = `\Sent` + Ftrash = `\Trash` + ) + + // untaggedlist with flags subscribed and hasnochildren + xlist := func(name string, flags ...string) imapclient.UntaggedList { + flags = append([]string{Fhasnochildren, Fsubscribed}, flags...) + return ulist(name, flags...) + } + + xchildlist := func(name string, flags ...string) imapclient.UntaggedList { + u := ulist(name, flags...) 
+ comp := imapclient.TaggedExtComp{String: "SUBSCRIBED"} + u.Extended = []imapclient.MboxListExtendedItem{{Tag: "CHILDINFO", Val: imapclient.TaggedExtVal{Comp: &comp}}} + return u + } + + tc.last(tc.client.ListFull(false, "INBOX")) + tc.xuntagged(xlist("Inbox"), ustatus("Inbox")) + + tc.last(tc.client.ListFull(false, "Inbox")) + tc.xuntagged(xlist("Inbox"), ustatus("Inbox")) + + tc.last(tc.client.ListFull(false, "%")) + tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive"), xlist("Drafts", Fdraft), ustatus("Drafts"), xlist("Inbox"), ustatus("Inbox"), xlist("Junk", Fjunk), ustatus("Junk"), xlist("Sent", Fsent), ustatus("Sent"), xlist("Trash", Ftrash), ustatus("Trash")) + + tc.last(tc.client.ListFull(false, "*")) + tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive"), xlist("Drafts", Fdraft), ustatus("Drafts"), xlist("Inbox"), ustatus("Inbox"), xlist("Junk", Fjunk), ustatus("Junk"), xlist("Sent", Fsent), ustatus("Sent"), xlist("Trash", Ftrash), ustatus("Trash")) + + tc.last(tc.client.ListFull(false, "A*")) + tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive")) + + tc.last(tc.client.ListFull(false, "A*", "Junk")) + tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive"), xlist("Junk", Fjunk), ustatus("Junk")) + + tc.client.Create("Inbox/todo") + + tc.last(tc.client.ListFull(false, "Inbox*")) + tc.xuntagged(ulist("Inbox", Fhaschildren, Fsubscribed), ustatus("Inbox"), xlist("Inbox/todo"), ustatus("Inbox/todo")) + + tc.last(tc.client.ListFull(false, "Inbox/%")) + tc.xuntagged(xlist("Inbox/todo"), ustatus("Inbox/todo")) + + tc.last(tc.client.ListFull(false, "Inbox/*")) + tc.xuntagged(xlist("Inbox/todo"), ustatus("Inbox/todo")) + + // Leading full INBOX is turned into Inbox, so mailbox matches. + tc.last(tc.client.ListFull(false, "INBOX/*")) + tc.xuntagged(xlist("Inbox/todo"), ustatus("Inbox/todo")) + + // No match because we are only touching various casings of the full "INBOX". 
+ tc.last(tc.client.ListFull(false, "INBO*")) + tc.xuntagged() + + tc.last(tc.client.ListFull(true, "Inbox")) + tc.xuntagged(xchildlist("Inbox", Fsubscribed, Fhaschildren), ustatus("Inbox")) + + tc.client.Unsubscribe("Inbox") + tc.last(tc.client.ListFull(true, "Inbox")) + tc.xuntagged(xchildlist("Inbox", Fhaschildren), ustatus("Inbox")) + + tc.client.Delete("Inbox/todo") // Still subscribed. + tc.last(tc.client.ListFull(true, "Inbox")) + tc.xuntagged(xchildlist("Inbox", Fhasnochildren), ustatus("Inbox")) + + // Simple extended list without RETURN options. + tc.transactf("ok", `list "" ("inbox")`) + tc.xuntagged(ulist("Inbox")) + + tc.transactf("ok", `list () "" ("inbox") return ()`) + tc.xuntagged(ulist("Inbox")) + + tc.transactf("ok", `list "" ("inbox") return ()`) + tc.xuntagged(ulist("Inbox")) + + tc.transactf("ok", `list () "" ("inbox")`) + tc.xuntagged(ulist("Inbox")) + + tc.transactf("ok", `list (remote) "" ("inbox")`) + tc.xuntagged(ulist("Inbox")) + + tc.transactf("ok", `list (remote) "" "/inbox"`) + tc.xuntagged() + + tc.transactf("ok", `list (remote) "/inbox" ""`) + tc.xuntagged() + + tc.transactf("ok", `list (remote) "inbox" ""`) + tc.xuntagged() + + tc.transactf("ok", `list (remote) "inbox" "a"`) + tc.xuntagged() + + tc.client.Create("inbox/a") + tc.transactf("ok", `list (remote) "inbox" "a"`) + tc.xuntagged(ulist("Inbox/a")) + + tc.client.Subscribe("x") + tc.transactf("ok", `list (subscribed) "" x return (subscribed)`) + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "x"}) + + tc.transactf("bad", `list (recursivematch) "" "*"`) // Cannot have recursivematch without a base selection option like subscribed. + tc.transactf("bad", `list (recursivematch remote) "" "*"`) // "remote" is not a base selection option. + tc.transactf("bad", `list (unknown) "" "*"`) // Unknown selection options must result in BAD. 
+ tc.transactf("bad", `list () "" "*" return (unknown)`) // Unknown return options must result in BAD. +} diff --git a/imapserver/lsub_test.go b/imapserver/lsub_test.go new file mode 100644 index 0000000..e29403e --- /dev/null +++ b/imapserver/lsub_test.go @@ -0,0 +1,35 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestLsub(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "lsub") // Missing params. + tc.transactf("bad", `lsub ""`) // Missing param. + tc.transactf("bad", `lsub "" x `) // Leftover data. + + tc.transactf("ok", `lsub "" x*`) + tc.xuntagged() + + tc.transactf("ok", "create a/b/c") + tc.transactf("ok", `lsub "" a/*`) + tc.xuntagged(imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b"}, imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b/c"}) + + // ../rfc/3501:2394 + tc.transactf("ok", "unsubscribe a") + tc.transactf("ok", "unsubscribe a/b") + tc.transactf("ok", `lsub "" a/%%`) + tc.xuntagged(imapclient.UntaggedLsub{Flags: []string{`\NoSelect`}, Separator: '/', Mailbox: "a/b"}) + + tc.transactf("ok", "unsubscribe a/b/c") + tc.transactf("ok", `lsub "" a/%%`) + tc.xuntagged() +} diff --git a/imapserver/move_test.go b/imapserver/move_test.go new file mode 100644 index 0000000..0938dac --- /dev/null +++ b/imapserver/move_test.go @@ -0,0 +1,92 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestMove(t *testing.T) { + defer mockUIDValidity()() + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc3 := startNoSwitchboard(t) + defer tc3.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + + tc2.client.Login("mjl@mox.example", "testtest") + tc2.client.Select("Trash") + + tc3.client.Login("mjl@mox.example", "testtest") + tc3.client.Select("inbox") + + tc.transactf("bad", "move") // Missing params. 
+ tc.transactf("bad", "move 1") // Missing params. + tc.transactf("bad", "move 1 inbox ") // Leftover. + + // Seqs 1,2 and UIDs 3,4. + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.StoreFlagsSet("1:2", true, `\Deleted`) + tc.client.Expunge() + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + + tc.client.Unselect() + tc.client.Examine("inbox") + tc.transactf("no", "move 1 Trash") // Opened readonly. + tc.client.Unselect() + tc.client.Select("inbox") + + tc.transactf("no", "move 1 nonexistent") + tc.xcode("TRYCREATE") + + tc.transactf("no", "move 1 inbox") // Cannot move to same mailbox. + + tc2.transactf("ok", "noop") // Drain. + tc3.transactf("ok", "noop") // Drain. + + tc.transactf("ok", "move 1:* Trash") + ptr := func(v uint32) *uint32 { return &v } + tc.xuntagged( + imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}}, More: "moved"}}, + imapclient.UntaggedExpunge(1), + imapclient.UntaggedExpunge(1), + ) + tc2.transactf("ok", "noop") + tc2.xuntagged( + imapclient.UntaggedExists(2), + imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}}, + imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}}, + ) + tc3.transactf("ok", "noop") + tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1)) + + // UIDs 5,6 + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc2.transactf("ok", "noop") // Drain. + tc3.transactf("ok", "noop") // Drain. + + tc.transactf("no", "uid move 1:4 Trash") // No match. 
+ tc.transactf("ok", "uid move 6:5 Trash") + tc.xuntagged( + imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}}, More: "moved"}}, + imapclient.UntaggedExpunge(1), + imapclient.UntaggedExpunge(1), + ) + tc2.transactf("ok", "noop") + tc2.xuntagged( + imapclient.UntaggedExists(4), + imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}}, + imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}}, + ) + tc3.transactf("ok", "noop") + tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1)) +} diff --git a/imapserver/pack.go b/imapserver/pack.go new file mode 100644 index 0000000..beb0575 --- /dev/null +++ b/imapserver/pack.go @@ -0,0 +1,213 @@ +package imapserver + +import ( + "fmt" + "io" +) + +type token interface { + pack(c *conn) string + writeTo(c *conn, w io.Writer) +} + +type bare string + +func (t bare) pack(c *conn) string { + return string(t) +} + +func (t bare) writeTo(c *conn, w io.Writer) { + w.Write([]byte(t.pack(c))) +} + +type niltoken struct{} + +var nilt niltoken + +func (t niltoken) pack(c *conn) string { + return "NIL" +} + +func (t niltoken) writeTo(c *conn, w io.Writer) { + w.Write([]byte(t.pack(c))) +} + +func nilOrString(s string) token { + if s == "" { + return nilt + } + return string0(s) +} + +type string0 string + +// ../rfc/9051:7081 +// ../rfc/9051:6856 ../rfc/6855:153 +func (t string0) pack(c *conn) string { + r := `"` + for _, ch := range t { + if ch == '\x00' || ch == '\r' || ch == '\n' || ch > 0x7f && !c.utf8strings() { + return syncliteral(t).pack(c) + } + if ch == '\\' || ch == '"' { + r += `\` + } + r += string(ch) + } + r += `"` + return r +} + +func (t string0) writeTo(c *conn, w io.Writer) { + 
w.Write([]byte(t.pack(c))) +} + +type dquote string + +func (t dquote) pack(c *conn) string { + r := `"` + for _, c := range t { + if c == '\\' || c == '"' { + r += `\` + } + r += string(c) + } + r += `"` + return r +} + +func (t dquote) writeTo(c *conn, w io.Writer) { + w.Write([]byte(t.pack(c))) +} + +type syncliteral string + +func (t syncliteral) pack(c *conn) string { + return fmt.Sprintf("{%d}\r\n", len(t)) + string(t) +} + +func (t syncliteral) writeTo(c *conn, w io.Writer) { + fmt.Fprintf(w, "{%d}\r\n", len(t)) + w.Write([]byte(t)) +} + +// data from reader with known size. +type readerSizeSyncliteral struct { + r io.Reader + size int64 +} + +func (t readerSizeSyncliteral) pack(c *conn) string { + buf, err := io.ReadAll(t.r) + if err != nil { + panic(err) + } + return fmt.Sprintf("{%d}\r\n", t.size) + string(buf) +} + +func (t readerSizeSyncliteral) writeTo(c *conn, w io.Writer) { + fmt.Fprintf(w, "{%d}\r\n", t.size) + if _, err := io.Copy(w, io.LimitReader(t.r, t.size)); err != nil { + panic(err) + } +} + +// data from reader without known size. +type readerSyncliteral struct { + r io.Reader +} + +func (t readerSyncliteral) pack(c *conn) string { + buf, err := io.ReadAll(t.r) + if err != nil { + panic(err) + } + return fmt.Sprintf("{%d}\r\n", len(buf)) + string(buf) +} + +func (t readerSyncliteral) writeTo(c *conn, w io.Writer) { + buf, err := io.ReadAll(t.r) + if err != nil { + panic(err) + } + fmt.Fprintf(w, "{%d}\r\n", len(buf)) + _, err = w.Write(buf) + if err != nil { + panic(err) + } +} + +// list with tokens space-separated +type listspace []token + +func (t listspace) pack(c *conn) string { + s := "(" + for i, e := range t { + if i > 0 { + s += " " + } + s += e.pack(c) + } + s += ")" + return s +} + +func (t listspace) writeTo(c *conn, w io.Writer) { + fmt.Fprint(w, "(") + for i, e := range t { + if i > 0 { + fmt.Fprint(w, " ") + } + e.writeTo(c, w) + } + fmt.Fprint(w, ")") +} + +// Concatenated tokens, no spaces or list syntax. 
+type concat []token + +func (t concat) pack(c *conn) string { + var s string + for _, e := range t { + s += e.pack(c) + } + return s +} + +func (t concat) writeTo(c *conn, w io.Writer) { + for _, e := range t { + e.writeTo(c, w) + } +} + +type astring string + +func (t astring) pack(c *conn) string { + if len(t) == 0 { + return string0(t).pack(c) + } +next: + for _, ch := range t { + for _, x := range atomChar { + if ch == x { + continue next + } + } + return string0(t).pack(c) + } + return string(t) +} + +func (t astring) writeTo(c *conn, w io.Writer) { + w.Write([]byte(t.pack(c))) +} + +type number uint32 + +func (t number) pack(c *conn) string { + return fmt.Sprintf("%d", t) +} + +func (t number) writeTo(c *conn, w io.Writer) { + w.Write([]byte(t.pack(c))) +} diff --git a/imapserver/parse.go b/imapserver/parse.go new file mode 100644 index 0000000..115d41b --- /dev/null +++ b/imapserver/parse.go @@ -0,0 +1,942 @@ +package imapserver + +import ( + "fmt" + "net/textproto" + "strconv" + "strings" + "time" + + "github.com/mjl-/mox/mlog" +) + +var ( + listWildcards = "%*" + char = charRange('\x01', '\x7f') + ctl = charRange('\x01', '\x19') + atomChar = charRemove(char, "(){ "+listWildcards+ctl) + respSpecials = atomChar + "]" + astringChar = atomChar + respSpecials +) + +func charRange(first, last rune) string { + r := "" + c := first + r += string(c) + for c < last { + c++ + r += string(c) + } + return r +} + +func charRemove(s, remove string) string { + r := "" +next: + for _, c := range s { + for _, x := range remove { + if c == x { + continue next + } + } + r += string(c) + } + return r +} + +type parser struct { + // Orig is the line in original casing, and upper in upper casing. We often match + // against upper for easy case insensitive handling as IMAP requires, but sometimes + // return from orig to keep the original case. + orig string + upper string + o int // Current offset in parsing. + contexts []string // What we're parsing, for error messages. 
+ conn *conn +} + +// toUpper upper cases bytes that are a-z. strings.ToUpper does too much. and +// would replace invalid bytes with unicode replacement characters, which would +// break our requirement that offsets into the original and upper case strings +// point to the same character. +func toUpper(s string) string { + r := []byte(s) + for i, c := range r { + if c >= 'a' && c <= 'z' { + r[i] = c - 0x20 + } + } + return string(r) +} + +func newParser(s string, conn *conn) *parser { + return &parser{s, toUpper(s), 0, nil, conn} +} + +func (p *parser) xerrorf(format string, args ...any) { + var context string + if len(p.contexts) > 0 { + context = strings.Join(p.contexts, ",") + } + panic(syntaxError{"", "", fmt.Errorf("%s (%sremaining data %q)", fmt.Sprintf(format, args...), context, p.orig[p.o:])}) +} + +func (p *parser) context(s string) func() { + p.contexts = append(p.contexts, s) + return func() { + p.contexts = p.contexts[:len(p.contexts)-1] + } +} + +func (p *parser) empty() bool { + return p.o == len(p.upper) +} + +func (p *parser) xempty() { + if !p.empty() { + p.xerrorf("leftover data") + } +} + +func (p *parser) hasPrefix(s string) bool { + return strings.HasPrefix(p.upper[p.o:], s) +} + +func (p *parser) take(s string) bool { + if !p.hasPrefix(s) { + return false + } + p.o += len(s) + return true +} + +func (p *parser) xtake(s string) { + if !p.take(s) { + p.xerrorf("expected %q", s) + } +} + +func (p *parser) xnonempty() { + if p.empty() { + p.xerrorf("unexpected end") + } +} + +func (p *parser) xtakeall() string { + r := p.orig[p.o:] + p.o = len(p.orig) + return r +} + +func (p *parser) xtake1n(n int, what string) string { + if n == 0 { + p.xerrorf("expected chars from %s", what) + } + return p.xtaken(n) +} + +func (p *parser) xtake1fn(fn func(i int, c rune) bool) string { + i := 0 + s := "" + for _, c := range p.upper[p.o:] { + if !fn(i, c) { + break + } + s += string(c) + i++ + } + if s == "" { + p.xerrorf("expected at least one character") + } + 
p.o += len(s) + return s +} + +func (p *parser) xtakechars(s string, what string) string { + p.xnonempty() + for i, c := range p.orig[p.o:] { + if !contains(s, c) { + return p.xtake1n(i, what) + } + } + return p.xtakeall() +} + +func (p *parser) xtaken(n int) string { + if p.o+n > len(p.orig) { + p.xerrorf("not enough data") + } + r := p.orig[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) peekn(n int) (string, bool) { + if len(p.upper[p.o:]) < n { + return "", false + } + return p.upper[p.o : p.o+n], true +} + +func (p *parser) space() bool { + return p.take(" ") +} + +func (p *parser) xspace() { + if !p.space() { + p.xerrorf("expected space") + } +} + +func (p *parser) digits() string { + var n int + for _, c := range p.upper[p.o:] { + if c >= '0' && c <= '9' { + n++ + } + } + if n == 0 { + return "" + } + s := p.upper[p.o : p.o+n] + p.o += n + return s +} + +func (p *parser) nznumber() (uint32, bool) { + o := p.o + for o < len(p.upper) && p.upper[o] >= '0' && p.upper[o] <= '9' { + o++ + } + if o == p.o { + return 0, false + } + if n, err := strconv.ParseUint(p.upper[p.o:o], 10, 32); err != nil { + return 0, false + } else if n == 0 { + return 0, false + } else { + p.o = o + return uint32(n), true + } +} + +func (p *parser) xnznumber() uint32 { + n, ok := p.nznumber() + if !ok { + p.xerrorf("expected non-zero number") + } + return n +} + +func (p *parser) number() (uint32, bool) { + o := p.o + for o < len(p.upper) && p.upper[o] >= '0' && p.upper[o] <= '9' { + o++ + } + if o == p.o { + return 0, false + } + n, err := strconv.ParseUint(p.upper[p.o:o], 10, 32) + if err != nil { + return 0, false + } + p.o = o + return uint32(n), true +} + +func (p *parser) xnumber() uint32 { + n, ok := p.number() + if !ok { + p.xerrorf("expected number") + } + return n +} + +func (p *parser) xnumber64() int64 { + s := p.digits() + if s == "" { + p.xerrorf("expected number64") + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + p.xerrorf("parsing number64 %q: 
%v", s, err) + } + return v +} + +// l should be a list of uppercase words, the first match is returned +func (p *parser) takelist(l ...string) (string, bool) { + for _, w := range l { + if p.take(w) { + return w, true + } + } + return "", false +} + +func (p *parser) xtakelist(l ...string) string { + w, ok := p.takelist(l...) + if !ok { + p.xerrorf("expected one of %s", strings.Join(l, ",")) + } + return w +} + +func (p *parser) xstring() (r string) { + if p.take(`"`) { + esc := false + r := "" + for i, c := range p.orig[p.o:] { + if c == '\\' { + esc = true + } else if c == '\x00' || c == '\r' || c == '\n' { + p.xerrorf("invalid nul, cr or lf in string") + } else if esc { + if c == '\\' || c == '"' { + r += string(c) + esc = false + } else { + p.xerrorf("invalid escape char %c", c) + } + } else if c == '"' { + p.o += i + 1 + return r + } else { + r += string(c) + } + } + p.xerrorf("missing closing dquote in string") + } + size, sync := p.xliteralSize(100*1024, false) + s := p.conn.xreadliteral(size, sync) + line := p.conn.readline(false) + p.orig, p.upper, p.o = line, toUpper(line), 0 + return s +} + +func (p *parser) xnil() { + p.xtake("NIL") +} + +// Returns NIL as empty string. 
+func (p *parser) xnilString() string { + if p.take("NIL") { + return "" + } + return p.xstring() +} + +func (p *parser) xastring() string { + if p.hasPrefix(`"`) || p.hasPrefix("{") || p.hasPrefix("~{") { + return p.xstring() + } + return p.xtakechars(astringChar, "astring") +} + +func contains(s string, c rune) bool { + for _, x := range s { + if x == c { + return true + } + } + return false +} + +func (p *parser) xtag() string { + p.xnonempty() + for i, c := range p.orig[p.o:] { + if c == '+' || !contains(astringChar, c) { + return p.xtake1n(i, "tag") + } + } + return p.xtakeall() +} + +func (p *parser) xcommand() string { + for i, c := range p.upper[p.o:] { + if !(c >= 'A' && c <= 'Z' || c == ' ' && p.upper[p.o:p.o+i] == "UID") { + return p.xtake1n(i, "command") + } + } + return p.xtakeall() +} + +func (p *parser) remainder() string { + return p.orig[p.o:] +} + +func (p *parser) xflag() string { + return p.xtakelist(`\`, "$") + p.xatom() +} + +func (p *parser) xflagList() (l []string) { + p.xtake("(") + if !p.hasPrefix(")") { + l = append(l, p.xflag()) + } + for !p.take(")") { + p.xspace() + l = append(l, p.xflag()) + } + return +} + +func (p *parser) xatom() string { + return p.xtakechars(atomChar, "atom") +} + +func (p *parser) xmailbox() string { + s := p.xastring() + // UTF-7 is deprecated in IMAP4rev2. IMAP4rev1 does not fully forbid + // UTF-8 returned in mailbox names. We'll do our best by attempting to + // decode utf-7. But if that doesn't work, we'll just use the original + // string. 
+ // ../rfc/3501:964 + if !p.conn.enabled[capIMAP4rev2] { + ns, err := utf7decode(s) + if err != nil { + p.conn.log.Infox("decoding utf7 or mailbox name", err, mlog.Field("name", s)) + } else { + s = ns + } + } + return s +} + +// ../rfc/9051:6605 +func (p *parser) xlistMailbox() string { + if p.hasPrefix(`"`) || p.hasPrefix("{") { + return p.xstring() + } + return p.xtakechars(atomChar+listWildcards+respSpecials, "list-char") +} + +// ../rfc/9051:6707 ../rfc/9051:6848 ../rfc/5258:1095 ../rfc/5258:1169 ../rfc/5258:1196 +func (p *parser) xmboxOrPat() ([]string, bool) { + if !p.take("(") { + return []string{p.xlistMailbox()}, false + } + l := []string{p.xlistMailbox()} + for !p.take(")") { + p.xspace() + l = append(l, p.xlistMailbox()) + } + return l, true +} + +// ../rfc/9051:7056 +// RECENT only in ../rfc/3501:5047 +// APPENDLIMIT is from ../rfc/7889:252 +func (p *parser) xstatusAtt() string { + return p.xtakelist("MESSAGES", "UIDNEXT", "UIDVALIDITY", "UNSEEN", "DELETED", "SIZE", "RECENT", "APPENDLIMIT") +} + +// ../rfc/9051:7133 ../rfc/9051:7034 +func (p *parser) xnumSet() (r numSet) { + defer p.context("numSet")() + if p.take("$") { + return numSet{searchResult: true} + } + r.ranges = append(r.ranges, p.xnumRange()) + for p.take(",") { + r.ranges = append(r.ranges, p.xnumRange()) + } + return r +} + +// parse numRange, which can be just a setNumber. +func (p *parser) xnumRange() (r numRange) { + if p.take("*") { + r.first.star = true + } else { + r.first.number = p.xnznumber() + } + if p.take(":") { + r.last = &setNumber{} + if p.take("*") { + r.last.star = true + } else { + r.last.number = p.xnznumber() + } + } + return +} + +// ../rfc/9051:6989 ../rfc/3501:4977 +func (p *parser) xsectionMsgtext() (r *sectionMsgtext) { + defer p.context("sectionMsgtext")() + msgtextWords := []string{"HEADER.FIELDS.NOT", "HEADER.FIELDS", "HEADER", "TEXT"} + w := p.xtakelist(msgtextWords...) 
+ r = §ionMsgtext{s: w} + if strings.HasPrefix(w, "HEADER.FIELDS") { + p.xspace() + p.xtake("(") + r.headers = append(r.headers, textproto.CanonicalMIMEHeaderKey(p.xastring())) + for { + if p.take(")") { + break + } + p.xspace() + r.headers = append(r.headers, textproto.CanonicalMIMEHeaderKey(p.xastring())) + } + } + return +} + +// ../rfc/9051:6999 ../rfc/3501:4991 +func (p *parser) xsectionSpec() (r *sectionSpec) { + defer p.context("parseSectionSpec")() + + n, ok := p.nznumber() + if !ok { + return §ionSpec{msgtext: p.xsectionMsgtext()} + } + defer p.context("part...")() + pt := §ionPart{} + pt.part = append(pt.part, n) + for { + if !p.take(".") { + break + } + if n, ok := p.nznumber(); ok { + pt.part = append(pt.part, n) + continue + } + if p.take("MIME") { + pt.text = §ionText{mime: true} + break + } + pt.text = §ionText{msgtext: p.xsectionMsgtext()} + break + } + return §ionSpec{part: pt} +} + +// ../rfc/9051:6985 ../rfc/3501:4975 +func (p *parser) xsection() *sectionSpec { + defer p.context("parseSection")() + p.xtake("[") + if p.take("]") { + return §ionSpec{} + } + r := p.xsectionSpec() + p.xtake("]") + return r +} + +// ../rfc/9051:6841 +func (p *parser) xpartial() *partial { + p.xtake("<") + offset := p.xnumber() + p.xtake(".") + count := p.xnznumber() + p.xtake(">") + return &partial{offset, count} +} + +// ../rfc/9051:6987 +func (p *parser) xsectionBinary() (r []uint32) { + p.xtake("[") + if p.take("]") { + return nil + } + r = append(r, p.xnznumber()) + for { + if !p.take(".") { + break + } + r = append(r, p.xnznumber()) + } + p.xtake("]") + return r +} + +// ../rfc/9051:6557 ../rfc/3501:4751 +func (p *parser) xfetchAtt() (r fetchAtt) { + defer p.context("fetchAtt")() + words := []string{ + "ENVELOPE", "FLAGS", "INTERNALDATE", "RFC822.SIZE", "BODYSTRUCTURE", "UID", "BODY.PEEK", "BODY", "BINARY.PEEK", "BINARY.SIZE", "BINARY", + "RFC822.HEADER", "RFC822.TEXT", "RFC822", // older IMAP + } + f := p.xtakelist(words...) 
+ r.peek = strings.HasSuffix(f, ".PEEK") + r.field = strings.TrimSuffix(f, ".PEEK") + + switch r.field { + case "BODY": + if p.hasPrefix("[") { + r.section = p.xsection() + if p.hasPrefix("<") { + r.partial = p.xpartial() + } + } + case "BINARY": + r.sectionBinary = p.xsectionBinary() + if p.hasPrefix("<") { + r.partial = p.xpartial() + } + case "BINARY.SIZE": + r.sectionBinary = p.xsectionBinary() + } + return +} + +// ../rfc/9051:6553 ../rfc/3501:4748 +func (p *parser) xfetchAtts() []fetchAtt { + defer p.context("fetchAtts")() + + fields := func(l ...string) []fetchAtt { + r := make([]fetchAtt, len(l)) + for i, s := range l { + r[i] = fetchAtt{field: s} + } + return r + } + + if w, ok := p.takelist("ALL", "FAST", "FULL"); ok { + switch w { + case "ALL": + return fields("FLAGS", "INTERNALDATE", "RFC822.SIZE", "ENVELOPE") + case "FAST": + return fields("FLAGS", "INTERNALDATE", "RFC822.SIZE") + case "FULL": + return fields("FLAGS", "INTERNALDATE", "RFC822.SIZE", "ENVELOPE", "BODY") + } + panic("missing case") + } + + if !p.hasPrefix("(") { + return []fetchAtt{p.xfetchAtt()} + } + + l := []fetchAtt{} + p.xtake("(") + for { + l = append(l, p.xfetchAtt()) + if !p.take(" ") { + break + } + } + p.xtake(")") + return l +} + +func xint(p *parser, s string) int { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + p.xerrorf("bad int %q: %v", s, err) + } + return int(v) +} + +func (p *parser) digit() (string, bool) { + if p.empty() { + return "", false + } + c := p.orig[p.o] + if c < '0' || c > '9' { + return "", false + } + s := p.orig[p.o : p.o+1] + p.o++ + return s, true +} + +func (p *parser) xdigit() string { + s, ok := p.digit() + if !ok { + p.xerrorf("expected digit") + } + return s +} + +// ../rfc/9051:6492 ../rfc/3501:4695 +func (p *parser) xdateDayFixed() int { + if p.take(" ") { + return xint(p, p.xdigit()) + } + return xint(p, p.xdigit()+p.xdigit()) +} + +var months = []string{"jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", 
"dec"} + +// ../rfc/9051:6495 ../rfc/3501:4698 +func (p *parser) xdateMonth() time.Month { + s := strings.ToLower(p.xtaken(3)) + for i, m := range months { + if m == s { + return time.Month(1 + i) + } + } + p.xerrorf("unknown month %q", s) + return 0 +} + +// ../rfc/9051:7120 ../rfc/3501:5067 +func (p *parser) xtime() (int, int, int) { + h := xint(p, p.xtaken(2)) + p.xtake(":") + m := xint(p, p.xtaken(2)) + p.xtake(":") + s := xint(p, p.xtaken(2)) + return h, m, s +} + +// ../rfc/9051:7159 ../rfc/3501:5083 +func (p *parser) xzone() (string, int) { + sign := p.xtakelist("+", "-") + s := p.xtaken(4) + v := xint(p, s) + seconds := (v/100)*3600 + (v%100)*60 + if sign[0] == '-' { + seconds = -seconds + } + return sign + s, seconds +} + +// ../rfc/9051:6502 ../rfc/3501:4713 +func (p *parser) xdateTime() time.Time { + // DQUOTE date-day-fixed "-" date-month "-" date-year SP time SP zone DQUOTE + p.xtake(`"`) + day := p.xdateDayFixed() + p.xtake("-") + month := p.xdateMonth() + p.xtake("-") + year := xint(p, p.xtaken(4)) + p.xspace() + hours, minutes, seconds := p.xtime() + p.xspace() + name, zoneSeconds := p.xzone() + p.xtake(`"`) + loc := time.FixedZone(name, zoneSeconds) + return time.Date(year, month, day, hours, minutes, seconds, 0, loc) +} + +// ../rfc/9051:6655 ../rfc/7888:330 ../rfc/3501:4801 +func (p *parser) xliteralSize(maxSize int64, lit8 bool) (size int64, sync bool) { + // todo: enforce that we get non-binary when ~ isn't present? 
+ if lit8 { + p.take("~") + } + p.xtake("{") + size = p.xnumber64() + if maxSize > 0 && size > maxSize { + // ../rfc/7888:249 + line := fmt.Sprintf("* BYE [ALERT] Max literal size %d is larger than allowed %d in this context", size, maxSize) + panic(syntaxError{line, "TOOBIG", fmt.Errorf("literal too big")}) + } + + sync = !p.take("+") + p.xtake("}") + p.xempty() + return size, sync +} + +var searchKeyWords = []string{ + "ALL", "ANSWERED", "BCC", + "BEFORE", "BODY", + "CC", "DELETED", "FLAGGED", + "FROM", "KEYWORD", + "NEW", "OLD", "ON", "RECENT", "SEEN", + "SINCE", "SUBJECT", + "TEXT", "TO", + "UNANSWERED", "UNDELETED", "UNFLAGGED", + "UNKEYWORD", "UNSEEN", + "DRAFT", "HEADER", + "LARGER", "NOT", + "OR", + "SENTBEFORE", "SENTON", + "SENTSINCE", "SMALLER", + "UID", "UNDRAFT", +} + +// ../rfc/9051:6923 ../rfc/3501:4957 +// differences: rfc 9051 removes NEW, OLD, RECENT and makes SMALLER and LARGER number64 instead of number. +func (p *parser) xsearchKey() *searchKey { + if p.take("(") { + sk := p.xsearchKey() + l := []searchKey{*sk} + for !p.take(")") { + p.xspace() + l = append(l, *p.xsearchKey()) + } + return &searchKey{searchKeys: l} + } + + w, ok := p.takelist(searchKeyWords...) 
+ if !ok { + seqs := p.xnumSet() + return &searchKey{seqSet: &seqs} + } + + sk := &searchKey{op: w} + switch sk.op { + case "ALL": + case "ANSWERED": + case "BCC": + p.xspace() + sk.astring = p.xastring() + case "BEFORE": + p.xspace() + sk.date = p.xdate() + case "BODY": + p.xspace() + sk.astring = p.xastring() + case "CC": + p.xspace() + sk.astring = p.xastring() + case "DELETED": + case "FLAGGED": + case "FROM": + p.xspace() + sk.astring = p.xastring() + case "KEYWORD": + p.xspace() + sk.atom = p.xatom() + case "NEW": + case "OLD": + case "ON": + p.xspace() + sk.date = p.xdate() + case "RECENT": + case "SEEN": + case "SINCE": + p.xspace() + sk.date = p.xdate() + case "SUBJECT": + p.xspace() + sk.astring = p.xastring() + case "TEXT": + p.xspace() + sk.astring = p.xastring() + case "TO": + p.xspace() + sk.astring = p.xastring() + case "UNANSWERED": + case "UNDELETED": + case "UNFLAGGED": + case "UNKEYWORD": + p.xspace() + sk.atom = p.xatom() + case "UNSEEN": + case "DRAFT": + case "HEADER": + p.xspace() + sk.headerField = p.xastring() + p.xspace() + sk.astring = p.xastring() + case "LARGER": + p.xspace() + sk.number = p.xnumber64() + case "NOT": + p.xspace() + sk.searchKey = p.xsearchKey() + case "OR": + p.xspace() + sk.searchKey = p.xsearchKey() + p.xspace() + sk.searchKey2 = p.xsearchKey() + case "SENTBEFORE": + p.xspace() + sk.date = p.xdate() + case "SENTON": + p.xspace() + sk.date = p.xdate() + case "SENTSINCE": + p.xspace() + sk.date = p.xdate() + case "SMALLER": + p.xspace() + sk.number = p.xnumber64() + case "UID": + p.xspace() + sk.uidSet = p.xnumSet() + case "UNDRAFT": + default: + p.xerrorf("missing case for op %q", sk.op) + } + return sk +} + +// ../rfc/9051:6489 ../rfc/3501:4692 +func (p *parser) xdateDay() int { + d := p.xdigit() + if s, ok := p.digit(); ok { + d += s + } + return xint(p, d) +} + +// ../rfc/9051:6487 ../rfc/3501:4690 +func (p *parser) xdate() time.Time { + dquote := p.take(`"`) + day := p.xdateDay() + p.xtake("-") + mon := 
p.xdateMonth() + p.xtake("-") + year := xint(p, p.xtaken(4)) + if dquote { + p.take(`"`) + } + return time.Date(year, mon, day, 0, 0, 0, 0, time.UTC) +} + +// ../rfc/9051:7090 ../rfc/4466:716 +func (p *parser) xtaggedExtLabel() string { + return p.xtake1fn(func(i int, c rune) bool { + return c >= 'A' && c <= 'Z' || c == '-' || c == '_' || c == '.' || i > 0 && (c >= '0' && c <= '9' || c == ':') + }) +} + +// no return value since we don't currently use the value. +// ../rfc/9051:7111 ../rfc/4466:749 +func (p *parser) xtaggedExtVal() { + if p.take("(") { + if p.take(")") { + return + } + p.xtaggedExtComp() + p.xtake(")") + } else { + p.xtaggedExtSimple() + } +} + +// ../rfc/9051:7109 ../rfc/4466:747 +func (p *parser) xtaggedExtSimple() { + s := p.digits() + if s == "" { + p.xnumSet() + } + + // This can be a number64, or the start of a sequence-set. A sequence-set can also + // start with a number, but only an uint32. After the number we'll try to continue + // parsing as a sequence-set. + _, err := strconv.ParseInt(s, 10, 64) + if err != nil { + p.xerrorf("parsing int: %v", err) + } + + if p.take(":") { + if !p.take("*") { + p.xnznumber() + } + } + for p.take(",") { + p.xnumRange() + } +} + +// ../rfc/9051:7111 ../rfc/4466:735 +func (p *parser) xtaggedExtComp() { + if p.take("(") { + p.xtaggedExtComp() + p.xtake(")") + return + } + p.xastring() + for p.space() { + p.xtaggedExtComp() + } +} diff --git a/imapserver/prefixconn.go b/imapserver/prefixconn.go new file mode 100644 index 0000000..a0372c3 --- /dev/null +++ b/imapserver/prefixconn.go @@ -0,0 +1,28 @@ +package imapserver + +import ( + "net" +) + +// prefixConn is a net.Conn with a buffer from which the first reads are satisfied. +// used for STARTTLS where already did a buffered read of initial TLS data. 
+type prefixConn struct { + prefix []byte + net.Conn +} + +func (c *prefixConn) Read(buf []byte) (int, error) { + if len(c.prefix) > 0 { + n := len(buf) + if n > len(c.prefix) { + n = len(c.prefix) + } + copy(buf[:n], c.prefix[:n]) + c.prefix = c.prefix[n:] + if len(c.prefix) == 0 { + c.prefix = nil + } + return n, nil + } + return c.Conn.Read(buf) +} diff --git a/imapserver/protocol.go b/imapserver/protocol.go new file mode 100644 index 0000000..9541db0 --- /dev/null +++ b/imapserver/protocol.go @@ -0,0 +1,186 @@ +package imapserver + +import ( + "fmt" + "time" + + "github.com/mjl-/mox/store" +) + +type numSet struct { + searchResult bool // "$" + ranges []numRange +} + +// containsSeq returns whether seq is in the numSet, given uids and (saved) searchResult. +// uids and searchResult must be sorted. searchResult can have uids that are no longer in uids. +func (ss numSet) containsSeq(seq msgseq, uids []store.UID, searchResult []store.UID) bool { + if len(uids) == 0 { + return false + } + if ss.searchResult { + uid := uids[int(seq)-1] + return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0 + } + for _, r := range ss.ranges { + first := r.first.number + if r.first.star { + first = 1 + } + last := first + if r.last != nil { + last = r.last.number + if r.last.star { + last = uint32(len(uids)) + } + } + if last > uint32(len(uids)) { + last = uint32(len(uids)) + } + if uint32(seq) >= first && uint32(seq) <= last { + return true + } + } + return false +} + +func (ss numSet) containsUID(uid store.UID, uids []store.UID, searchResult []store.UID) bool { + if len(uids) == 0 { + return false + } + if ss.searchResult { + return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0 + } + for _, r := range ss.ranges { + first := store.UID(r.first.number) + if r.first.star { + first = uids[0] + } + last := first + // Num in :* can be larger than last, but it still matches the last... + // Similar for *:. 
../rfc/9051:4814 + if r.last != nil { + last = store.UID(r.last.number) + if r.last.star { + last = uids[len(uids)-1] + if last > first { + first = last + } + } else if r.first.star && last < first { + last = first + } + } + if uid < first || uid > last { + continue + } + if uidSearch(uids, uid) > 0 { + return true + } + } + return false +} + +func (ss numSet) String() string { + if ss.searchResult { + return "$" + } + s := "" + for _, r := range ss.ranges { + if s != "" { + s += "," + } + if r.first.star { + s += "*" + } else { + s += fmt.Sprintf("%d", r.first.number) + } + if r.last == nil { + if r.first.star { + panic("invalid numSet range first star without last") + } + continue + } + s += ":" + if r.last.star { + s += "*" + } else { + s += fmt.Sprintf("%d", r.last.number) + } + } + return s +} + +type setNumber struct { + number uint32 + star bool +} + +type numRange struct { + first setNumber + last *setNumber // if nil, this numRange is just a setNumber in "first" and first.star will be false +} + +type partial struct { + offset uint32 + count uint32 +} + +type sectionPart struct { + part []uint32 + text *sectionText +} + +type sectionText struct { + mime bool // if "MIME" + msgtext *sectionMsgtext +} + +// a non-nil *sectionSpec with nil msgtext & nil part means there were []'s, but nothing inside. e.g. "BODY[]". +type sectionSpec struct { + msgtext *sectionMsgtext + part *sectionPart +} + +type sectionMsgtext struct { + s string // "HEADER", "HEADER.FIELDS", "HEADER.FIELDS.NOT", "TEXT" + headers []string // for "HEADER.FIELDS"* +} + +type fetchAtt struct { + field string // uppercase, eg "ENVELOPE", "BODY". ".PEEK" is removed. + peek bool + section *sectionSpec + sectionBinary []uint32 + partial *partial +} + +type searchKey struct { + // Only one of searchKeys, seqSet and op can be non-nil/non-empty. + searchKeys []searchKey // In case of nested/multiple keys. Also for the top-level command. + seqSet *numSet // In case of bare sequence set. 
For op UID, field uidSet contains the parameter. + op string // Determines which of the fields below are set. + headerField string + astring string + date time.Time + atom string + number int64 + searchKey *searchKey + searchKey2 *searchKey + uidSet numSet +} + +func compactUIDSet(l []store.UID) (r numSet) { + for len(l) > 0 { + e := 1 + for ; e < len(l) && l[e] == l[e-1]+1; e++ { + } + first := setNumber{number: uint32(l[0])} + var last *setNumber + if e > 1 { + last = &setNumber{number: uint32(l[e-1])} + } + r.ranges = append(r.ranges, numRange{first, last}) + l = l[e:] + } + return +} diff --git a/imapserver/protocol_test.go b/imapserver/protocol_test.go new file mode 100644 index 0000000..4870e5a --- /dev/null +++ b/imapserver/protocol_test.go @@ -0,0 +1,61 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/store" +) + +func TestNumSetContains(t *testing.T) { + num := func(v uint32) *setNumber { + return &setNumber{v, false} + } + star := &setNumber{star: true} + + check := func(v bool) { + t.Helper() + if !v { + t.Fatalf("bad") + } + } + + ss0 := numSet{true, nil} // "$" + check(ss0.containsSeq(1, []store.UID{2}, []store.UID{2})) + check(!ss0.containsSeq(1, []store.UID{2}, []store.UID{})) + + check(ss0.containsUID(1, []store.UID{1}, []store.UID{1})) + check(ss0.containsUID(2, []store.UID{1, 2, 3}, []store.UID{2})) + check(!ss0.containsUID(2, []store.UID{1, 2, 3}, []store.UID{})) + check(!ss0.containsUID(2, []store.UID{}, []store.UID{2})) + + ss1 := numSet{false, []numRange{{*num(1), nil}}} // Single number 1. 
+ check(ss1.containsSeq(1, []store.UID{2}, nil)) + check(!ss1.containsSeq(2, []store.UID{1, 2}, nil)) + + check(ss1.containsUID(1, []store.UID{1}, nil)) + check(ss1.containsSeq(1, []store.UID{2}, nil)) + check(!ss1.containsSeq(2, []store.UID{1, 2}, nil)) + + // 2:* + ss2 := numSet{false, []numRange{{*num(2), star}}} + check(!ss2.containsSeq(1, []store.UID{2}, nil)) + check(ss2.containsSeq(2, []store.UID{4, 5}, nil)) + check(ss2.containsSeq(3, []store.UID{4, 5, 6}, nil)) + + check(ss2.containsUID(2, []store.UID{2}, nil)) + check(ss2.containsUID(3, []store.UID{1, 2, 3}, nil)) + check(ss2.containsUID(2, []store.UID{2}, nil)) + check(!ss2.containsUID(2, []store.UID{4, 5}, nil)) + check(!ss2.containsUID(2, []store.UID{1}, nil)) + + // *:2 + ss3 := numSet{false, []numRange{{*star, num(2)}}} + check(ss3.containsSeq(1, []store.UID{2}, nil)) + check(ss3.containsSeq(2, []store.UID{4, 5}, nil)) + check(!ss3.containsSeq(3, []store.UID{1, 2, 3}, nil)) + + check(ss3.containsUID(1, []store.UID{1}, nil)) + check(ss3.containsUID(2, []store.UID{1, 2, 3}, nil)) + check(!ss3.containsUID(1, []store.UID{2, 3}, nil)) + check(!ss3.containsUID(3, []store.UID{1, 2, 3}, nil)) +} diff --git a/imapserver/rename_test.go b/imapserver/rename_test.go new file mode 100644 index 0000000..552b30d --- /dev/null +++ b/imapserver/rename_test.go @@ -0,0 +1,81 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +// todo: check that UIDValidity is indeed updated properly. +func TestRename(t *testing.T) { + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc2.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "rename") // Missing parameters. + tc.transactf("bad", "rename x") // Missing destination. + tc.transactf("bad", "rename x y ") // Leftover data. + + tc.transactf("no", "rename doesnotexist newbox") // Does not exist. 
+ tc.xcode("NONEXISTENT") // ../rfc/9051:5140 + tc.transactf("no", `rename "Sent" "Trash"`) // Already exists. + tc.xcode("ALREADYEXISTS") + + tc.client.Create("x") + tc.client.Subscribe("sub") + tc.client.Create("a/b/c") + tc.client.Subscribe("x/y/c") // For later rename, but not affected by rename of x. + tc2.transactf("ok", "noop") // Drain. + + tc.transactf("ok", "rename x y") + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "y", OldName: "x"}) + + // Rename to a mailbox that only exists in database as subscribed. + tc.transactf("ok", "rename y sub") + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "sub", OldName: "y"}) + + // Cannot rename a child to a parent. It already exists. + tc.transactf("no", "rename a/b/c a/b") + tc.xcode("ALREADYEXISTS") + tc.transactf("no", "rename a/b a") + tc.xcode("ALREADYEXISTS") + + tc2.transactf("ok", "noop") // Drain. + tc.transactf("ok", "rename a/b x/y") // This will cause new parent "x" to be created, and a/b and a/b/c to be renamed. + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "x"}, imapclient.UntaggedList{Separator: '/', Mailbox: "x/y", OldName: "a/b"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "x/y/c", OldName: "a/b/c"}) + + tc.client.Create("k/l") + tc.transactf("ok", "rename k/l k/l/m") // With "l" renamed, a new "k" will be created. + tc.transactf("ok", `list "" "k*" return (subscribed)`) + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "k"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "k/l"}, imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/m"}) + + // Similar, but with missing parent not subscribed. 
+ tc.transactf("ok", "rename k/l/m k/ll") + tc.transactf("ok", "delete k/l") + tc.transactf("ok", "rename k/ll k/l") // Restored to previous mailboxes now. + tc.client.Unsubscribe("k") + tc.transactf("ok", "rename k/l k/l/m") // With "l" renamed, a new "k" will be created. + tc.transactf("ok", `list "" "k*" return (subscribed)`) + tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "k"}, imapclient.UntaggedList{Flags: []string{"\\Subscribed"}, Separator: '/', Mailbox: "k/l"}, imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/m"}) + + // Renaming inbox keeps inbox in existence and does not rename children. + tc.transactf("ok", "create inbox/a") + tc.transactf("ok", "rename inbox minbox") + tc.transactf("ok", `list "" (inbox inbox/a minbox)`) + tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox"}, imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Separator: '/', Mailbox: "minbox"}) + + // Renaming to new hiearchy that does not have any subscribes. + tc.transactf("ok", "rename minbox w/w") + tc.transactf("ok", `list "" "w*"`) + tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "w"}, imapclient.UntaggedList{Separator: '/', Mailbox: "w/w"}) + + // todo: test create+delete+rename of/to a name results in a higher uidvalidity. +} diff --git a/imapserver/search.go b/imapserver/search.go new file mode 100644 index 0000000..2f623cd --- /dev/null +++ b/imapserver/search.go @@ -0,0 +1,463 @@ +package imapserver + +import ( + "fmt" + "io" + "net/textproto" + "strings" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/store" +) + +// Search returns messages matching criteria specified in parameters. 
+// +// State: Selected +func (c *conn) cmdxSearch(isUID bool, tag, cmd string, p *parser) { + // Command: ../rfc/9051:3716 ../rfc/4731:31 ../rfc/4466:354 ../rfc/3501:2723 + // Examples: ../rfc/9051:3986 ../rfc/4731:153 ../rfc/3501:2975 + // Syntax: ../rfc/9051:6918 ../rfc/4466:611 ../rfc/3501:4954 + + // We will respond with ESEARCH instead of SEARCH if "RETURN" is present or for IMAP4rev2. + var eargs map[string]bool // Options except SAVE. Nil means old-style SEARCH response. + var save bool // For SAVE option. Kept separately for easier handling of MIN/MAX later. + + // IMAP4rev2 always returns ESEARCH, even with absent RETURN. + if c.enabled[capIMAP4rev2] { + eargs = map[string]bool{} + } + // ../rfc/9051:6967 + if p.take(" RETURN (") { + eargs = map[string]bool{} + + for !p.take(")") { + if len(eargs) > 0 || save { + p.xspace() + } + if w, ok := p.takelist("MIN", "MAX", "ALL", "COUNT", "SAVE"); ok { + if w == "SAVE" { + save = true + } else { + eargs[w] = true + } + } else { + // ../rfc/4466:378 ../rfc/9051:3745 + xsyntaxErrorf("ESEARCH result option %q not supported", w) + } + } + } + // ../rfc/4731:149 ../rfc/9051:3737 + if eargs != nil && len(eargs) == 0 && !save { + eargs["ALL"] = true + } + + // If UTF8=ACCEPT is enabled, we should not accept any charset. We are a bit more + // relaxed (reasonable?) and still allow US-ASCII and UTF-8. ../rfc/6855:198 + if p.take(" CHARSET ") { + charset := strings.ToUpper(p.xastring()) + if charset != "US-ASCII" && charset != "UTF-8" { + // ../rfc/3501:2771 ../rfc/9051:3836 + xusercodeErrorf("BADCHARSET", "only US-ASCII and UTF-8 supported") + } + } + p.xspace() + sk := &searchKey{ + searchKeys: []searchKey{*p.xsearchKey()}, + } + for !p.empty() { + p.xspace() + sk.searchKeys = append(sk.searchKeys, *p.xsearchKey()) + } + + // Even in case of error, we ensure search result is changed. + if save { + c.searchResult = []store.UID{} + } + + // Note: we only hold the account rlock for verifying the mailbox at the start. 
+ c.account.RLock() + runlock := c.account.RUnlock + // Note: in a defer because we replace it below. + defer func() { + runlock() + }() + + // If we only have a MIN and/or MAX, we can stop processing as soon as we + // have those matches. + var min, max int + if eargs["MIN"] { + min = 1 + } + if eargs["MAX"] { + max = 1 + } + + var expungeIssued bool + + var uids []store.UID + c.xdbread(func(tx *bstore.Tx) { + c.xmailboxID(tx, c.mailboxID) // Validate. + runlock() + runlock = func() {} + + // Normal forward search when we don't have MAX only. + var lastIndex = -1 + if eargs == nil || max == 0 || len(eargs) != 1 { + for i, uid := range c.uids { + lastIndex = i + if c.searchMatch(tx, msgseq(i+1), uid, *sk, &expungeIssued) { + uids = append(uids, uid) + if min == 1 && min+max == len(eargs) { + break + } + } + } + } + // And reverse search for MAX if we have only MAX or MAX combined with MIN. + if max == 1 && (len(eargs) == 1 || min+max == len(eargs)) { + for i := len(c.uids) - 1; i > lastIndex; i-- { + if c.searchMatch(tx, msgseq(i+1), c.uids[i], *sk, &expungeIssued) { + uids = append(uids, c.uids[i]) + break + } + } + } + }) + + if eargs == nil { + // Old-style SEARCH response. We must spell out each number. So we may be splitting + // into multiple responses. ../rfc/9051:6809 ../rfc/3501:4833 + for len(uids) > 0 { + n := len(uids) + if n > 100 { + n = 100 + } + s := "" + for _, v := range uids[:n] { + if !isUID { + v = store.UID(c.xsequence(v)) + } + s += " " + fmt.Sprintf("%d", v) + } + uids = uids[n:] + c.bwritelinef("* SEARCH%s", s) + } + } else { + // New-style ESEARCH response. ../rfc/9051:6546 ../rfc/4466:522 + + if save { + // ../rfc/9051:3784 ../rfc/5182:13 + c.searchResult = uids + if sanityChecks { + checkUIDs(c.searchResult) + } + } + + // No untagged ESEARCH response if nothing was requested. 
../rfc/9051:4160 + if len(eargs) > 0 { + resp := fmt.Sprintf("* ESEARCH (TAG %s)", tag) + if isUID { + resp += " UID" + } + + // NOTE: we are converting UIDs to msgseq in the uids slice (if needed) while + // keeping the "uids" name! + if !isUID { + // If searchResult is hanging on to the slice, we need to work on a copy. + if save { + nuids := make([]store.UID, len(uids)) + copy(nuids, uids) + uids = nuids + } + for i, uid := range uids { + uids[i] = store.UID(c.xsequence(uid)) + } + } + + // If no matches, then no MIN/MAX response. ../rfc/4731:98 ../rfc/9051:3758 + if eargs["MIN"] && len(uids) > 0 { + resp += fmt.Sprintf(" MIN %d", uids[0]) + } + if eargs["MAX"] && len(uids) > 0 { + resp += fmt.Sprintf(" MAX %d", uids[len(uids)-1]) + } + if eargs["COUNT"] { + resp += fmt.Sprintf(" COUNT %d", len(uids)) + } + if eargs["ALL"] && len(uids) > 0 { + resp += fmt.Sprintf(" ALL %s", compactUIDSet(uids).String()) + } + c.bwritelinef("%s", resp) + } + } + if expungeIssued { + // ../rfc/9051:5102 + c.writeresultf("%s OK [EXPUNGEISSUED] done", tag) + } else { + c.ok(tag, cmd) + } +} + +type search struct { + c *conn + tx *bstore.Tx + seq msgseq + uid store.UID + mr *store.MsgReader + m store.Message + p *message.Part + expungeIssued *bool +} + +func (c *conn) searchMatch(tx *bstore.Tx, seq msgseq, uid store.UID, sk searchKey, expungeIssued *bool) bool { + s := search{c: c, tx: tx, seq: seq, uid: uid, expungeIssued: expungeIssued} + defer func() { + if s.mr != nil { + err := s.mr.Close() + c.xsanity(err, "closing messagereader") + s.mr = nil + } + }() + return s.match(sk) +} + +func (s *search) match(sk searchKey) bool { + c := s.c + + if sk.searchKeys != nil { + for _, ssk := range sk.searchKeys { + if !s.match(ssk) { + return false + } + } + return true + } else if sk.seqSet != nil { + return sk.seqSet.containsSeq(s.seq, c.uids, c.searchResult) + } + + filterHeader := func(field, value string) bool { + lower := strings.ToLower(value) + h, err := s.p.Header() + if err != nil 
{ + c.log.Debugx("parsing message header", err, mlog.Field("uid", s.uid)) + return false + } + for _, v := range h.Values(field) { + if strings.Contains(strings.ToLower(v), lower) { + return true + } + } + return false + } + + // We handle ops by groups that need increasing details about the message. + + switch sk.op { + case "ALL": + return true + case "NEW": + // We do not implement the RECENT flag, so messages cannot be NEW. + return false + case "OLD": + // We treat all messages as non-recent, so this means all messages. + return true + case "RECENT": + // We do not implement the RECENT flag. All messages are not recent. + return false + case "NOT": + return !s.match(*sk.searchKey) + case "OR": + return s.match(*sk.searchKey) || s.match(*sk.searchKey2) + case "UID": + return sk.uidSet.containsUID(s.uid, c.uids, c.searchResult) + } + + // Parsed message. + if s.mr == nil { + q := bstore.QueryTx[store.Message](s.tx) + q.FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: s.uid}) + m, err := q.Get() + if err == bstore.ErrAbsent { + // ../rfc/2180:607 + *s.expungeIssued = true + return false + } + xcheckf(err, "get message") + s.m = m + + // Closed by searchMatch after all (recursive) search.match calls are finished. + s.mr = c.account.MessageReader(m) + + if m.ParsedBuf == nil { + c.log.Error("missing parsed message") + } else { + p, err := m.LoadPart(s.mr) + xcheckf(err, "load parsed message") + s.p = &p + } + } + + // Parsed message, basic info. 
+ switch sk.op { + case "ANSWERED": + return s.m.Answered + case "DELETED": + return s.m.Deleted + case "FLAGGED": + return s.m.Flagged + case "KEYWORD": + switch sk.atom { + case "$Forwarded": + return s.m.Forwarded + case "$Junk": + return s.m.Junk + case "$NotJunk": + return s.m.Notjunk + case "$Phishing": + return s.m.Phishing + case "$MDNSent": + return s.m.MDNSent + default: + c.log.Info("search with unknown keyword", mlog.Field("keyword", sk.atom)) + return false + } + case "SEEN": + return s.m.Seen + case "UNANSWERED": + return !s.m.Answered + case "UNDELETED": + return !s.m.Deleted + case "UNFLAGGED": + return !s.m.Flagged + case "UNKEYWORD": + switch sk.atom { + case "$Forwarded": + return !s.m.Forwarded + case "$Junk": + return !s.m.Junk + case "$NotJunk": + return !s.m.Notjunk + case "$Phishing": + return !s.m.Phishing + case "$MDNSent": + return !s.m.MDNSent + default: + c.log.Info("search with unknown keyword", mlog.Field("keyword", sk.atom)) + return false + } + case "UNSEEN": + return !s.m.Seen + case "DRAFT": + return s.m.Draft + case "UNDRAFT": + return !s.m.Draft + case "BEFORE", "ON", "SINCE": + skdt := sk.date.Format("2006-01-02") + rdt := s.m.Received.Format("2006-01-02") + switch sk.op { + case "BEFORE": + return rdt < skdt + case "ON": + return rdt == skdt + case "SINCE": + return rdt >= skdt + } + panic("missing case") + case "LARGER": + return s.m.Size > sk.number + case "SMALLER": + return s.m.Size < sk.number + } + + if s.p == nil { + c.log.Info("missing parsed message, not matching", mlog.Field("uid", s.uid)) + return false + } + + // Parsed message, more info. 
+ switch sk.op { + case "BCC": + return filterHeader("Bcc", sk.astring) + case "BODY", "TEXT": + headerToo := sk.op == "TEXT" + lower := strings.ToLower(sk.astring) + return mailContains(c, s.uid, s.p, lower, headerToo) + case "CC": + return filterHeader("Cc", sk.astring) + case "FROM": + return filterHeader("From", sk.astring) + case "SUBJECT": + return filterHeader("Subject", sk.astring) + case "TO": + return filterHeader("To", sk.astring) + case "HEADER": + // ../rfc/9051:3895 + lower := strings.ToLower(sk.astring) + h, err := s.p.Header() + if err != nil { + c.log.Errorx("parsing header for search", err, mlog.Field("uid", s.uid)) + return false + } + k := textproto.CanonicalMIMEHeaderKey(sk.headerField) + for _, v := range h.Values(k) { + if lower == "" || strings.Contains(strings.ToLower(v), lower) { + return true + } + } + return false + case "SENTBEFORE", "SENTON", "SENTSINCE": + if s.p.Envelope == nil || s.p.Envelope.Date.IsZero() { + return false + } + dt := s.p.Envelope.Date.Format("2006-01-02") + skdt := sk.date.Format("2006-01-02") + switch sk.op { + case "SENTBEFORE": + return dt < skdt + case "SENTON": + return dt == skdt + case "SENTSINCE": + return dt > skdt + } + panic("missing case") + } + panic(serverError{fmt.Errorf("missing case for search key op %q", sk.op)}) +} + +// mailContains returns whether the mail message or part represented by p contains (case-insensitive) string lower. +// The (decoded) text bodies are tested for a match. +// If headerToo is set, the header part of the message is checked as well. 
+func mailContains(c *conn, uid store.UID, p *message.Part, lower string, headerToo bool) bool { + if headerToo && mailContainsReader(c, uid, p.HeaderReader(), lower) { + return true + } + + if len(p.Parts) == 0 { + if p.MediaType != "TEXT" { + // todo: for types we could try to find a library for parsing and search in there too + return false + } + // todo: for html and perhaps other types, we could try to parse as text and filter on the text. + return mailContainsReader(c, uid, p.Reader(), lower) + } + for _, pp := range p.Parts { + headerToo = pp.MediaType == "MESSAGE" && (pp.MediaSubType == "RFC822" || pp.MediaSubType == "GLOBAL") + if mailContains(c, uid, &pp, lower, headerToo) { + return true + } + } + return false +} + +func mailContainsReader(c *conn, uid store.UID, r io.Reader, lower string) bool { + // todo: match as we read + buf, err := io.ReadAll(r) + if err != nil { + c.log.Errorx("reading for search text match", err, mlog.Field("uid", uid)) + return false + } + return strings.Contains(strings.ToLower(string(buf)), lower) +} diff --git a/imapserver/search_test.go b/imapserver/search_test.go new file mode 100644 index 0000000..2495cb0 --- /dev/null +++ b/imapserver/search_test.go @@ -0,0 +1,345 @@ +package imapserver + +import ( + "strconv" + "strings" + "testing" + "time" + + "github.com/mjl-/mox/imapclient" +) + +var searchMsg = strings.ReplaceAll(`Date: Mon, 1 Jan 2022 10:00:00 +0100 (CEST) +From: mjl +Subject: mox +To: mox +Cc: +Bcc: +Reply-To: +Message-Id: <123@mox.example> +MIME-Version: 1.0 +Content-Type: multipart/alternative; boundary=x + +--x +Content-Type: text/plain; charset=utf-8 + +this is plain text. + +--x +Content-Type: text/html; charset=utf-8 + +this is html. 
+ +--x-- +`, "\n", "\r\n") + +func (tc *testconn) xsearch(nums ...uint32) { + tc.t.Helper() + + if len(nums) == 0 { + tc.xnountagged() + return + } + tc.xuntagged(imapclient.UntaggedSearch(nums)) +} + +func (tc *testconn) xesearch(exp imapclient.UntaggedEsearch) { + tc.t.Helper() + + exp.Correlator = tc.client.LastTag + tc.xuntagged(exp) +} + +func TestSearch(t *testing.T) { + tc := start(t) + defer tc.close() + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + + // Add 5 and delete first 4 messages. So UIDs start at 5. + received := time.Date(2020, time.January, 1, 10, 0, 0, 0, time.UTC) + for i := 0; i < 5; i++ { + tc.client.Append("inbox", nil, &received, []byte(exampleMsg)) + } + tc.client.StoreFlagsSet("1:4", true, `\Deleted`) + tc.client.Expunge() + + received = time.Date(2022, time.January, 1, 9, 0, 0, 0, time.UTC) + tc.client.Append("inbox", nil, &received, []byte(searchMsg)) + + received = time.Date(2022, time.January, 1, 9, 0, 0, 0, time.UTC) + mostFlags := []string{ + `\Deleted`, + `\Seen`, + `\Answered`, + `\Flagged`, + `\Draft`, + `$Forwarded`, + `$Junk`, + `$Notjunk`, + `$Phishing`, + `$MDNSent`, + } + tc.client.Append("inbox", mostFlags, &received, []byte(searchMsg)) + + // We now have sequence numbers 1,2,3 and UIDs 5,6,7. + + tc.transactf("ok", "search all") + tc.xsearch(1, 2, 3) + + tc.transactf("ok", "uid search all") + tc.xsearch(5, 6, 7) + + tc.transactf("ok", "search answered") + tc.xsearch(3) + + tc.transactf("ok", `search bcc "bcc@mox.example"`) + tc.xsearch(2, 3) + + tc.transactf("ok", "search before 1-Jan-2038") + tc.xsearch(1, 2, 3) + tc.transactf("ok", "search before 1-Jan-2020") + tc.xsearch() // Before is about received, not date header of message. 
+ + tc.transactf("ok", `search body "Joe"`) + tc.xsearch(1) + tc.transactf("ok", `search body "this is plain text"`) + tc.xsearch(2, 3) + tc.transactf("ok", `search body "this is html"`) + tc.xsearch(2, 3) + + tc.transactf("ok", `search cc "xcc@mox.example"`) + tc.xsearch(2, 3) + + tc.transactf("ok", `search deleted`) + tc.xsearch(3) + + tc.transactf("ok", `search flagged`) + tc.xsearch(3) + + tc.transactf("ok", `search from "foobar@Blurdybloop.example"`) + tc.xsearch(1) + + tc.transactf("ok", `search keyword $Forwarded`) + tc.xsearch(3) + + tc.transactf("ok", `search new`) + tc.xsearch() // New requires a message to be recent. We pretend all messages are not recent. + + tc.transactf("ok", `search old`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search on 1-Jan-2022`) + tc.xsearch(2, 3) + + tc.transactf("ok", `search recent`) + tc.xsearch() + + tc.transactf("ok", `search seen`) + tc.xsearch(3) + + tc.transactf("ok", `search since 1-Jan-2020`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search subject "afternoon"`) + tc.xsearch(1) + + tc.transactf("ok", `search text "Joe"`) + tc.xsearch(1) + + tc.transactf("ok", `search to "mooch@owatagu.siam.edu.example"`) + tc.xsearch(1) + + tc.transactf("ok", `search unanswered`) + tc.xsearch(1, 2) + + tc.transactf("ok", `search undeleted`) + tc.xsearch(1, 2) + + tc.transactf("ok", `search unflagged`) + tc.xsearch(1, 2) + + tc.transactf("ok", `search unkeyword $Junk`) + tc.xsearch(1, 2) + + tc.transactf("ok", `search unseen`) + tc.xsearch(1, 2) + + tc.transactf("ok", `search draft`) + tc.xsearch(3) + + tc.transactf("ok", `search header "subject" "afternoon"`) + tc.xsearch(1) + + tc.transactf("ok", `search larger 1`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search not text "mox"`) + tc.xsearch(1) + + tc.transactf("ok", `search or seen unseen`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search or unseen seen`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search sentbefore 8-Feb-1994`) + tc.xsearch(1) + + 
tc.transactf("ok", `search senton 7-Feb-1994`) + tc.xsearch(1) + + tc.transactf("ok", `search sentsince 6-Feb-1994`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search smaller 9999999`) + tc.xsearch(1, 2, 3) + + tc.transactf("ok", `search uid 1`) + tc.xsearch() + + tc.transactf("ok", `search uid 5`) + tc.xsearch(1) + + tc.transactf("ok", `search undraft`) + tc.xsearch(1, 2) + + tc.transactf("no", `search charset unknown text "mox"`) + tc.transactf("ok", `search charset us-ascii text "mox"`) + tc.xsearch(2, 3) + tc.transactf("ok", `search charset utf-8 text "mox"`) + tc.xsearch(2, 3) + + // esearchall makes an UntaggedEsearch response with All set, for comparisons. + esearchall0 := func(ss string) imapclient.NumSet { + seqset := imapclient.NumSet{} + for _, rs := range strings.Split(ss, ",") { + t := strings.Split(rs, ":") + if len(t) > 2 { + panic("bad seqset") + } + var first uint32 + var last *uint32 + if t[0] != "*" { + v, err := strconv.ParseUint(t[0], 10, 32) + if err != nil { + panic("parse first") + } + first = uint32(v) + } + if len(t) == 2 { + if t[1] != "*" { + v, err := strconv.ParseUint(t[1], 10, 32) + if err != nil { + panic("parse last") + } + u := uint32(v) + last = &u + } + } + seqset.Ranges = append(seqset.Ranges, imapclient.NumRange{First: first, Last: last}) + } + return seqset + } + + esearchall := func(ss string) imapclient.UntaggedEsearch { + return imapclient.UntaggedEsearch{All: esearchall0(ss)} + } + + uintptr := func(v uint32) *uint32 { + return &v + } + + // Do new-style ESEARCH requests with RETURN. We should get an ESEARCH response. + tc.transactf("ok", "search return () all") + tc.xesearch(esearchall("1:3")) // Without any options, "ALL" is implicit. 
+ + tc.transactf("ok", "search return (min max count all) all") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1, Max: 3, Count: uintptr(3), All: esearchall0("1:3")}) + + tc.transactf("ok", "UID search return (min max count all) all") + tc.xesearch(imapclient.UntaggedEsearch{UID: true, Min: 5, Max: 7, Count: uintptr(3), All: esearchall0("5:7")}) + + tc.transactf("ok", "search return (min) all") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1}) + + tc.transactf("ok", "search return (min) 3") + tc.xesearch(imapclient.UntaggedEsearch{Min: 3}) + + tc.transactf("ok", "search return (min) NOT all") + tc.xesearch(imapclient.UntaggedEsearch{}) // Min not present if no match. + + tc.transactf("ok", "search return (max) all") + tc.xesearch(imapclient.UntaggedEsearch{Max: 3}) + + tc.transactf("ok", "search return (max) 1") + tc.xesearch(imapclient.UntaggedEsearch{Max: 1}) + + tc.transactf("ok", "search return (max) not all") + tc.xesearch(imapclient.UntaggedEsearch{}) // Max not present if no match. + + tc.transactf("ok", "search return (min max) all") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1, Max: 3}) + + tc.transactf("ok", "search return (min max) 1") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1, Max: 1}) + + tc.transactf("ok", "search return (min max) not all") + tc.xesearch(imapclient.UntaggedEsearch{}) + + tc.transactf("ok", "search return (all) not all") + tc.xesearch(imapclient.UntaggedEsearch{}) // All not present if no match. 
+ + tc.transactf("ok", "search return (min max all) not all") + tc.xesearch(imapclient.UntaggedEsearch{}) + + tc.transactf("ok", "search return (min max all count) not all") + tc.xesearch(imapclient.UntaggedEsearch{Count: uintptr(0)}) + + tc.transactf("ok", "search return (min max count all) 1,3") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1, Max: 3, Count: uintptr(2), All: esearchall0("1,3")}) + + tc.transactf("ok", "search return (min max count all) UID 5,7") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1, Max: 3, Count: uintptr(2), All: esearchall0("1,3")}) + + tc.transactf("ok", "uid search return (min max count all) 1,3") + tc.xesearch(imapclient.UntaggedEsearch{UID: true, Min: 5, Max: 7, Count: uintptr(2), All: esearchall0("5,7")}) + + tc.transactf("ok", "uid search return (min max count all) UID 5,7") + tc.xesearch(imapclient.UntaggedEsearch{UID: true, Min: 5, Max: 7, Count: uintptr(2), All: esearchall0("5,7")}) + + tc.transactf("no", `search return () charset unknown text "mox"`) + tc.transactf("ok", `search return () charset us-ascii text "mox"`) + tc.xesearch(esearchall("2:3")) + tc.transactf("ok", `search return () charset utf-8 text "mox"`) + tc.xesearch(esearchall("2:3")) + + tc.transactf("bad", `search return (unknown) all`) + + tc.transactf("ok", "search return (save) 2") + tc.xnountagged() // ../rfc/9051:3800 + tc.transactf("ok", "fetch $ (uid)") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(6)}}) + + tc.transactf("ok", "search return (all) $") + tc.xesearch(esearchall("2")) + + tc.transactf("ok", "search return (save) $") + tc.xnountagged() + + tc.transactf("ok", "search return (save all) all") + tc.xesearch(esearchall("1:3")) + + tc.transactf("ok", "search return (all save) all") + tc.xesearch(esearchall("1:3")) + + tc.transactf("ok", "search return (min save) all") + tc.xesearch(imapclient.UntaggedEsearch{Min: 1}) + tc.transactf("ok", "fetch $ (uid)") + 
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(5)}}) + + // Do a seemingly old-style search command with IMAP4rev2 enabled. We'll still get ESEARCH responses. + tc.client.Enable("IMAP4rev2") + tc.transactf("ok", `search undraft`) + tc.xesearch(esearchall("1:2")) +} diff --git a/imapserver/selectexamine_test.go b/imapserver/selectexamine_test.go new file mode 100644 index 0000000..dea4b1d --- /dev/null +++ b/imapserver/selectexamine_test.go @@ -0,0 +1,71 @@ +package imapserver + +import ( + "strings" + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestSelect(t *testing.T) { + testSelectExamine(t, false) +} + +func TestExamine(t *testing.T) { + testSelectExamine(t, true) +} + +// select and examine are pretty much the same. but examine opens readonly instead of readwrite. +func testSelectExamine(t *testing.T, examine bool) { + defer mockUIDValidity()() + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + cmd := "select" + okcode := "READ-WRITE" + if examine { + cmd = "examine" + okcode = "READ-ONLY" + } + + uclosed := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "CLOSED", More: "x"}} + flags := strings.Split(`\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent`, " ") + uflags := imapclient.UntaggedFlags(flags) + upermflags := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "PERMANENTFLAGS", CodeArg: imapclient.CodeList{Code: "PERMANENTFLAGS", Args: flags}, More: "x"}} + urecent := imapclient.UntaggedRecent(0) + uexists0 := imapclient.UntaggedExists(0) + uexists1 := imapclient.UntaggedExists(1) + uuidval1 := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "UIDVALIDITY", CodeArg: imapclient.CodeUint{Code: "UIDVALIDITY", Num: 1}, More: "x"}} + uuidnext1 := imapclient.UntaggedResult{Status: imapclient.OK, RespText: 
imapclient.RespText{Code: "UIDNEXT", CodeArg: imapclient.CodeUint{Code: "UIDNEXT", Num: 1}, More: "x"}} + ulist := imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox"} + uunseen := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "UNSEEN", CodeArg: imapclient.CodeUint{Code: "UNSEEN", Num: 1}, More: "x"}} + uuidnext2 := imapclient.UntaggedResult{Status: imapclient.OK, RespText: imapclient.RespText{Code: "UIDNEXT", CodeArg: imapclient.CodeUint{Code: "UIDNEXT", Num: 2}, More: "x"}} + + // Parameter required. + tc.transactf("bad", cmd) + + // Mailbox does not exist. + tc.transactf("no", cmd+" bogus") + + tc.transactf("ok", cmd+" inbox") + tc.xuntagged(uflags, upermflags, urecent, uexists0, uuidval1, uuidnext1, ulist) + tc.xcode(okcode) + + tc.transactf("ok", cmd+` "inbox"`) + tc.xuntagged(uclosed, uflags, upermflags, urecent, uexists0, uuidval1, uuidnext1, ulist) + tc.xcode(okcode) + + // Append a message. It will be reported as UNSEEN. + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.transactf("ok", cmd+" inbox") + tc.xuntagged(uclosed, uflags, upermflags, urecent, uunseen, uexists1, uuidval1, uuidnext2, ulist) + tc.xcode(okcode) + + // With imap4rev2, we no longer get untagged RECENT or untagged UNSEEN. + tc.client.Enable("imap4rev2") + tc.transactf("ok", cmd+" inbox") + tc.xuntagged(uclosed, uflags, upermflags, uexists1, uuidval1, uuidnext2, ulist) + tc.xcode(okcode) +} diff --git a/imapserver/server.go b/imapserver/server.go new file mode 100644 index 0000000..8362ea5 --- /dev/null +++ b/imapserver/server.go @@ -0,0 +1,3012 @@ +// Package imapserver implements an IMAPv4 server, rev2 (RFC 9051) and rev1 with extensions (RFC 3501 and more). +package imapserver + +/* +Implementation notes + +IMAP4rev2 includes functionality that was in extensions for IMAP4rev1. The +extensions sometimes include features not in IMAP4rev2. 
We want IMAP4rev1-only +implementations to use extensions, so we implement the full feature set of the +extension and announce it as capability. The extensions: LITERAL+, IDLE, +NAMESPACE, BINARY, UNSELECT, UIDPLUS, ESEARCH, SEARCHRES, SASL-IR, ENABLE, +LIST-EXTENDED, SPECIAL-USE, MOVE, UTF8=ONLY. + +We take a liberty with UTF8=ONLY. We are supposed to wait for ENABLE of +UTF8=ACCEPT or IMAP4rev2 before we respond with quoted strings that contain +non-ASCII UTF-8. But we will unconditionally accept UTF-8 at the moment. See +../rfc/6855:251 + +We always respond with utf8 mailbox names. We do parse utf7 (only in IMAP4rev1, +not in IMAP4rev2). ../rfc/3501:964 + +- We never execute multiple commands at the same time for a connection. We expect a client to open multiple connections instead. ../rfc/9051:1110 +- Do not write output on a connection with an account lock held. Writing can block, a slow client could block account operations. +- When handling commands that modify the selected mailbox, always check that the mailbox is not opened readonly. And always revalidate the selected mailbox, another session may have deleted the mailbox. +- After making changes to an account/mailbox/message, you must broadcast changes. You must do this with the account lock held. Otherwise, other later changes (e.g. message deliveries) may be made and broadcast before changes that were made earlier. Make sure to commit changes in the database first, because the commit may fail. +- Mailbox hierarchies are slash separated, no leading slash. We keep the case, except INBOX is renamed to Inbox, also for submailboxes in INBOX. We don't allow existence of a child where its parent does not exist. We have no \NoInferiors or \NoSelect. Newly created mailboxes are automatically subscribed. +*/ + +/* +- todo: do not return binary data for a fetch body. at least not for imap4rev1. we should be encoding it as base64? 
+- todo: on expunge we currently remove the message even if other sessions still have a reference to the uid. if they try to query the uid, they'll get an error. we could be nicer and only actually remove the message when the last reference has gone. we could add a new flag to store.Message marking the message as expunged, not give new session access to such messages, and make store remove them at startup, and clean them when the last session referencing the session goes. however, it will get much more complicated. renaming messages would need special handling. and should we do the same for removed mailboxes? +- todo: CONDSTORE, QRESYNC. Add fields modseq on mailbox and each message. Keep (log of) deleted messages and their modseqs. +- todo: try to recover from syntax errors when the last command line ends with a }, i.e. a literal. we currently abort the entire connection. we may want to read some amount of literal data and continue with a next command. +- future: more extensions: STATUS=SIZE, OBJECTID, MULTISEARCH, REPLACE, NOTIFY, CATENATE, MULTIAPPEND, SORT, THREAD, CREATE-SPECIAL-USE. +- future: implement user-defined keyword flags? ../rfc/9051:566 +*/ + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "regexp" + "runtime/debug" + "sort" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/text/unicode/norm" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/scram" + "github.com/mjl-/mox/store" +) + +// Most logging should be done through conn.log* functions. +// Only use imaplog in contexts without connection. 
// xlog is the package-level logger for use where no connection is available.
// Most logging should be done through conn.log* functions.
var xlog = mlog.New("imapserver")

var (
	// Counts incoming IMAP connections, labeled by service.
	metricIMAPConnection = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "mox_imap_connection_total",
			Help: "Incoming IMAP connections.",
		},
		[]string{
			"service", // imap, imaps
		},
	)
	// Duration histogram per command and per outcome of that command.
	metricIMAPCommands = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "mox_imap_command_duration_seconds",
			Help:    "IMAP command duration and result codes in seconds.",
			Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
		},
		[]string{
			"cmd",    // Lower-case command name, or "(unrecognized)"/"(greeting)".
			"result", // ok, panic, ioerror, badsyntax, servererror, usererror, error
		},
	)
)

// Capabilities (extensions) the server supports. Connections will add a few more, e.g. STARTTLS, LOGINDISABLED, AUTH=PLAIN.
// ENABLE: ../rfc/5161
// LITERAL+: ../rfc/7888
// IDLE: ../rfc/2177
// SASL-IR: ../rfc/4959
// BINARY: ../rfc/3516
// UNSELECT: ../rfc/3691
// UIDPLUS: ../rfc/4315
// ESEARCH: ../rfc/4731
// SEARCHRES: ../rfc/5182
// MOVE: ../rfc/6851
// UTF8=ONLY: ../rfc/6855
// LIST-EXTENDED: ../rfc/5258
// SPECIAL-USE: ../rfc/6154
// LIST-STATUS: ../rfc/5819
// ID: ../rfc/2971
// AUTH=SCRAM-SHA-256: ../rfc/7677 ../rfc/5802
// APPENDLIMIT, we support the max possible size, 1<<63 - 1: ../rfc/7889:129
const serverCapabilities = "IMAP4rev2 IMAP4rev1 ENABLE LITERAL+ IDLE SASL-IR BINARY UNSELECT UIDPLUS ESEARCH SEARCHRES MOVE UTF8=ONLY LIST-EXTENDED SPECIAL-USE LIST-STATUS AUTH=SCRAM-SHA-256 ID APPENDLIMIT=9223372036854775807"

// conn is the state of a single IMAP connection, from accept until close.
// It is used by exactly one goroutine at a time (commands are not pipelined
// concurrently), except for the optional line-reader goroutine feeding c.line.
type conn struct {
	cid               int64         // Connection id, for logging.
	state             state         // Not authenticated, authenticated or selected.
	conn              net.Conn      // Underlying connection, possibly replaced by a TLS connection after STARTTLS.
	tls               bool          // Whether TLS has been initialized.
	br                *bufio.Reader // From remote, with TLS unwrapped in case of TLS.
	line              chan lineErr  // If set, instead of reading from br, a line is read from this channel. For reading a line in IDLE while also waiting for mailbox/account updates.
	lastLine          string        // For detecting if syntax error is fatal, i.e. if this ends with a literal. Without crlf.
	bw                *bufio.Writer // To remote, with TLS added in case of TLS.
	lastlog           time.Time     // For printing time since previous log line.
	tlsConfig         *tls.Config   // TLS config to use for handshake.
	noRequireSTARTTLS bool          // Whether plain-text AUTH=PLAIN is allowed without TLS.
	cmd               string        // Currently executing, for deciding to applyChanges and logging.
	cmdMetric         string        // Currently executing, for metrics.
	cmdStart          time.Time     // Start of current command, for the duration metric.
	log               *mlog.Log     // Connection-scoped logger with cid/username fields.
	enabled           map[capability]bool // All upper-case.

	// Set by SEARCH with SAVE. Can be used by commands accepting a sequence-set with
	// value "$". When used, UIDs must be verified to still exist, because they may
	// have been expunged. Cleared by a SELECT or EXAMINE.
	// Nil means no searchResult is present. An empty list is a valid searchResult,
	// just not matching any messages.
	// ../rfc/5182:13 ../rfc/9051:4040
	searchResult []store.UID

	// Only when authenticated.
	username string         // Full username as used during login.
	account  *store.Account // Account of username.
	comm     *store.Comm    // For sending/receiving changes on mailboxes in account, e.g. from messages incoming on smtp, or another imap client.

	mailboxID int64       // Only for StateSelected.
	readonly  bool        // If opened mailbox is readonly.
	uids      []store.UID // UIDs known in this session, sorted. todo future: store more space-efficiently, as ranges.
}

// capability for use with ENABLED and CAPABILITY. We always keep this upper case,
// e.g. IMAP4REV2. These values are treated case-insensitive, but it's easier for
// comparison to just always have the same case.
+type capability string + +const ( + capIMAP4rev2 capability = "IMAP4REV2" + capUTF8Accept capability = "UTF8=ACCEPT" +) + +type lineErr struct { + line string + err error +} + +type state byte + +const ( + stateNotAuthenticated state = iota + stateAuthenticated + stateSelected +) + +func stateCommands(cmds ...string) map[string]struct{} { + r := map[string]struct{}{} + for _, cmd := range cmds { + r[cmd] = struct{}{} + } + return r +} + +var ( + commandsStateAny = stateCommands("capability", "noop", "logout", "id") + commandsStateNotAuthenticated = stateCommands("starttls", "authenticate", "login") + commandsStateAuthenticated = stateCommands("enable", "select", "examine", "create", "delete", "rename", "subscribe", "unsubscribe", "list", "namespace", "status", "append", "idle", "lsub") + commandsStateSelected = stateCommands("close", "unselect", "expunge", "search", "fetch", "store", "copy", "move", "uid expunge", "uid search", "uid fetch", "uid store", "uid copy", "uid move") +) + +var commands = map[string]func(c *conn, tag, cmd string, p *parser){ + // Any state. + "capability": (*conn).cmdCapability, + "noop": (*conn).cmdNoop, + "logout": (*conn).cmdLogout, + "id": (*conn).cmdID, + + // Notauthenticated. + "starttls": (*conn).cmdStarttls, + "authenticate": (*conn).cmdAuthenticate, + "login": (*conn).cmdLogin, + + // Authenticated and selected. + "enable": (*conn).cmdEnable, + "select": (*conn).cmdSelect, + "examine": (*conn).cmdExamine, + "create": (*conn).cmdCreate, + "delete": (*conn).cmdDelete, + "rename": (*conn).cmdRename, + "subscribe": (*conn).cmdSubscribe, + "unsubscribe": (*conn).cmdUnsubscribe, + "list": (*conn).cmdList, + "lsub": (*conn).cmdLsub, + "namespace": (*conn).cmdNamespace, + "status": (*conn).cmdStatus, + "append": (*conn).cmdAppend, + "idle": (*conn).cmdIdle, + + // Selected. 
+ "check": (*conn).cmdCheck, + "close": (*conn).cmdClose, + "unselect": (*conn).cmdUnselect, + "expunge": (*conn).cmdExpunge, + "uid expunge": (*conn).cmdUIDExpunge, + "search": (*conn).cmdSearch, + "uid search": (*conn).cmdUIDSearch, + "fetch": (*conn).cmdFetch, + "uid fetch": (*conn).cmdUIDFetch, + "store": (*conn).cmdStore, + "uid store": (*conn).cmdUIDStore, + "copy": (*conn).cmdCopy, + "uid copy": (*conn).cmdUIDCopy, + "move": (*conn).cmdMove, + "uid move": (*conn).cmdUIDMove, +} + +var errIO = errors.New("fatal io error") // For read/write errors and errors that should close the connection. +var errProtocol = errors.New("fatal protocol error") // For protocol errors for which a stack trace should be printed. + +var sanityChecks bool + +// check err for sanity. +// if not nil and checkSanity true (set during tests), then panic. if not nil during normal operation, just log. +func (c *conn) xsanity(err error, format string, args ...any) { + if err == nil { + return + } + if sanityChecks { + panic(fmt.Errorf("%s: %s", fmt.Sprintf(format, args...), err)) + } + c.log.Errorx(fmt.Sprintf(format, args...), err) +} + +type msgseq uint32 + +// ListenAndServe starts all imap listeners for the configuration, in new goroutines. 
+func ListenAndServe() { + for name, listener := range mox.Conf.Static.Listeners { + var tlsConfig *tls.Config + if listener.TLS != nil { + tlsConfig = listener.TLS.Config + } + + if listener.IMAP.Enabled { + port := config.Port(listener.IMAP.Port, 143) + for _, ip := range listener.IPs { + go listenServe("imap", name, ip, port, tlsConfig, false, listener.IMAP.NoRequireSTARTTLS) + } + } + + if listener.IMAPS.Enabled { + port := config.Port(listener.IMAPS.Port, 993) + for _, ip := range listener.IPs { + go listenServe("imaps", name, ip, port, tlsConfig, true, false) + } + } + } +} + +func listenServe(protocol, listenerName, ip string, port int, tlsConfig *tls.Config, xtls, noRequireSTARTTLS bool) { + addr := net.JoinHostPort(ip, fmt.Sprintf("%d", port)) + xlog.Print("listening for imap", mlog.Field("listener", listenerName), mlog.Field("addr", addr), mlog.Field("protocol", protocol)) + network := mox.Network(ip) + var ln net.Listener + var err error + if xtls { + ln, err = tls.Listen(network, addr, tlsConfig) + } else { + ln, err = net.Listen(network, addr) + } + if err != nil { + xlog.Fatalx("imap: listen for imap"+mox.LinuxSetcapHint(err), err, mlog.Field("protocol", protocol), mlog.Field("listener", listenerName)) + } + + for { + conn, err := ln.Accept() + if err != nil { + xlog.Infox("imap: accept", err, mlog.Field("protocol", protocol), mlog.Field("listener", listenerName)) + continue + } + + metricIMAPConnection.WithLabelValues(protocol).Inc() + go serve(listenerName, mox.Cid(), tlsConfig, conn, xtls, noRequireSTARTTLS) + } +} + +// returns whether this connection accepts utf-8 in strings. 
+func (c *conn) utf8strings() bool { + return c.enabled[capIMAP4rev2] || c.enabled[capUTF8Accept] +} + +func (c *conn) xdbwrite(fn func(tx *bstore.Tx)) { + err := c.account.DB.Write(func(tx *bstore.Tx) error { + fn(tx) + return nil + }) + xcheckf(err, "transaction") +} + +func (c *conn) xdbread(fn func(tx *bstore.Tx)) { + err := c.account.DB.Read(func(tx *bstore.Tx) error { + fn(tx) + return nil + }) + xcheckf(err, "transaction") +} + +// Closes the currently selected/active mailbox, setting state from selected to authenticated. +// Does not remove messages marked for deletion. +func (c *conn) unselect() { + if c.state == stateSelected { + c.state = stateAuthenticated + } + c.mailboxID = 0 + c.uids = nil +} + +// Write makes a connection an io.Writer. It panics for i/o errors. These errors +// are handled in the connection command loop. +func (c *conn) Write(buf []byte) (int, error) { + if err := c.conn.SetWriteDeadline(time.Now().Add(30 * time.Second)); err != nil { + c.log.Errorx("setting write deadline", err) + } + + n, err := c.conn.Write(buf) + if err != nil { + panic(fmt.Errorf("write: %s (%w)", err, errIO)) + } + return n, err +} + +// Cache of line buffers for reading commands. +var bufpool = moxio.NewBufpool(8, 16*1024) + +// read line from connection, not going through line channel. 
+func (c *conn) readline0() (string, error) { + d := 30 * time.Minute + if c.state == stateNotAuthenticated { + d = 30 * time.Second + } + c.conn.SetReadDeadline(time.Now().Add(d)) + + line, err := bufpool.Readline(c.br) + if err != nil && errors.Is(err, moxio.ErrLineTooLong) { + return "", fmt.Errorf("%s (%w)", err, errProtocol) + } else if err != nil { + return "", fmt.Errorf("%s (%w)", err, errIO) + } + return line, nil +} + +func (c *conn) lineChan() chan lineErr { + if c.line == nil { + c.line = make(chan lineErr, 1) + go func() { + line, err := c.readline0() + c.line <- lineErr{line, err} + }() + } + return c.line +} + +// readline from either the c.line channel, or otherwise read from connection. +func (c *conn) readline(readCmd bool) string { + var line string + var err error + if c.line != nil { + le := <-c.line + c.line = nil + line, err = le.line, le.err + } else { + line, err = c.readline0() + } + if err != nil { + if readCmd && errors.Is(err, os.ErrDeadlineExceeded) { + c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) + c.writelinef("* BYE inactive") + } + if !errors.Is(err, errIO) && !errors.Is(err, errProtocol) { + err = fmt.Errorf("%s (%w)", err, errIO) + } + panic(err) + } + c.lastLine = line + + // We typically respond immediately (IDLE is an exception). + // The client may not be reading, or may have disappeared. + // Don't wait more than 5 minutes before closing down the connection. + // The write deadline is managed in IDLE as well. + // For unauthenticated connections, we require the client to read faster. + wd := 5 * time.Minute + if c.state == stateNotAuthenticated { + wd = 30 * time.Second + } + c.conn.SetWriteDeadline(time.Now().Add(wd)) + + return line +} + +// write tagged command response, but first write pending changes. +func (c *conn) writeresultf(format string, args ...any) { + c.bwriteresultf(format, args...) + c.xflush() +} + +// write buffered taggedcommand response, but first write pending changes. 
+func (c *conn) bwriteresultf(format string, args ...any) { + switch c.cmd { + case "fetch", "store", "search": + // ../rfc/9051:5862 + default: + if c.comm != nil { + c.applyChanges(c.comm.Get(), false) + } + } + c.bwritelinef(format, args...) +} + +func (c *conn) writelinef(format string, args ...any) { + c.bwritelinef(format, args...) + c.xflush() +} + +// Buffer line for write. +func (c *conn) bwritelinef(format string, args ...any) { + format += "\r\n" + fmt.Fprintf(c.bw, format, args...) +} + +func (c *conn) xflush() { + err := c.bw.Flush() + xcheckf(err, "flush") // Should never happen, the Write caused by the Flush should panic on i/o error. +} + +func (c *conn) readCommand(tag *string) (cmd string, p *parser) { + line := c.readline(true) + p = newParser(line, c) + p.context("tag") + *tag = p.xtag() + p.context("command") + p.xspace() + cmd = p.xcommand() + return cmd, newParser(p.remainder(), c) +} + +func (c *conn) xreadliteral(size int64, sync bool) string { + if sync { + c.writelinef("+") + } + buf := make([]byte, size) + if size > 0 { + if err := c.conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil { + c.log.Errorx("setting read deadline", err) + } + + _, err := io.ReadFull(c.br, buf) + if err != nil { + // Cannot use xcheckf due to %w handling of errIO. + panic(fmt.Errorf("reading literal: %s (%w)", err, errIO)) + } + } + return string(buf) +} + +var cleanClose struct{} // Sentinel value for panic/recover indicating clean close of connection. 
+ +func serve(listenerName string, cid int64, tlsConfig *tls.Config, nc net.Conn, xtls, noRequireSTARTTLS bool) { + c := &conn{ + cid: cid, + conn: nc, + tls: xtls, + lastlog: time.Now(), + tlsConfig: tlsConfig, + noRequireSTARTTLS: noRequireSTARTTLS, + enabled: map[capability]bool{}, + cmd: "(greeting)", + cmdStart: time.Now(), + } + c.log = xlog.MoreFields(func() []mlog.Pair { + now := time.Now() + l := []mlog.Pair{ + mlog.Field("cid", c.cid), + mlog.Field("delta", now.Sub(c.lastlog)), + } + c.lastlog = now + if c.username != "" { + l = append(l, mlog.Field("username", c.username)) + } + return l + }) + c.br = bufio.NewReader(moxio.NewTraceReader(c.log, "C: ", c.conn)) + c.bw = bufio.NewWriter(moxio.NewTraceWriter(c.log, "S: ", c)) + + // Many IMAP connections use IDLE to wait for new incoming messages. We'll enable + // keepalive to get a higher chance of the connection staying alive, or otherwise + // detecting broken connections early. + xconn := c.conn + if xtls { + xconn = c.conn.(*tls.Conn).NetConn() + } + if tcpconn, ok := xconn.(*net.TCPConn); ok { + if err := tcpconn.SetKeepAlivePeriod(5 * time.Minute); err != nil { + c.log.Errorx("setting keepalive period", err) + } else if err := tcpconn.SetKeepAlive(true); err != nil { + c.log.Errorx("enabling keepalive", err) + } + } + + c.log.Info("new connection", mlog.Field("remote", c.conn.RemoteAddr()), mlog.Field("local", c.conn.LocalAddr()), mlog.Field("tls", xtls), mlog.Field("listener", listenerName)) + + defer func() { + c.conn.Close() + + if c.account != nil { + c.comm.Unregister() + err := c.account.Close() + c.xsanity(err, "close account") + c.account = nil + c.comm = nil + } + + x := recover() + if x == nil || x == cleanClose { + c.log.Info("connection closed") + } else if err, ok := x.(error); ok || isClosed(err) { + c.log.Infox("connection closed", err) + } else { + c.log.Error("unhandled error", mlog.Field("err", x)) + debug.PrintStack() + metrics.PanicInc("imapserver") + } + }() + + select { + case 
<-mox.Shutdown: + // ../rfc/9051:5381 + c.writelinef("* BYE mox shutting down") + panic(errIO) + default: + } + + // We register and unregister the original connection, in case it c.conn is + // replaced with a TLS connection later on. + mox.Connections.Register(nc, "imap", listenerName) + defer mox.Connections.Unregister(nc) + + c.writelinef("* OK [CAPABILITY %s] mox imap", c.capabilities()) + + for { + c.command() + c.xflush() // For flushing errors, or possibly commands that did not flush explicitly. + } +} + +// isClosed returns whether i/o failed, typically because the connection is closed. +// For connection errors, we often want to generate fewer logs. +func isClosed(err error) bool { + return errors.Is(err, errIO) || errors.Is(err, errProtocol) || moxio.IsClosed(err) +} + +func (c *conn) command() { + var tag, cmd, cmdlow string + var p *parser + + defer func() { + var result string + defer func() { + metricIMAPCommands.WithLabelValues(c.cmdMetric, result).Observe(float64(time.Since(c.cmdStart)) / float64(time.Second)) + }() + + logFields := []mlog.Pair{ + mlog.Field("cmd", c.cmd), + mlog.Field("duration", time.Since(c.cmdStart)), + } + c.cmd = "" + + x := recover() + if x == nil || x == cleanClose { + c.log.Debug("imap command done", logFields...) + result = "ok" + return + } + err, ok := x.(error) + if !ok { + c.log.Error("imap command panic", append([]mlog.Pair{mlog.Field("panic", x)}, logFields...)...) + result = "panic" + panic(x) + } + + if isClosed(err) { + c.log.Infox("imap command ioerror", err, logFields...) + result = "ioerror" + if errors.Is(err, errProtocol) { + debug.PrintStack() + } + panic(err) + } + + var sxerr syntaxError + var uerr userError + var serr serverError + if errors.As(err, &sxerr) { + result = "badsyntax" + c.log.Debugx("imap command syntax error", err, logFields...) 
+ c.log.Info("imap syntax error", mlog.Field("lastline", c.lastLine)) + fatal := strings.HasSuffix(c.lastLine, "+}") + if fatal { + c.conn.SetWriteDeadline(time.Now().Add(5 * time.Second)) + } + c.bwriteresultf("%s BAD %s unrecognized syntax/command: %v", tag, cmd, err) + if fatal { + c.xflush() + panic(fmt.Errorf("aborting connection after syntax error for command with non-sync literal: %w", errProtocol)) + } + } else if errors.As(err, &serr) { + result = "servererror" + c.log.Errorx("imap command server error", err, logFields...) + debug.PrintStack() + c.bwriteresultf("%s NO %s %v", tag, cmd, err) + } else if errors.As(err, &uerr) { + result = "usererror" + c.log.Debugx("imap command user error", err, logFields...) + if uerr.code != "" { + c.bwriteresultf("%s NO [%s] %s %v", tag, uerr.code, cmd, err) + } else { + c.bwriteresultf("%s NO %s %v", tag, cmd, err) + } + } else { + result = "error" + c.log.Infox("imap command error", err, logFields...) + // todo: introduce a store.Error, and check for that, don't blindly pass on errors? + debug.PrintStack() + c.bwriteresultf("%s NO %s %v", tag, cmd, err) + } + }() + + tag = "*" + cmd, p = c.readCommand(&tag) + cmdlow = strings.ToLower(cmd) + c.cmd = cmdlow + c.cmdStart = time.Now() + c.cmdMetric = "(unrecognized)" + + select { + case <-mox.Shutdown: + // ../rfc/9051:5375 + c.writelinef("* BYE shutting down") + panic(errIO) + default: + } + + fn := commands[cmdlow] + if fn == nil { + xsyntaxErrorf("unknown command %q", cmd) + } + c.cmdMetric = c.cmd + + // Check if command is allowed in this state. 
+ if _, ok1 := commandsStateAny[cmdlow]; ok1 { + } else if _, ok2 := commandsStateNotAuthenticated[cmdlow]; ok2 && c.state == stateNotAuthenticated { + } else if _, ok3 := commandsStateAuthenticated[cmdlow]; ok3 && c.state == stateAuthenticated || c.state == stateSelected { + } else if _, ok4 := commandsStateSelected[cmdlow]; ok4 && c.state == stateSelected { + } else if ok1 || ok2 || ok3 || ok4 { + xuserErrorf("not allowed in this connection state") + } else { + xserverErrorf("unrecognized command") + } + + fn(c, tag, cmd, p) +} + +func (c *conn) broadcast(changes []store.Change) { + if len(changes) == 0 { + return + } + c.log.Debug("broadcast changes", mlog.Field("changes", changes)) + c.comm.Broadcast(changes) +} + +// matchStringer matches a string against reference + mailbox patterns. +type matchStringer interface { + MatchString(s string) bool +} + +type noMatch struct{} + +// MatchString for noMatch always returns false. +func (noMatch) MatchString(s string) bool { + return false +} + +// xmailboxPatternMatcher returns a matcher for mailbox names given the reference and patterns. +// Patterns can include "%" and "*", matching any character excluding and including a slash respectively. +func xmailboxPatternMatcher(ref string, patterns []string) matchStringer { + if strings.HasPrefix(ref, "/") { + return noMatch{} + } + + var subs []string + for _, pat := range patterns { + if strings.HasPrefix(pat, "/") { + continue + } + + s := pat + if ref != "" { + s = filepath.Join(ref, pat) + } + + // Fix casing for all Inbox paths. 
+ first := strings.SplitN(s, "/", 2)[0] + if strings.EqualFold(first, "Inbox") { + s = "Inbox" + s[len("Inbox"):] + } + + // ../rfc/9051:2361 + var rs string + for _, c := range s { + if c == '%' { + rs += "[^/]*" + } else if c == '*' { + rs += ".*" + } else { + rs += regexp.QuoteMeta(string(c)) + } + } + subs = append(subs, rs) + } + + if len(subs) == 0 { + return noMatch{} + } + rs := "^(" + strings.Join(subs, "|") + ")$" + re, err := regexp.Compile(rs) + xcheckf(err, "compiling regexp for mailbox patterns") + return re +} + +func (c *conn) sequence(uid store.UID) msgseq { + return uidSearch(c.uids, uid) +} + +func uidSearch(uids []store.UID, uid store.UID) msgseq { + s := 0 + e := len(uids) + for s < e { + i := (s + e) / 2 + m := uids[i] + if uid == m { + return msgseq(i + 1) + } else if uid < m { + e = i + } else { + s = i + 1 + } + } + return 0 +} + +func (c *conn) xsequence(uid store.UID) msgseq { + seq := c.sequence(uid) + if seq <= 0 { + xserverErrorf("unknown uid %d (%w)", uid, errProtocol) + } + return seq +} + +func (c *conn) sequenceRemove(seq msgseq, uid store.UID) { + i := seq - 1 + if c.uids[i] != uid { + xserverErrorf(fmt.Sprintf("got uid %d at msgseq %d, expected uid %d", uid, seq, c.uids[i])) + } + copy(c.uids[i:], c.uids[i+1:]) + c.uids = c.uids[:len(c.uids)-1] + if sanityChecks { + checkUIDs(c.uids) + } +} + +// add uid to the session. care must be taken that pending changes are fetched +// while holding the account wlock, and applied before adding this uid, because +// those pending changes may contain another new uid that has to be added first. 
// uidAppend adds uid to the session's sorted uid list.
// It panics with a server error when the uid is already known or is not larger
// than the current highest uid: new uids must arrive in order.
func (c *conn) uidAppend(uid store.UID) {
	if uidSearch(c.uids, uid) > 0 {
		xserverErrorf("uid already present (%w)", errProtocol)
	}
	if len(c.uids) > 0 && uid < c.uids[len(c.uids)-1] {
		xserverErrorf("new uid %d is smaller than last uid %d (%w)", uid, c.uids[len(c.uids)-1], errProtocol)
	}
	c.uids = append(c.uids, uid)
	if sanityChecks {
		checkUIDs(c.uids)
	}
}

// checkUIDs is a sanity check that uids are non-zero and in strictly ascending order.
func checkUIDs(uids []store.UID) {
	for i, uid := range uids {
		if uid == 0 || i > 0 && uid <= uids[i-1] {
			xserverErrorf("bad uids %v", uids)
		}
	}
}

// xnumSetUIDs resolves a sequence-set or uid-set to the matching UIDs in this session.
func (c *conn) xnumSetUIDs(isUID bool, nums numSet) []store.UID {
	_, uids := c.xnumSetConditionUIDs(false, true, isUID, nums)
	return uids
}

// xnumSetCondition resolves a sequence-set or uid-set to UIDs boxed as []any,
// for use as database query parameters.
func (c *conn) xnumSetCondition(isUID bool, nums numSet) []any {
	uidargs, _ := c.xnumSetConditionUIDs(true, false, isUID, nums)
	return uidargs
}

// xnumSetConditionUIDs resolves nums (a sequence-set when isUID is false, a
// uid-set when true, or the saved search result "$") against the session's
// known uids. Depending on forDB/returnUIDs it builds []any query args and/or
// the plain UID list.
func (c *conn) xnumSetConditionUIDs(forDB, returnUIDs bool, isUID bool, nums numSet) ([]any, []store.UID) {
	if nums.searchResult {
		// Update previously stored UIDs. Some may have been deleted.
		// Once deleted a UID will never come back, so we'll just remove those uids.
		o := 0
		for _, uid := range c.searchResult {
			if uidSearch(c.uids, uid) > 0 {
				c.searchResult[o] = uid
				o++
			}
		}
		c.searchResult = c.searchResult[:o]
		uidargs := make([]any, len(c.searchResult))
		for i, uid := range c.searchResult {
			uidargs[i] = uid
		}
		return uidargs, c.searchResult
	}

	var uidargs []any
	var uids []store.UID

	// add records a matched uid in whichever output forms were requested.
	add := func(uid store.UID) {
		if forDB {
			uidargs = append(uidargs, uid)
		}
		if returnUIDs {
			uids = append(uids, uid)
		}
	}

	if !isUID {
		// Sequence numbers that don't exist, or * on an empty mailbox, should result in a BAD response. ../rfc/9051:7018
		for _, r := range nums.ranges {
			var ia, ib int
			if r.first.star {
				if len(c.uids) == 0 {
					xsyntaxErrorf("invalid seqset * on empty mailbox")
				}
				ia = len(c.uids) - 1
			} else {
				// Sequence numbers are 1-based; convert to slice index.
				ia = int(r.first.number - 1)
				if ia >= len(c.uids) {
					xsyntaxErrorf("msgseq %d not in mailbox", r.first.number)
				}
			}
			if r.last == nil {
				// Single number, not a range.
				add(c.uids[ia])
				continue
			}

			if r.last.star {
				if len(c.uids) == 0 {
					xsyntaxErrorf("invalid seqset * on empty mailbox")
				}
				ib = len(c.uids) - 1
			} else {
				ib = int(r.last.number - 1)
				if ib >= len(c.uids) {
					xsyntaxErrorf("msgseq %d not in mailbox", r.last.number)
				}
			}
			// Ranges may be specified in either order, e.g. "3:1".
			if ia > ib {
				ia, ib = ib, ia
			}
			for _, uid := range c.uids[ia : ib+1] {
				add(uid)
			}
		}
		return uidargs, uids
	}

	// UIDs that do not exist can be ignored.
	if len(c.uids) == 0 {
		return nil, nil
	}

	for _, r := range nums.ranges {
		last := r.first
		if r.last != nil {
			last = *r.last
		}

		uida := store.UID(r.first.number)
		if r.first.star {
			uida = c.uids[len(c.uids)-1]
		}

		uidb := store.UID(last.number)
		if last.star {
			uidb = c.uids[len(c.uids)-1]
		}

		if uida > uidb {
			uida, uidb = uidb, uida
		}

		// Binary search for uida. On break, s is at or before uida's position; the
		// scan below filters any preceding uids via the range condition.
		s := 0
		e := len(c.uids)
		for s < e {
			m := (s + e) / 2
			if uida < c.uids[m] {
				e = m
			} else if uida > c.uids[m] {
				s = m + 1
			} else {
				break
			}
		}

		for _, uid := range c.uids[s:] {
			if uid >= uida && uid <= uidb {
				add(uid)
			} else if uid > uidb {
				break
			}
		}
	}

	return uidargs, uids
}

// ok writes a standard tagged "OK <cmd> done" response and flushes.
func (c *conn) ok(tag, cmd string) {
	c.bwriteresultf("%s OK %s done", tag, cmd)
	c.xflush()
}

// xcheckmailboxname checks if name is valid, returning an INBOX-normalized name.
// I.e. it changes various casings of INBOX and INBOX/* to Inbox and Inbox/*.
// Name is invalid if it contains leading/trailing/double slashes, or when it isn't
// unicode-normalized, or when empty or has special characters.
+func xcheckmailboxname(name string, allowInbox bool) string { + first := strings.SplitN(name, "/", 2)[0] + if strings.EqualFold(first, "inbox") { + if len(name) == len("inbox") && !allowInbox { + xuserErrorf("special mailbox name Inbox not allowed") + } + name = "Inbox" + name[len("Inbox"):] + } + + if norm.NFC.String(name) != name { + xusercodeErrorf("CANNOT", "non-unicode-normalized mailbox names not allowed") + } + + if name == "" { + xusercodeErrorf("CANNOT", "empty mailbox name") + } + if strings.HasPrefix(name, "/") || strings.HasSuffix(name, "/") || strings.Contains(name, "//") { + xusercodeErrorf("CANNOT", "bad slashes in mailbox name") + } + for _, c := range name { + switch c { + case '%', '*', '#', '&': + xusercodeErrorf("CANNOT", "character %c not allowed in mailbox name", c) + } + // ../rfc/6855:192 + if c <= 0x1f || c >= 0x7f && c <= 0x9f || c == 0x2028 || c == 0x2029 { + xusercodeErrorf("CANNOT", "control characters not allowed in mailbox name") + } + } + return name +} + +// Lookup mailbox by name. +// If the mailbox does not exist, panic is called with a user error. +// Must be called with account rlock held. +func (c *conn) xmailbox(tx *bstore.Tx, name string, missingErrCode string) store.Mailbox { + mb := c.account.MailboxFindX(tx, name) + if mb == nil { + // missingErrCode can be empty, or e.g. TRYCREATE or ALREADYEXISTS. + xusercodeErrorf(missingErrCode, "%w", store.ErrUnknownMailbox) + } + return *mb +} + +// Lookup mailbox by ID. +// If the mailbox does not exist, panic is called with a user error. +// Must be called with account rlock held. +func (c *conn) xmailboxID(tx *bstore.Tx, id int64) store.Mailbox { + mb := store.Mailbox{ID: id} + err := tx.Get(&mb) + if err == bstore.ErrAbsent { + xuserErrorf("%w", store.ErrUnknownMailbox) + } + return mb +} + +// Apply changes to our session state. +// If initial is false, updates like EXISTS and EXPUNGE are written to the client. +// If initial is true, we only apply the changes. 
// Should not be called while holding locks, as changes are written to client
// connections, which can block.
// Does not flush output.
func (c *conn) applyChanges(changes []store.Change, initial bool) {
	if len(changes) == 0 {
		return
	}

	// Generous deadline: we may be writing many untagged lines to a slow client.
	c.conn.SetWriteDeadline(time.Now().Add(5 * time.Minute))

	c.log.Debug("applying changes", mlog.Field("changes", changes))

	// Only keep changes for the selected mailbox, and changes that are always relevant.
	var n []store.Change
	for _, change := range changes {
		var mbID int64
		switch ch := change.(type) {
		case store.ChangeAddUID:
			mbID = ch.MailboxID
		case store.ChangeRemoveUIDs:
			mbID = ch.MailboxID
		case store.ChangeFlags:
			mbID = ch.MailboxID
		case store.ChangeRemoveMailbox, store.ChangeAddMailbox, store.ChangeRenameMailbox, store.ChangeAddSubscription:
			// Mailbox-level changes are relevant in any state.
			n = append(n, change)
			continue
		default:
			panic(fmt.Errorf("missing case for %#v", change))
		}
		if c.state == stateSelected && mbID == c.mailboxID {
			n = append(n, change)
		}
	}
	changes = n

	i := 0
	for i < len(changes) {
		// First process all new uids. So we only send a single EXISTS.
		var adds []store.ChangeAddUID
		for ; i < len(changes); i++ {
			ch, ok := changes[i].(store.ChangeAddUID)
			if !ok {
				break
			}
			seq := c.sequence(ch.UID)
			if seq > 0 && initial {
				// Already known from the initial session setup; nothing to apply.
				continue
			}
			c.uidAppend(ch.UID)
			adds = append(adds, ch)
		}
		if len(adds) > 0 {
			if initial {
				continue
			}
			// Write the exists, and the UID and flags as well. Hopefully the client waits for
			// long enough after the EXISTS to see these messages, and doesn't request them
			// again with a FETCH.
			c.bwritelinef("* %d EXISTS", len(c.uids))
			for _, add := range adds {
				seq := c.xsequence(add.UID)
				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, add.UID, flaglist(add.Flags).pack(c))
			}
			continue
		}

		change := changes[i]
		i++

		switch ch := change.(type) {
		case store.ChangeRemoveUIDs:
			for _, uid := range ch.UIDs {
				var seq msgseq
				if initial {
					// During initial sync an unknown uid just means it was never added; skip.
					seq = c.sequence(uid)
					if seq <= 0 {
						continue
					}
				} else {
					seq = c.xsequence(uid)
				}
				c.sequenceRemove(seq, uid)
				if !initial {
					c.bwritelinef("* %d EXPUNGE", seq)
				}
			}
		case store.ChangeFlags:
			// The uid can be unknown if we just expunged it while another session marked it as deleted just before.
			seq := c.sequence(ch.UID)
			if seq <= 0 {
				continue
			}
			if !initial {
				c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", seq, ch.UID, flaglist(ch.Flags).pack(c))
			}
		case store.ChangeRemoveMailbox:
			c.bwritelinef(`* LIST (\NonExistent) "/" %s`, astring(ch.Name).pack(c))
		case store.ChangeAddMailbox:
			c.bwritelinef(`* LIST (%s) "/" %s`, strings.Join(ch.Flags, " "), astring(ch.Name).pack(c))
		case store.ChangeRenameMailbox:
			c.bwritelinef(`* LIST (%s) "/" %s ("OLDNAME" (%s))`, strings.Join(ch.Flags, " "), astring(ch.NewName).pack(c), string0(ch.OldName).pack(c))
		case store.ChangeAddSubscription:
			c.bwritelinef(`* LIST (\Subscribed) "/" %s`, astring(ch.Name).pack(c))
		default:
			panic(fmt.Sprintf("internal error, missing case for %#v", change))
		}
	}
}

// Capability returns the capabilities this server implements and currently has
// available given the connection state.
+// +// State: any +func (c *conn) cmdCapability(tag, cmd string, p *parser) { + // Command: ../rfc/9051:1208 ../rfc/3501:1300 + + // Request syntax: ../rfc/9051:6464 ../rfc/3501:4669 + p.xempty() + + caps := c.capabilities() + + // Response syntax: ../rfc/9051:6427 ../rfc/3501:4655 + c.bwritelinef("* CAPABILITY %s", caps) + c.ok(tag, cmd) +} + +// capabilities returns non-empty string with available capabilities based on connection state. +// For use in cmdCapability and untagged OK responses on connection start, login and authenticate. +func (c *conn) capabilities() string { + caps := serverCapabilities + // ../rfc/9051:1238 + if !c.tls { + caps += " STARTTLS" + } + if c.tls || c.noRequireSTARTTLS { + caps += " AUTH=PLAIN" + } else { + caps += " LOGINDISABLED" + } + return caps +} + +// No op, but useful for retrieving pending changes as untagged responses, e.g. of +// message delivery. +// +// State: any +func (c *conn) cmdNoop(tag, cmd string, p *parser) { + // Command: ../rfc/9051:1261 ../rfc/3501:1363 + + // Request syntax: ../rfc/9051:6464 ../rfc/3501:4669 + p.xempty() + c.ok(tag, cmd) +} + +// Logout, after which server closes the connection. +// +// State: any +func (c *conn) cmdLogout(tag, cmd string, p *parser) { + // Commands: ../rfc/3501:1407 ../rfc/9051:1290 + + // Request syntax: ../rfc/9051:6464 ../rfc/3501:4669 + p.xempty() + + c.unselect() + c.state = stateNotAuthenticated + // Response syntax: ../rfc/9051:6886 ../rfc/3501:4935 + c.bwritelinef("* BYE thanks") + c.ok(tag, cmd) + panic(cleanClose) +} + +// Clients can use ID to tell the server which software they are using. Servers can +// respond with their version. For statistics/logging/debugging purposes. 
+//
+// State: any
+func (c *conn) cmdID(tag, cmd string, p *parser) {
+	// Command: ../rfc/2971:129
+
+	// Request syntax: ../rfc/2971:241
+	p.xspace()
+	var params map[string]string
+	if p.take("(") {
+		// Parenthesized list of key/value pairs; values may be NIL.
+		params = map[string]string{}
+		for !p.take(")") {
+			if len(params) > 0 {
+				p.xspace()
+			}
+			k := p.xstring()
+			p.xspace()
+			v := p.xnilString()
+			if _, ok := params[k]; ok {
+				xsyntaxErrorf("duplicate key %q", k)
+			}
+			params[k] = v
+		}
+	} else {
+		// A bare NIL instead of a parameter list is allowed.
+		p.xnil()
+	}
+	p.xempty()
+
+	// We just log the client id.
+	c.log.Info("client id", mlog.Field("params", params))
+
+	// Response syntax: ../rfc/2971:243
+	// We send our name and version. ../rfc/2971:193
+	c.bwritelinef(`* ID ("name" "mox" "version" %s)`, string0(moxvar.Version).pack(c))
+	c.ok(tag, cmd)
+}
+
+// STARTTLS enables TLS on the connection, after a plain text start.
+// Only allowed if TLS isn't already enabled, either through connecting to a
+// TLS-enabled TCP port, or a previous STARTTLS command.
+// After STARTTLS, plain text authentication typically becomes available.
+//
+// Status: Not authenticated.
+func (c *conn) cmdStarttls(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:1340 ../rfc/3501:1468
+
+	// Request syntax: ../rfc/9051:6473 ../rfc/3501:4676
+	p.xempty()
+
+	if c.tls {
+		xsyntaxErrorf("tls already active") // ../rfc/9051:1353
+	}
+
+	// Any data the bufio reader already buffered must be replayed to the TLS
+	// handshake, otherwise bytes the client sent would be lost.
+	conn := c.conn
+	if n := c.br.Buffered(); n > 0 {
+		buf := make([]byte, n)
+		_, err := io.ReadFull(c.br, buf)
+		xcheckf(err, "reading buffered data for tls handshake")
+		conn = &prefixConn{buf, conn}
+	}
+	// OK must be written before the handshake starts, still in plain text.
+	c.ok(tag, cmd)
+
+	cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid)
+	ctx, cancel := context.WithTimeout(cidctx, time.Minute)
+	defer cancel()
+	tlsConn := tls.Server(conn, c.tlsConfig)
+	c.log.Debug("starting tls server handshake")
+	if err := tlsConn.HandshakeContext(ctx); err != nil {
+		// Wrap in errIO so the connection is treated as broken.
+		panic(fmt.Errorf("starttls handshake: %s (%w)", err, errIO))
+	}
+	cancel()
+	tlsversion, ciphersuite := mox.TLSInfo(tlsConn)
+	c.log.Debug("tls server handshake done", mlog.Field("tls", tlsversion), mlog.Field("ciphersuite", ciphersuite))
+
+	// Replace the connection and its buffered reader/writer with TLS-backed ones.
+	c.conn = tlsConn
+	c.br = bufio.NewReader(moxio.NewTraceReader(c.log, "C: ", c.conn))
+	c.bw = bufio.NewWriter(moxio.NewTraceWriter(c.log, "S: ", c))
+	c.tls = true
+}
+
+// Authenticate using SASL. Supports multiple back and forths between client and
+// server to finish authentication, unlike LOGIN which is just a single
+// username/password.
+//
+// Status: Not authenticated.
+func (c *conn) cmdAuthenticate(tag, cmd string, p *parser) { + // Command: ../rfc/9051:1403 ../rfc/3501:1519 + // Examples: ../rfc/9051:1520 ../rfc/3501:1631 + + var authVariant string + authResult := "error" + defer func() { + metrics.AuthenticationInc("imap", authVariant, authResult) + }() + + // Request syntax: ../rfc/9051:6341 ../rfc/3501:4561 + p.xspace() + authType := p.xatom() + + xreadInitial := func() []byte { + var line string + if p.empty() { + c.writelinef("+ ") + line = c.readline(false) + } else { + // ../rfc/9051:1407 ../rfc/4959:84 + p.xspace() + line = p.remainder() + if line == "=" { + // ../rfc/9051:1450 + line = "" // Base64 decode will result in empty buffer. + } + } + // ../rfc/9051:1442 ../rfc/3501:1553 + if line == "*" { + authResult = "aborted" + xsyntaxErrorf("authenticate aborted by client") + } + buf, err := base64.StdEncoding.DecodeString(line) + if err != nil { + xsyntaxErrorf("parsing base64: %v", err) + } + return buf + } + + xreadContinuation := func() []byte { + line := c.readline(false) + if line == "*" { + authResult = "aborted" + xsyntaxErrorf("authenticate aborted by client") + } + buf, err := base64.StdEncoding.DecodeString(line) + if err != nil { + xsyntaxErrorf("parsing base64: %v", err) + } + return buf + } + + switch strings.ToUpper(authType) { + case "PLAIN": + authVariant = "plain" + + if !c.noRequireSTARTTLS && !c.tls { + // ../rfc/9051:5194 + xusercodeErrorf("PRIVACYREQUIRED", "tls required for login") + } + + buf := xreadInitial() + plain := bytes.Split(buf, []byte{0}) + if len(plain) != 3 { + xsyntaxErrorf("bad plain auth data, expected 3 nul-separated tokens, got %d tokens", len(plain)) + } + authz := string(plain[0]) + authc := string(plain[1]) + password := string(plain[2]) + acc, err := store.OpenEmailAuth(authc, password) + if err != nil { + if errors.Is(err, store.ErrUnknownCredentials) { + authResult = "badcreds" + xusercodeErrorf("AUTHENTICATIONFAILED", "bad credentials") + } + xusercodeErrorf("", "error") + 
} + if authz != "" && authz != authc { + acc.Close() + xusercodeErrorf("AUTHORIZATIONFAILED", "cannot assume role") + } + c.account = acc + c.username = authc + authResult = "ok" + + case "SCRAM-SHA-256": + // todo: improve handling of errors during scram. e.g. invalid parameters. should we abort the imap command, or continue until the end and respond with a scram-level error? + + authVariant = "scram-sha-256" + + c0 := xreadInitial() + ss, err := scram.NewServer(c0) + if err != nil { + xsyntaxErrorf("starting scram: %w", err) + } + c.log.Info("scram auth", mlog.Field("authentication", ss.Authentication)) + acc, _, err := store.OpenEmail(ss.Authentication) + if err != nil { + // todo: we could continue scram with a generated salt, deterministically generated + // from the username. that way we don't have to store anything but attackers cannot + // learn if an account exists. same for absent scram saltedpassword below. + xuserErrorf("scram not possible") + } + defer func() { + if acc != nil { + err := acc.Close() + c.xsanity(err, "close account") + } + }() + if ss.Authorization != "" && ss.Authorization != ss.Authentication { + xuserErrorf("authentication with authorization for different user not supported") + } + var password store.Password + acc.WithRLock(func() { + err := acc.DB.Read(func(tx *bstore.Tx) error { + password, err = bstore.QueryTx[store.Password](tx).Get() + xsc := password.SCRAMSHA256 + if err == bstore.ErrAbsent || err == nil && (len(xsc.Salt) == 0 || xsc.Iterations == 0 || len(xsc.SaltedPassword) == 0) { + xuserErrorf("scram not possible") + } + xcheckf(err, "fetching credentials") + return err + }) + xcheckf(err, "read tx") + }) + s1, err := ss.ServerFirst(password.SCRAMSHA256.Iterations, password.SCRAMSHA256.Salt) + if err != nil { + xsyntaxErrorf("server first: %w", err) + } + xcheckf(err, "scram first server step") + c.writelinef("+ %s", base64.StdEncoding.EncodeToString([]byte(s1))) + c2 := xreadContinuation() + s3, err := ss.Finish(c2, 
password.SCRAMSHA256.SaltedPassword) + if len(s3) > 0 { + c.writelinef("+ %s", base64.StdEncoding.EncodeToString([]byte(s3))) + } + if err != nil { + if errors.Is(err, scram.ErrInvalidProof) { + authResult = "badcreds" + xusercodeErrorf("AUTHENTICATIONFAILED", "bad credentials") + } + xuserErrorf("server final: %w", err) + } + + // Client must still respond, but there is nothing to say. See ../rfc/9051:6221 + // The message should be empty. todo: should we require it is empty? + xreadContinuation() + + c.account = acc + acc = nil // Cancel cleanup. + c.username = ss.Authentication + authResult = "ok" + + default: + xuserErrorf("method not supported") + } + c.comm = store.RegisterComm(c.account) + c.state = stateAuthenticated + c.writeresultf("%s OK [CAPABILITY %s] authenticate done", tag, c.capabilities()) +} + +// Login logs in with username and password. +// +// Status: Not authenticated. +func (c *conn) cmdLogin(tag, cmd string, p *parser) { + // Command: ../rfc/9051:1597 ../rfc/3501:1663 + + authResult := "error" + defer func() { + metrics.AuthenticationInc("imap", "login", authResult) + }() + + // Request syntax: ../rfc/9051:6667 ../rfc/3501:4804 + p.xspace() + userid := p.xastring() + p.xspace() + password := p.xastring() + p.xempty() + + if !c.noRequireSTARTTLS && !c.tls { + // ../rfc/9051:5194 + xusercodeErrorf("PRIVACYREQUIRED", "tls required for login") + } + + acc, err := store.OpenEmailAuth(userid, password) + if err != nil { + authResult = "badcreds" + var code string + if errors.Is(err, store.ErrUnknownCredentials) { + code = "AUTHENTICATIONFAILED" + } + xusercodeErrorf(code, "login failed") + } + c.account = acc + c.username = userid + c.comm = store.RegisterComm(acc) + c.state = stateAuthenticated + authResult = "ok" + c.writeresultf("%s OK [CAPABILITY %s] login done", tag, c.capabilities()) +} + +// Enable explicitly opts in to an extension. A server can typically send new kinds +// of responses to a client. 
+// of responses to a client. Most extensions do not require an ENABLE because a
+// client implicitly opts in to new response syntax by making a request that uses
+// new optional extension request syntax.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdEnable(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:1652 ../rfc/5161:80
+	// Examples: ../rfc/9051:1728 ../rfc/5161:147
+
+	// Request syntax: ../rfc/9051:6518 ../rfc/5161:207
+	p.xspace()
+	caps := []string{p.xatom()}
+	for !p.empty() {
+		p.xspace()
+		caps = append(caps, p.xatom())
+	}
+
+	// Clients should only send capabilities that need enabling.
+	// We should only echo that we recognize as needing enabling.
+	var enabled string
+	for _, s := range caps {
+		cap := capability(strings.ToUpper(s))
+		switch cap {
+		case capIMAP4rev2, capUTF8Accept:
+			c.enabled[cap] = true
+			enabled += " " + s
+		}
+	}
+
+	// Response syntax: ../rfc/9051:6520 ../rfc/5161:211
+	c.bwritelinef("* ENABLED%s", enabled)
+	c.ok(tag, cmd)
+}
+
+// State: Authenticated and selected.
+func (c *conn) cmdSelect(tag, cmd string, p *parser) {
+	c.cmdSelectExamine(true, tag, cmd, p)
+}
+
+// State: Authenticated and selected.
+func (c *conn) cmdExamine(tag, cmd string, p *parser) {
+	c.cmdSelectExamine(false, tag, cmd, p)
+}
+
+// Select and examine are almost the same commands. Select just opens a mailbox for
+// read/write and examine opens a mailbox readonly.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdSelectExamine(isselect bool, tag, cmd string, p *parser) {
+	// Select command: ../rfc/9051:1754 ../rfc/3501:1743
+	// Examine command: ../rfc/9051:1868 ../rfc/3501:1855
+	// Select examples: ../rfc/9051:1831 ../rfc/3501:1826
+
+	// Select request syntax: ../rfc/9051:7005 ../rfc/3501:4996
+	// Examine request syntax: ../rfc/9051:6551 ../rfc/3501:4746
+	p.xspace()
+	name := p.xmailbox()
+	p.xempty()
+
+	// Deselect before attempting the new select. This means we will deselect when an
+	// error occurs during select.
+	// ../rfc/9051:1809
+	if c.state == stateSelected {
+		// ../rfc/9051:1812
+		c.bwritelinef("* OK [CLOSED] x")
+		c.unselect()
+	}
+
+	name = xcheckmailboxname(name, true)
+
+	var firstUnseen msgseq = 0
+	var mb store.Mailbox
+	c.account.WithRLock(func() {
+		c.xdbread(func(tx *bstore.Tx) {
+			mb = c.xmailbox(tx, name, "")
+
+			// Load the mailbox's UIDs, in ascending order, tracking the first
+			// unseen message for the UNSEEN response code.
+			q := bstore.QueryTx[store.Message](tx)
+			q.FilterNonzero(store.Message{MailboxID: mb.ID})
+			q.SortAsc("UID")
+			c.uids = []store.UID{}
+			var seq msgseq = 1
+			err := q.ForEach(func(m store.Message) error {
+				c.uids = append(c.uids, m.UID)
+				if firstUnseen == 0 && !m.Seen {
+					firstUnseen = seq
+				}
+				seq++
+				return nil
+			})
+			if sanityChecks {
+				checkUIDs(c.uids)
+			}
+			xcheckf(err, "fetching uids")
+		})
+	})
+	// Drain pending changes without writing them, we just loaded fresh state.
+	c.applyChanges(c.comm.Get(), true)
+
+	c.bwritelinef(`* FLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent)`)
+	c.bwritelinef(`* OK [PERMANENTFLAGS (\Seen \Answered \Flagged \Deleted \Draft $Forwarded $Junk $NotJunk $Phishing $MDNSent)] x`)
+	if !c.enabled[capIMAP4rev2] {
+		// RECENT is IMAP4rev1-only.
+		c.bwritelinef(`* 0 RECENT`)
+	}
+	c.bwritelinef(`* %d EXISTS`, len(c.uids))
+	if !c.enabled[capIMAP4rev2] && firstUnseen > 0 {
+		// ../rfc/9051:8051 ../rfc/3501:1774
+		c.bwritelinef(`* OK [UNSEEN %d] x`, firstUnseen)
+	}
+	c.bwritelinef(`* OK [UIDVALIDITY %d] x`, mb.UIDValidity)
+	c.bwritelinef(`* OK [UIDNEXT %d] x`, mb.UIDNext)
+	c.bwritelinef(`* LIST () "/" %s`, astring(mb.Name).pack(c))
+	if isselect {
+		c.bwriteresultf("%s OK [READ-WRITE] x", tag)
+		c.readonly = false
+	} else {
+		c.bwriteresultf("%s OK [READ-ONLY] x", tag)
+		c.readonly = true
+	}
+	c.mailboxID = mb.ID
+	c.state = stateSelected
+	c.searchResult = nil
+	c.xflush()
+}
+
+// Create makes a new mailbox, and its parents too if absent.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdCreate(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:1900 ../rfc/3501:1888
+	// Examples: ../rfc/9051:1951 ../rfc/6154:411 ../rfc/4466:212 ../rfc/3501:1933
+
+	// Request syntax: ../rfc/9051:6484 ../rfc/6154:468 ../rfc/4466:500 ../rfc/3501:4687
+	p.xspace()
+	name := p.xmailbox()
+	// todo: support CREATE-SPECIAL-USE ../rfc/6154:296
+	p.xempty()
+
+	origName := name
+	name = strings.TrimRight(name, "/") // ../rfc/9051:1930
+	name = xcheckmailboxname(name, false)
+
+	var changes []store.Change
+	var created []string // Created mailbox names.
+
+	c.account.WithWLock(func() {
+		c.xdbwrite(func(tx *bstore.Tx) {
+			// Create each missing ancestor, then the mailbox itself.
+			elems := strings.Split(name, "/")
+			var p string // NOTE(review): shadows the parser parameter p.
+			for i, elem := range elems {
+				if i > 0 {
+					p += "/"
+				}
+				p += elem
+				if c.account.MailboxExistsX(tx, p) {
+					if i == len(elems)-1 {
+						// ../rfc/9051:1914
+						xuserErrorf("mailbox already exists")
+					}
+					continue
+				}
+				_, nchanges := c.account.MailboxEnsureX(tx, p, true)
+				changes = append(changes, nchanges...)
+				created = append(created, p)
+			}
+		})
+
+		c.broadcast(changes)
+	})
+
+	for _, n := range created {
+		var more string
+		// Announce the original (untrimmed) name as OLDNAME when it differs.
+		if n == name && name != origName && !(name == "Inbox" || strings.HasPrefix(name, "Inbox/")) {
+			more = fmt.Sprintf(` ("OLDNAME" (%s))`, string0(origName).pack(c))
+		}
+		c.bwritelinef(`* LIST (\Subscribed) "/" %s%s`, astring(n).pack(c), more)
+	}
+	c.ok(tag, cmd)
+}
+
+// Delete removes a mailbox and all its messages.
+// Inbox cannot be removed.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdDelete(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:1972 ../rfc/3501:1946
+	// Examples: ../rfc/9051:2025 ../rfc/3501:1992
+
+	// Request syntax: ../rfc/9051:6505 ../rfc/3501:4716
+	p.xspace()
+	name := p.xmailbox()
+	p.xempty()
+
+	name = xcheckmailboxname(name, false)
+
+	// Messages to remove after having broadcasted the removal of messages.
+	var remove []store.Message
+
+	c.account.WithWLock(func() {
+		var mb store.Mailbox
+
+		c.xdbwrite(func(tx *bstore.Tx) {
+			mb = c.xmailbox(tx, name, "NONEXISTENT")
+
+			// Look for existence of child mailboxes. There is a lot of text in the RFCs about
+			// NoInferior and NoSelect. We just require only leaf mailboxes are deleted.
+			qmb := bstore.QueryTx[store.Mailbox](tx)
+			mbprefix := name + "/"
+			qmb.FilterFn(func(mb store.Mailbox) bool {
+				return strings.HasPrefix(mb.Name, mbprefix)
+			})
+			childExists, err := qmb.Exists()
+			xcheckf(err, "checking child existence")
+			if childExists {
+				xusercodeErrorf("HASCHILDREN", "mailbox has a child, only leaf mailboxes can be deleted")
+			}
+
+			qm := bstore.QueryTx[store.Message](tx)
+			qm.FilterNonzero(store.Message{MailboxID: mb.ID})
+			remove, err = qm.List()
+			xcheckf(err, "listing messages to remove")
+
+			if len(remove) > 0 {
+				// Remove Recipient rows first, then the messages themselves.
+				removeIDs := make([]any, len(remove))
+				for i, m := range remove {
+					removeIDs[i] = m.ID
+				}
+				qmr := bstore.QueryTx[store.Recipient](tx)
+				qmr.FilterEqual("MessageID", removeIDs...)
+				_, err = qmr.Delete()
+				xcheckf(err, "removing message recipients for messages")
+
+				qm = bstore.QueryTx[store.Message](tx)
+				qm.FilterNonzero(store.Message{MailboxID: mb.ID})
+				_, err = qm.Delete()
+				xcheckf(err, "removing messages")
+
+				// Untrain the junk filter, except for the Rejects mailbox whose
+				// messages were never used for training.
+				conf, _ := c.account.Conf()
+				if name != conf.RejectsMailbox {
+					err = c.account.Untrain(c.log, remove)
+					xcheckf(err, "untraining deleted messages")
+				}
+			}
+
+			err = tx.Delete(&store.Mailbox{ID: mb.ID})
+			xcheckf(err, "removing mailbox")
+		})
+
+		c.broadcast([]store.Change{store.ChangeRemoveMailbox{Name: name}})
+	})
+
+	// Remove message files only after the transaction committed and the change
+	// was broadcast; a failure here is logged, not fatal.
+	for _, m := range remove {
+		p := c.account.MessagePath(m.ID)
+		if err := os.Remove(p); err != nil {
+			c.log.Infox("removing message file for mailbox delete", err, mlog.Field("path", p))
+		}
+	}
+
+	c.ok(tag, cmd)
+}
+
+// Rename changes the name of a mailbox.
+// Renaming INBOX is special, it moves the inbox messages to a new mailbox, leaving inbox empty.
+// Renaming a mailbox with submailboxes also renames all submailboxes.
+// Subscriptions stay with the old name, though newly created missing parent
+// mailboxes for the destination name are automatically subscribed.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdRename(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:2062 ../rfc/3501:2040
+	// Examples: ../rfc/9051:2132 ../rfc/3501:2092
+
+	// Request syntax: ../rfc/9051:6863 ../rfc/3501:4908
+	p.xspace()
+	src := p.xmailbox()
+	p.xspace()
+	dst := p.xmailbox()
+	p.xempty()
+
+	src = xcheckmailboxname(src, true)
+	dst = xcheckmailboxname(dst, false)
+
+	c.account.WithWLock(func() {
+		var changes []store.Change
+
+		c.xdbwrite(func(tx *bstore.Tx) {
+			uidval, err := c.account.NextUIDValidity(tx)
+			xcheckf(err, "next uid validity")
+
+			// Inbox is very special case. Unlike other mailboxes, its children are not moved. And
+			// unlike a regular move, its messages are moved to a newly created mailbox.
+			// We do indeed create a new destination mailbox and actually move the messages.
+			// ../rfc/9051:2101
+			if src == "Inbox" {
+				if c.account.MailboxExistsX(tx, dst) {
+					xusercodeErrorf("ALREADYEXISTS", "destination mailbox %q already exists", dst)
+				}
+				srcMB := c.account.MailboxFindX(tx, src)
+				if srcMB == nil {
+					xserverErrorf("inbox not found")
+				}
+				if dst == src {
+					xuserErrorf("cannot move inbox to itself")
+				}
+
+				dstMB := store.Mailbox{
+					Name:        dst,
+					UIDValidity: uidval,
+					UIDNext:     1,
+				}
+				err := tx.Insert(&dstMB)
+				xcheckf(err, "create new destination mailbox")
+
+				// Move all inbox messages to the new mailbox, gathering them so
+				// we can announce their removal from inbox.
+				var messages []store.Message
+				q := bstore.QueryTx[store.Message](tx)
+				q.FilterNonzero(store.Message{MailboxID: srcMB.ID})
+				q.Gather(&messages)
+				_, err = q.UpdateNonzero(store.Message{MailboxID: dstMB.ID})
+				xcheckf(err, "moving messages from inbox to destination mailbox")
+
+				uids := make([]store.UID, len(messages))
+				for i, m := range messages {
+					uids[i] = m.UID
+				}
+				var dstFlags []string
+				if tx.Get(&store.Subscription{Name: dstMB.Name}) == nil {
+					dstFlags = []string{`\Subscribed`}
+				}
+				changes = []store.Change{
+					store.ChangeRemoveUIDs{MailboxID: srcMB.ID, UIDs: uids},
+					store.ChangeAddMailbox{Name: dstMB.Name, Flags: dstFlags},
+					// todo: in future, we could announce all messages. no one is listening now though.
+				}
+				return
+			}
+
+			// We gather existing mailboxes that we need for deciding what to create/delete/update.
+			q := bstore.QueryTx[store.Mailbox](tx)
+			srcPrefix := src + "/"
+			dstRoot := strings.SplitN(dst, "/", 2)[0]
+			dstRootPrefix := dstRoot + "/"
+			q.FilterFn(func(mb store.Mailbox) bool {
+				return mb.Name == src || strings.HasPrefix(mb.Name, srcPrefix) || mb.Name == dstRoot || strings.HasPrefix(mb.Name, dstRootPrefix)
+			})
+			q.SortAsc("Name") // We'll rename the parents before children.
+			l, err := q.List()
+			xcheckf(err, "listing relevant mailboxes")
+
+			mailboxes := map[string]store.Mailbox{}
+			for _, mb := range l {
+				mailboxes[mb.Name] = mb
+			}
+
+			if _, ok := mailboxes[src]; !ok {
+				// ../rfc/9051:5140
+				xusercodeErrorf("NONEXISTENT", "mailbox does not exist")
+			}
+
+			// Ensure parent mailboxes for the destination paths exist.
+			var parent string
+			dstElems := strings.Split(dst, "/")
+			for i, elem := range dstElems[:len(dstElems)-1] {
+				if i > 0 {
+					parent += "/"
+				}
+				parent += elem
+
+				mb, ok := mailboxes[parent]
+				if ok {
+					continue
+				}
+				// NOTE(review): mb is the zero Mailbox here (ok is false), so
+				// omb.ID is 0 and Insert assigns a fresh ID — the omb
+				// indirection looks vestigial; confirm.
+				omb := mb
+				mb = store.Mailbox{
+					ID:          omb.ID,
+					Name:        parent,
+					UIDValidity: uidval,
+					UIDNext:     1,
+				}
+				err = tx.Insert(&mb)
+				xcheckf(err, "creating parent mailbox")
+				err = tx.Insert(&store.Subscription{Name: parent})
+				if err != nil && !errors.Is(err, bstore.ErrUnique) {
+					xcheckf(err, "creating subscription")
+				}
+				changes = append(changes, store.ChangeAddMailbox{Name: parent, Flags: []string{`\Subscribed`}})
+			}
+
+			// Process src mailboxes, renaming them to dst.
+			for _, srcmb := range l {
+				if srcmb.Name != src && !strings.HasPrefix(srcmb.Name, srcPrefix) {
+					continue
+				}
+				srcName := srcmb.Name
+				dstName := dst + srcmb.Name[len(src):]
+				if _, ok := mailboxes[dstName]; ok {
+					xusercodeErrorf("ALREADYEXISTS", "destination mailbox %q already exists", dstName)
+				}
+
+				srcmb.Name = dstName
+				srcmb.UIDValidity = uidval
+				err = tx.Update(&srcmb)
+				xcheckf(err, "renaming mailbox")
+
+				// Renaming Inbox is special, it leaves an empty inbox instead of removing it.
+				var dstFlags []string
+				if tx.Get(&store.Subscription{Name: dstName}) == nil {
+					dstFlags = []string{`\Subscribed`}
+				}
+				changes = append(changes, store.ChangeRenameMailbox{OldName: srcName, NewName: dstName, Flags: dstFlags})
+			}
+
+			// If we renamed e.g. a/b to a/b/c/d, and a/b/c to a/b/c/d/c, we'll have to recreate a/b and a/b/c.
+			srcElems := strings.Split(src, "/")
+			xsrc := src
+			for i := 0; i < len(dstElems) && strings.HasPrefix(dst, xsrc+"/"); i++ {
+				mb := store.Mailbox{
+					UIDValidity: uidval,
+					UIDNext:     1,
+					Name:        xsrc,
+				}
+				err = tx.Insert(&mb)
+				xcheckf(err, "creating mailbox at old path")
+				xsrc += "/" + dstElems[len(srcElems)+i]
+			}
+		})
+		c.broadcast(changes)
+	})
+
+	c.ok(tag, cmd)
+}
+
+// Subscribe marks a mailbox path as subscribed. The mailbox does not have to
+// exist. Subscribed may mean an email client will show the mailbox in its UI
+// and/or periodically fetch new messages for the mailbox.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdSubscribe(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:2172 ../rfc/3501:2135
+	// Examples: ../rfc/9051:2198 ../rfc/3501:2162
+
+	// Request syntax: ../rfc/9051:7083 ../rfc/3501:5059
+	p.xspace()
+	name := p.xmailbox()
+	p.xempty()
+
+	name = xcheckmailboxname(name, true)
+
+	c.account.WithWLock(func() {
+		var changes []store.Change
+
+		c.xdbwrite(func(tx *bstore.Tx) {
+			changes = c.account.SubscriptionEnsureX(tx, name)
+		})
+
+		c.broadcast(changes)
+	})
+
+	c.ok(tag, cmd)
+}
+
+// Unsubscribe marks a mailbox as not subscribed. The mailbox doesn't have to exist.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdUnsubscribe(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:2203 ../rfc/3501:2166
+	// Examples: ../rfc/9051:2219 ../rfc/3501:2181
+
+	// Request syntax: ../rfc/9051:7143 ../rfc/3501:5077
+	p.xspace()
+	name := p.xmailbox()
+	p.xempty()
+
+	name = xcheckmailboxname(name, true)
+
+	c.account.WithWLock(func() {
+		c.xdbwrite(func(tx *bstore.Tx) {
+			// It's OK if not currently subscribed, ../rfc/9051:2215
+			err := tx.Delete(&store.Subscription{Name: name})
+			if err == bstore.ErrAbsent {
+				// Not subscribed; still an error if the mailbox does not exist.
+				if !c.account.MailboxExistsX(tx, name) {
+					xuserErrorf("mailbox does not exist")
+				}
+				return
+			}
+			xcheckf(err, "removing subscription")
+		})
+
+		// todo: can we send untagged message about a mailbox no longer being subscribed?
+	})
+
+	c.ok(tag, cmd)
+}
+
+// LSUB command for listing subscribed mailboxes.
+// Removed in IMAP4rev2, only in IMAP4rev1.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdLsub(tag, cmd string, p *parser) {
+	// Command: ../rfc/3501:2374
+	// Examples: ../rfc/3501:2415
+
+	// Request syntax: ../rfc/3501:4806
+	p.xspace()
+	ref := p.xmailbox()
+	p.xspace()
+	pattern := p.xlistMailbox()
+	p.xempty()
+
+	re := xmailboxPatternMatcher(ref, []string{pattern})
+
+	var lines []string
+	c.xdbread(func(tx *bstore.Tx) {
+		q := bstore.QueryTx[store.Subscription](tx)
+		q.SortAsc("Name")
+		subscriptions, err := q.List()
+		xcheckf(err, "querying subscriptions")
+
+		have := map[string]bool{}
+		subscribedKids := map[string]bool{}
+		ispercent := strings.HasSuffix(pattern, "%")
+		for _, sub := range subscriptions {
+			name := sub.Name
+			if ispercent {
+				// Mark all ancestors as having a subscribed descendant, so we
+				// can report them as \NoSelect below.
+				// NOTE(review): filepath.Dir is OS-dependent; mailbox names
+				// always use "/" as separator, so path.Dir would be the
+				// portable choice here (filepath.Dir rewrites separators on
+				// Windows) — confirm and consider changing.
+				for p := filepath.Dir(name); p != "."; p = filepath.Dir(p) {
+					subscribedKids[p] = true
+				}
+			}
+			if !re.MatchString(name) {
+				continue
+			}
+			have[name] = true
+			line := fmt.Sprintf(`* LSUB () "/" %s`, astring(name).pack(c))
+			lines = append(lines, line)
+
+		}
+
+		// ../rfc/3501:2394
+		if !ispercent {
+			return
+		}
+		qmb := bstore.QueryTx[store.Mailbox](tx)
+		qmb.SortAsc("Name")
+		err = qmb.ForEach(func(mb store.Mailbox) error {
+			if have[mb.Name] || !subscribedKids[mb.Name] || !re.MatchString(mb.Name) {
+				return nil
+			}
+			line := fmt.Sprintf(`* LSUB (\NoSelect) "/" %s`, astring(mb.Name).pack(c))
+			lines = append(lines, line)
+			return nil
+		})
+		xcheckf(err, "querying mailboxes")
+	})
+
+	// Response syntax: ../rfc/3501:4833 ../rfc/3501:4837
+	for _, line := range lines {
+		c.bwritelinef("%s", line)
+	}
+	c.ok(tag, cmd)
+}
+
+// The namespace command returns the mailbox path separator. We only implement
+// the personal mailbox hierarchy, no shared/other.
+//
+// In IMAP4rev2, it was an extension before.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdNamespace(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:3098 ../rfc/2342:137
+	// Examples: ../rfc/9051:3117 ../rfc/2342:155
+	// Request syntax: ../rfc/9051:6767 ../rfc/2342:410
+	p.xempty()
+
+	// Response syntax: ../rfc/9051:6778 ../rfc/2342:415
+	c.bwritelinef(`* NAMESPACE (("" "/")) NIL NIL`)
+	c.ok(tag, cmd)
+}
+
+// The status command returns information about a mailbox, such as the number of
+// messages, "uid validity", etc. Nowadays, the extended LIST command can return
+// the same information about many mailboxes for one command.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdStatus(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:3328 ../rfc/3501:2424
+	// Examples: ../rfc/9051:3400 ../rfc/3501:2501
+
+	// Request syntax: ../rfc/9051:7053 ../rfc/3501:5036
+	p.xspace()
+	name := p.xmailbox()
+	p.xspace()
+	p.xtake("(")
+	attrs := []string{p.xstatusAtt()}
+	for !p.take(")") {
+		p.xspace()
+		attrs = append(attrs, p.xstatusAtt())
+	}
+	p.xempty()
+
+	name = xcheckmailboxname(name, true)
+
+	var mb store.Mailbox
+
+	var responseLine string
+	c.account.WithRLock(func() {
+		c.xdbread(func(tx *bstore.Tx) {
+			mb = c.xmailbox(tx, name, "")
+			responseLine = c.xstatusLine(tx, mb, attrs)
+		})
+	})
+
+	c.bwritelinef("%s", responseLine)
+	c.ok(tag, cmd)
+}
+
+// xstatusLine returns the untagged STATUS response line for mb with the
+// requested attributes. Counts are computed by scanning all messages of the
+// mailbox in one pass. Unknown attributes cause a syntax error panic.
+// Response syntax: ../rfc/9051:6681 ../rfc/9051:7070 ../rfc/9051:7059 ../rfc/3501:4834
+func (c *conn) xstatusLine(tx *bstore.Tx, mb store.Mailbox, attrs []string) string {
+	var count, unseen, deleted int
+	var size int64
+
+	q := bstore.QueryTx[store.Message](tx)
+	q.FilterNonzero(store.Message{MailboxID: mb.ID})
+	err := q.ForEach(func(m store.Message) error {
+		count++
+		if !m.Seen {
+			unseen++
+		}
+		if m.Deleted {
+			deleted++
+		}
+		size += m.Size
+		return nil
+	})
+	xcheckf(err, "processing mailbox messages")
+
+	status := []string{}
+	for _, a := range attrs {
+		A := strings.ToUpper(a)
+		switch A {
+		case "MESSAGES":
+			status = append(status, A, fmt.Sprintf("%d", count))
+		case "UIDNEXT":
+			status = append(status, A, fmt.Sprintf("%d", mb.UIDNext))
+		case "UIDVALIDITY":
+			status = append(status, A, fmt.Sprintf("%d", mb.UIDValidity))
+		case "UNSEEN":
+			status = append(status, A, fmt.Sprintf("%d", unseen))
+		case "DELETED":
+			status = append(status, A, fmt.Sprintf("%d", deleted))
+		case "SIZE":
+			status = append(status, A, fmt.Sprintf("%d", size))
+		case "RECENT":
+			// We do not implement \Recent, always zero.
+			status = append(status, A, "0")
+		case "APPENDLIMIT":
+			// ../rfc/7889:255
+			status = append(status, A, "NIL")
+		default:
+			xsyntaxErrorf("unknown attribute %q", a)
+		}
+	}
+	return fmt.Sprintf("* STATUS %s (%s)", astring(mb.Name).pack(c), strings.Join(status, " "))
+}
+
+// xparseStoreFlags parses the IMAP flag names in l (case-insensitive) into a
+// store.Flags. Unknown flags cause a panic: a syntax error if syntax is true,
+// a user error otherwise.
+func xparseStoreFlags(l []string, syntax bool) (flags store.Flags) {
+	fields := map[string]*bool{
+		`\answered`:  &flags.Answered,
+		`\flagged`:   &flags.Flagged,
+		`\deleted`:   &flags.Deleted,
+		`\seen`:      &flags.Seen,
+		`\draft`:     &flags.Draft,
+		`$junk`:      &flags.Junk,
+		`$notjunk`:   &flags.Notjunk,
+		`$forwarded`: &flags.Forwarded,
+		`$phishing`:  &flags.Phishing,
+		`$mdnsent`:   &flags.MDNSent,
+	}
+	for _, f := range l {
+		if field, ok := fields[strings.ToLower(f)]; !ok {
+			if syntax {
+				xsyntaxErrorf("unknown flag %q", f)
+			}
+			xuserErrorf("unknown flag %q", f)
+		} else {
+			*field = true
+		}
+	}
+	return
+}
+
+// flaglist renders the set flags in fl as an IMAP parenthesized list.
+func flaglist(fl store.Flags) listspace {
+	l := listspace{}
+	flag := func(v bool, s string) {
+		if v {
+			l = append(l, bare(s))
+		}
+	}
+	flag(fl.Seen, `\Seen`)
+	flag(fl.Answered, `\Answered`)
+	flag(fl.Flagged, `\Flagged`)
+	flag(fl.Deleted, `\Deleted`)
+	flag(fl.Draft, `\Draft`)
+	flag(fl.Forwarded, `$Forwarded`)
+	flag(fl.Junk, `$Junk`)
+	flag(fl.Notjunk, `$NotJunk`)
+	flag(fl.Phishing, `$Phishing`)
+	flag(fl.MDNSent, `$MDNSent`)
+	return l
+}
+
+// Append adds a message to a mailbox.
+//
+// State: Authenticated and selected.
+func (c *conn) cmdAppend(tag, cmd string, p *parser) {
+	// Command: ../rfc/9051:3406 ../rfc/6855:204 ../rfc/3501:2527
+	// Examples: ../rfc/9051:3482 ../rfc/3501:2589
+
+	// Request syntax: ../rfc/9051:6325 ../rfc/6855:219 ../rfc/3501:4547
+	p.xspace()
+	name := p.xmailbox()
+	p.xspace()
+	var storeFlags store.Flags
+	if p.hasPrefix("(") {
+		// Error must be a syntax error, to properly abort the connection due to literal.
+		storeFlags = xparseStoreFlags(p.xflagList(), true)
+		p.xspace()
+	}
+	var tm time.Time
+	if p.hasPrefix(`"`) {
+		tm = p.xdateTime()
+		p.xspace()
+	} else {
+		tm = time.Now()
+	}
+	// todo: only with utf8 should we we accept message headers with utf-8. we currently always accept them.
+	// todo: this is only relevant if we also support the CATENATE extension?
+	// ../rfc/6855:204
+	utf8 := p.take("UTF8 (")
+	size, sync := p.xliteralSize(0, utf8)
+
+	// Check the mailbox exists before reading the literal, so a sync literal
+	// can be answered with TRYCREATE before the client sends the data.
+	name = xcheckmailboxname(name, true)
+	c.xdbread(func(tx *bstore.Tx) {
+		c.xmailbox(tx, name, "TRYCREATE")
+	})
+	if sync {
+		c.writelinef("+")
+	}
+
+	// Read the message into a temporary file.
+	msgFile, err := store.CreateMessageTemp("imap-append")
+	xcheckf(err, "creating temp file for message")
+	defer func() {
+		if msgFile != nil {
+			err := os.Remove(msgFile.Name())
+			c.xsanity(err, "removing APPEND temporary file")
+			err = msgFile.Close()
+			c.xsanity(err, "closing APPEND temporary file")
+		}
+	}()
+	mw := &message.Writer{Writer: msgFile}
+	msize, err := io.Copy(mw, io.LimitReader(c.br, size))
+	if err != nil {
+		// Cannot use xcheckf due to %w handling of errIO.
+		panic(fmt.Errorf("reading literal message: %s (%w)", err, errIO))
+	}
+	if msize != size {
+		xserverErrorf("read %d bytes for message, expected %d (%w)", msize, size, errIO)
+	}
+	msgPrefix := []byte{}
+	// todo: should we treat the message as body? i believe headers are required in messages, and bodies are optional. so would make more sense to treat the data as headers. perhaps only if the headers are valid?
+	if !mw.HaveHeaders {
+		// Ensure a header/body separator so the stored message is well-formed.
+		msgPrefix = []byte("\r\n")
+	}
+
+	// Consume the rest of the line after the literal: a closing paren for the
+	// UTF8 form, nothing otherwise.
+	if utf8 {
+		line := c.readline(false)
+		np := newParser(line, c)
+		np.xtake(")")
+		np.xempty()
+	} else {
+		line := c.readline(false)
+		np := newParser(line, c)
+		np.xempty()
+	}
+	p.xempty()
+	if !sync {
+		// NOTE(review): name was already checked and canonicalized above; this
+		// re-check for non-sync literals appears redundant — confirm.
+		name = xcheckmailboxname(name, true)
+	}
+
+	var mb store.Mailbox
+	var msg store.Message
+	var pendingChanges []store.Change
+
+	c.account.WithWLock(func() {
+		c.xdbwrite(func(tx *bstore.Tx) {
+			mb = c.xmailbox(tx, name, "TRYCREATE")
+			msg = store.Message{
+				MailboxID:     mb.ID,
+				MailboxOrigID: mb.ID,
+				Received:      tm,
+				Flags:         storeFlags,
+				Size:          size,
+				MsgPrefix:     msgPrefix,
+			}
+			isSent := name == "Sent"
+			c.account.DeliverX(c.log, tx, &msg, msgFile, true, isSent, true, true)
+		})
+
+		// Fetch pending changes, possibly with new UIDs, so we can apply them before adding our own new UID.
+		if c.comm != nil {
+			pendingChanges = c.comm.Get()
+		}
+
+		// Broadcast the change to other connections.
+		c.broadcast([]store.Change{store.ChangeAddUID{MailboxID: mb.ID, UID: msg.UID, Flags: msg.Flags}})
+	})
+
+	// Message was delivered; prevent the deferred cleanup from removing it.
+	msgFile.Close()
+	msgFile = nil
+
+	if c.mailboxID == mb.ID {
+		c.applyChanges(pendingChanges, false)
+		c.uidAppend(msg.UID)
+		c.bwritelinef("* %d EXISTS", len(c.uids))
+	}
+
+	c.writeresultf("%s OK [APPENDUID %d %d] appended", tag, mb.UIDValidity, msg.UID)
+}
+
+// Idle makes a client wait until the server sends untagged updates, e.g. about
+// message delivery or mailbox create/rename/delete/subscription, etc. It allows a
+// client to get updates in real-time, not needing the use for NOOP.
+//
+// State: Authenticated and selected.
func (c *conn) cmdIdle(tag, cmd string, p *parser) {
	// Command: ../rfc/9051:3542 ../rfc/2177:49
	// Example: ../rfc/9051:3589 ../rfc/2177:119

	// Request syntax: ../rfc/9051:6594 ../rfc/2177:163
	p.xempty()

	c.writelinef("+ waiting")

	// Wait for either a line from the client (which should be DONE), changes to
	// relay as untagged responses, or server shutdown.
	var line string
wait:
	for {
		select {
		case le := <-c.lineChan():
			c.line = nil
			xcheckf(le.err, "get line")
			line = le.line
			break wait
		case changes := <-c.comm.Changes:
			c.applyChanges(changes, false)
			c.xflush()
		case <-mox.Shutdown:
			// ../rfc/9051:5375
			c.writelinef("* BYE shutting down")
			panic(errIO)
		}
	}

	// Reset the write deadline. In case of little activity, with a command timeout of
	// 30 minutes, we have likely passed it.
	c.conn.SetWriteDeadline(time.Now().Add(5 * time.Minute))

	if strings.ToUpper(line) != "DONE" {
		// We just close the connection because our protocols are out of sync.
		panic(fmt.Errorf("%w: in IDLE, expected DONE", errIO))
	}

	c.ok(tag, cmd)
}

// Check is an old deprecated command that is supposed to execute some mailbox consistency checks.
// We only validate that the selected mailbox still exists.
//
// State: Selected
func (c *conn) cmdCheck(tag, cmd string, p *parser) {
	// Command: ../rfc/3501:2618

	// Request syntax: ../rfc/3501:4679
	p.xempty()

	c.account.WithRLock(func() {
		c.xdbread(func(tx *bstore.Tx) {
			c.xmailboxID(tx, c.mailboxID) // Validate.
		})
	})

	c.ok(tag, cmd)
}

// Close undoes select/examine, closing the currently opened mailbox and deleting
// messages that were marked for deletion with the \Deleted flag.
//
// State: Selected
func (c *conn) cmdClose(tag, cmd string, p *parser) {
	// Command: ../rfc/9051:3636 ../rfc/3501:2652

	// Request syntax: ../rfc/9051:6476 ../rfc/3501:4679
	p.xempty()

	// In read-only mode no expunge happens, just unselect.
	if c.readonly {
		c.unselect()
		c.ok(tag, cmd)
		return
	}

	remove := c.xexpunge(nil, true)

	// Remove the files for the expunged messages after the response below has been
	// prepared; xexpunge already removed them from the database.
	defer func() {
		for _, m := range remove {
			p := c.account.MessagePath(m.ID)
			err := os.Remove(p)
			c.xsanity(err, "removing message file for expunge for close")
		}
	}()

	c.unselect()
	c.ok(tag, cmd)
}

// expunge messages marked for deletion in currently selected/active mailbox.
// if uidSet is not nil, only messages matching the set are deleted.
// messages that have been deleted from the database returned, but the corresponding files still have to be removed.
func (c *conn) xexpunge(uidSet *numSet, missingMailboxOK bool) []store.Message {
	var remove []store.Message

	c.account.WithWLock(func() {
		c.xdbwrite(func(tx *bstore.Tx) {
			mb := store.Mailbox{ID: c.mailboxID}
			err := tx.Get(&mb)
			if err == bstore.ErrAbsent {
				if missingMailboxOK {
					return
				}
				xuserErrorf("%w", store.ErrUnknownMailbox)
			}

			qm := bstore.QueryTx[store.Message](tx)
			qm.FilterNonzero(store.Message{MailboxID: c.mailboxID})
			qm.FilterEqual("Deleted", true)
			qm.FilterFn(func(m store.Message) bool {
				// Only remove if this session knows about the message and if present in optional uidSet.
				return uidSearch(c.uids, m.UID) > 0 && (uidSet == nil || uidSet.containsUID(m.UID, c.uids, c.searchResult))
			})
			qm.SortAsc("UID")
			remove, err = qm.List()
			xcheckf(err, "listing messages to delete")

			if len(remove) == 0 {
				return
			}

			// Remove recipients of the messages, then the messages themselves.
			removeIDs := make([]int64, len(remove))
			anyIDs := make([]any, len(remove))
			for i, m := range remove {
				removeIDs[i] = m.ID
				anyIDs[i] = m.ID
			}
			qmr := bstore.QueryTx[store.Recipient](tx)
			qmr.FilterEqual("MessageID", anyIDs...)
			_, err = qmr.Delete()
			xcheckf(err, "removing message recipients")

			qm = bstore.QueryTx[store.Message](tx)
			qm.FilterIDs(removeIDs)
			_, err = qm.Delete()
			xcheckf(err, "removing messages marked for deletion")

			// Untrain the junk filter with the removed messages, except for the
			// configured rejects mailbox.
			conf, _ := c.account.Conf()
			if mb.Name != conf.RejectsMailbox {
				err = c.account.Untrain(c.log, remove)
				xcheckf(err, "untraining deleted messages")
			}
		})

		// Broadcast changes to other connections. We may not have actually removed any
		// messages, so take care not to send an empty update.
		if len(remove) > 0 {
			ouids := make([]store.UID, len(remove))
			for i, m := range remove {
				ouids[i] = m.UID
			}
			changes := []store.Change{store.ChangeRemoveUIDs{MailboxID: c.mailboxID, UIDs: ouids}}
			c.broadcast(changes)
		}
	})
	return remove
}

// Unselect is similar to close in that it closes the currently active mailbox, but
// it does not remove messages marked for deletion.
//
// State: Selected
func (c *conn) cmdUnselect(tag, cmd string, p *parser) {
	// Command: ../rfc/9051:3667 ../rfc/3691:89

	// Request syntax: ../rfc/9051:6476 ../rfc/3691:135
	p.xempty()

	c.unselect()
	c.ok(tag, cmd)
}

// Expunge deletes messages marked with \Deleted in the currently selected mailbox.
// Clients are wiser to use UID EXPUNGE because it allows a UID sequence set to
// explicitly opt in to removing specific messages.
//
// State: Selected
func (c *conn) cmdExpunge(tag, cmd string, p *parser) {
	// Command: ../rfc/9051:3687 ../rfc/3501:2695

	// Request syntax: ../rfc/9051:6476 ../rfc/3501:4679
	p.xempty()

	if c.readonly {
		xuserErrorf("mailbox open in read-only mode")
	}

	c.cmdxExpunge(tag, cmd, nil)
}

// UID expunge deletes messages marked with \Deleted in the currently selected
// mailbox if they match a UID sequence set.
+// +// State: Selected +func (c *conn) cmdUIDExpunge(tag, cmd string, p *parser) { + // Command: ../rfc/9051:4775 ../rfc/4315:75 + + // Request syntax: ../rfc/9051:7125 ../rfc/9051:7129 ../rfc/4315:298 + p.xspace() + uidSet := p.xnumSet() + p.xempty() + + if c.readonly { + xuserErrorf("mailbox open in read-only mode") + } + + c.cmdxExpunge(tag, cmd, &uidSet) +} + +// Permanently delete messages for the currently selected/active mailbox. If uidset +// is not nil, only those UIDs are removed. +// State: Selected +func (c *conn) cmdxExpunge(tag, cmd string, uidSet *numSet) { + // Command: ../rfc/9051:3687 ../rfc/3501:2695 + + remove := c.xexpunge(uidSet, false) + + defer func() { + for _, m := range remove { + p := c.account.MessagePath(m.ID) + err := os.Remove(p) + c.xsanity(err, "removing message file for expunge") + } + }() + + // Response syntax: ../rfc/9051:6742 ../rfc/3501:4864 + for _, m := range remove { + seq := c.xsequence(m.UID) + c.sequenceRemove(seq, m.UID) + c.bwritelinef("* %d EXPUNGE", seq) + } + + c.ok(tag, cmd) +} + +// State: Selected +func (c *conn) cmdSearch(tag, cmd string, p *parser) { + c.cmdxSearch(false, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdUIDSearch(tag, cmd string, p *parser) { + c.cmdxSearch(true, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdFetch(tag, cmd string, p *parser) { + c.cmdxFetch(false, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdUIDFetch(tag, cmd string, p *parser) { + c.cmdxFetch(true, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdStore(tag, cmd string, p *parser) { + c.cmdxStore(false, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdUIDStore(tag, cmd string, p *parser) { + c.cmdxStore(true, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdCopy(tag, cmd string, p *parser) { + c.cmdxCopy(false, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdUIDCopy(tag, cmd string, p *parser) { + c.cmdxCopy(true, tag, cmd, p) +} + +// State: Selected +func (c 
*conn) cmdMove(tag, cmd string, p *parser) { + c.cmdxMove(false, tag, cmd, p) +} + +// State: Selected +func (c *conn) cmdUIDMove(tag, cmd string, p *parser) { + c.cmdxMove(true, tag, cmd, p) +} + +func (c *conn) gatherCopyMoveUIDs(isUID bool, nums numSet) ([]store.UID, []any) { + // Gather uids, then sort so we can return a consistently simple and hard to + // misinterpret COPYUID/MOVEUID response. It seems safer to have UIDs in ascending + // order, because requested uid set of 12:10 is equal to 10:12, so if we would just + // echo whatever the client sends us without reordering, the client can reorder our + // response and interpret it differently than we intended. + // ../rfc/9051:5072 + uids := c.xnumSetUIDs(isUID, nums) + sort.Slice(uids, func(i, j int) bool { + return uids[i] < uids[j] + }) + uidargs := make([]any, len(uids)) + for i, uid := range uids { + uidargs[i] = uid + } + return uids, uidargs +} + +// Copy copies messages from the currently selected/active mailbox to another named +// mailbox. +// +// State: Selected +func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) { + // Command: ../rfc/9051:4602 ../rfc/3501:3288 + + // Request syntax: ../rfc/9051:6482 ../rfc/3501:4685 + p.xspace() + nums := p.xnumSet() + p.xspace() + name := p.xmailbox() + p.xempty() + + name = xcheckmailboxname(name, true) + + uids, uidargs := c.gatherCopyMoveUIDs(isUID, nums) + + // Files that were created during the copy. Remove them if the operation fails. + var createdIDs []int64 + defer func() { + x := recover() + if x == nil { + return + } + for _, id := range createdIDs { + p := c.account.MessagePath(id) + err := os.Remove(p) + c.xsanity(err, "cleaning up created file") + } + panic(x) + }() + + var mbDst store.Mailbox + var origUIDs, newUIDs []store.UID + var flags []store.Flags + + c.account.WithWLock(func() { + c.xdbwrite(func(tx *bstore.Tx) { + mbSrc := c.xmailboxID(tx, c.mailboxID) // Validate. 
+ mbDst = c.xmailbox(tx, name, "TRYCREATE") + if mbDst.ID == mbSrc.ID { + xuserErrorf("cannot copy to currently selected mailbox") + } + + if len(uidargs) == 0 { + xuserErrorf("no matching messages to copy") + } + + // Reserve the uids in the destination mailbox. + uidFirst := mbDst.UIDNext + mbDst.UIDNext += store.UID(len(uidargs)) + err := tx.Update(&mbDst) + xcheckf(err, "reserve uid in destination mailbox") + + // Fetch messages from database. + q := bstore.QueryTx[store.Message](tx) + q.FilterNonzero(store.Message{MailboxID: c.mailboxID}) + q.FilterEqual("UID", uidargs...) + xmsgs, err := q.List() + xcheckf(err, "fetching messages") + + if len(xmsgs) != len(uidargs) { + xserverErrorf("uid and message mismatch") + } + + msgs := map[store.UID]store.Message{} + for _, m := range xmsgs { + msgs[m.UID] = m + } + + // Insert new messages into database. + var origMsgIDs, newMsgIDs []int64 + for i, uid := range uids { + m, ok := msgs[uid] + if !ok { + xuserErrorf("messages changed, could not fetch requested uid") + } + origID := m.ID + origMsgIDs = append(origMsgIDs, origID) + m.ID = 0 + m.UID = uidFirst + store.UID(i) + m.MailboxID = mbDst.ID + m.MailboxOrigID = mbSrc.ID + err := tx.Insert(&m) + xcheckf(err, "inserting message") + msgs[uid] = m + origUIDs = append(origUIDs, uid) + newUIDs = append(newUIDs, m.UID) + newMsgIDs = append(newMsgIDs, m.ID) + flags = append(flags, m.Flags) + + qmr := bstore.QueryTx[store.Recipient](tx) + qmr.FilterNonzero(store.Recipient{MessageID: origID}) + mrs, err := qmr.List() + xcheckf(err, "listing message recipients") + for _, mr := range mrs { + mr.ID = 0 + mr.MessageID = m.ID + err := tx.Insert(&mr) + xcheckf(err, "inserting message recipient") + } + } + + // Copy message files to new message ID's. 
+ for i := range origMsgIDs { + src := c.account.MessagePath(origMsgIDs[i]) + dst := c.account.MessagePath(newMsgIDs[i]) + os.MkdirAll(filepath.Dir(dst), 0770) // todo optimization: keep track of dirs we already created, don't create them again + err := c.linkOrCopyFile(dst, src) + xcheckf(err, "link or copy file %q to %q", src, dst) + createdIDs = append(createdIDs, newMsgIDs[i]) + } + + conf, _ := c.account.Conf() + if mbDst.Name != conf.RejectsMailbox { + err = c.account.Train(c.log, xmsgs) + xcheckf(err, "train copied messages") + } + }) + + // Broadcast changes to other connections. + if len(newUIDs) > 0 { + changes := make([]store.Change, len(newUIDs)) + for i, uid := range newUIDs { + changes[i] = store.ChangeAddUID{MailboxID: mbDst.ID, UID: uid, Flags: flags[i]} + } + c.broadcast(changes) + } + }) + + // All good, prevent defer above from cleaning up copied files. + createdIDs = nil + + // ../rfc/9051:6881 ../rfc/4315:183 + c.writeresultf("%s OK [COPYUID %d %s %s] copied", tag, mbDst.UIDValidity, compactUIDSet(origUIDs).String(), compactUIDSet(newUIDs).String()) +} + +func (c *conn) linkOrCopyFile(dst, src string) error { + // Try hardlink first. + if err := os.Link(src, dst); err == nil { + return nil + } + + // File system may not support hardlinks, or link would cross file systems. Do a regular file copy. 
+ sf, err := os.Open(src) + if err != nil { + return err + } + defer func() { + err := sf.Close() + c.xsanity(err, "closing copied src file") + }() + + df, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0660) + if err != nil { + return err + } + defer func() { + if df != nil { + err = os.Remove(df.Name()) + c.xsanity(err, "removing unfinished dst file") + df.Close() + c.xsanity(err, "closing unfinished dst file") + } + }() + + if _, err := io.Copy(df, sf); err != nil { + return err + } + if err := df.Close(); err != nil { + xerr := os.Remove(df.Name()) + c.xsanity(xerr, "removing unfinished dst file") + df = nil + return err + } + // todo: may need to do a file/dir sync to flush to disk. better to do it once after multiple linkOrCopyFile calls. + df = nil + return nil +} + +// Move moves messages from the currently selected/active mailbox to a named mailbox. +// +// State: Selected +func (c *conn) cmdxMove(isUID bool, tag, cmd string, p *parser) { + // Command: ../rfc/9051:4650 ../rfc/6851:119 + + // Request syntax: ../rfc/6851:320 ../rfc/9051:6744 + p.xspace() + nums := p.xnumSet() + p.xspace() + name := p.xmailbox() + p.xempty() + + name = xcheckmailboxname(name, true) + + if c.readonly { + xuserErrorf("mailbox open in read-only mode") + } + + uids, uidargs := c.gatherCopyMoveUIDs(isUID, nums) + + var mbDst store.Mailbox + var changes []store.Change + var newUIDs []store.UID + + c.account.WithWLock(func() { + c.xdbwrite(func(tx *bstore.Tx) { + c.xmailboxID(tx, c.mailboxID) // Validate. + mbDst = c.xmailbox(tx, name, "TRYCREATE") + if mbDst.ID == c.mailboxID { + xuserErrorf("cannot move to currently selected mailbox") + } + + if len(uidargs) == 0 { + xuserErrorf("no matching messages to move") + } + + // Reserve the uids in the destination mailbox. 
+ uidFirst := mbDst.UIDNext + uidnext := uidFirst + mbDst.UIDNext += store.UID(len(uids)) + err := tx.Update(&mbDst) + xcheckf(err, "reserve uids in destination mailbox") + + // Update UID and MailboxID in database for messages. + q := bstore.QueryTx[store.Message](tx) + q.FilterNonzero(store.Message{MailboxID: c.mailboxID}) + q.FilterEqual("UID", uidargs...) + q.SortAsc("UID") + msgs, err := q.List() + xcheckf(err, "listing messages to move") + + if len(msgs) != len(uidargs) { + xserverErrorf("uid and message mismatch") + } + + for i := range msgs { + m := &msgs[i] + if m.UID != uids[i] { + xserverErrorf("internal error: got uid %d, expected %d, for index %d", m.UID, uids[i], i) + } + m.MailboxID = mbDst.ID + m.UID = uidnext + uidnext++ + err := tx.Update(m) + xcheckf(err, "updating moved message in database") + } + + // Prepare broadcast changes to other connections. + changes = make([]store.Change, 0, 1+len(msgs)) + changes = append(changes, store.ChangeRemoveUIDs{MailboxID: c.mailboxID, UIDs: uids}) + for _, m := range msgs { + newUIDs = append(newUIDs, m.UID) + changes = append(changes, store.ChangeAddUID{MailboxID: mbDst.ID, UID: m.UID, Flags: m.Flags}) + } + }) + + c.broadcast(changes) + }) + + // ../rfc/9051:4708 ../rfc/6851:254 + // ../rfc/9051:4713 + c.bwritelinef("* OK [COPYUID %d %s %s] moved", mbDst.UIDValidity, compactUIDSet(uids).String(), compactUIDSet(newUIDs).String()) + for i := 0; i < len(uids); i++ { + seq := c.xsequence(uids[i]) + c.sequenceRemove(seq, uids[i]) + c.bwritelinef("* %d EXPUNGE", seq) + } + + c.ok(tag, cmd) +} + +// Store sets a full set of flags, or adds/removes specific flags. 
+// +// State: Selected +func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) { + // Command: ../rfc/9051:4543 ../rfc/3501:3214 + + // Request syntax: ../rfc/9051:7076 ../rfc/3501:5052 + p.xspace() + nums := p.xnumSet() + p.xspace() + var plus, minus bool + if p.take("+") { + plus = true + } else if p.take("-") { + minus = true + } + p.xtake("FLAGS") + silent := p.take(".SILENT") + p.xspace() + var flagstrs []string + if p.hasPrefix("(") { + flagstrs = p.xflagList() + } else { + flagstrs = append(flagstrs, p.xflag()) + for p.space() { + flagstrs = append(flagstrs, p.xflag()) + } + } + p.xempty() + + if c.readonly { + xuserErrorf("mailbox open in read-only mode") + } + + var mask, flags store.Flags + if plus { + mask = xparseStoreFlags(flagstrs, false) + flags = store.FlagsAll + } else if minus { + mask = xparseStoreFlags(flagstrs, false) + flags = store.Flags{} + } else { + mask = store.FlagsAll + flags = xparseStoreFlags(flagstrs, false) + } + + updates := store.FlagsQuerySet(mask, flags) + + var updated []store.Message + var oflags []store.Flags + + c.account.WithWLock(func() { + var mb store.Mailbox + c.xdbwrite(func(tx *bstore.Tx) { + mb = c.xmailboxID(tx, c.mailboxID) // Validate. + + uidargs := c.xnumSetCondition(isUID, nums) + + if len(uidargs) == 0 { + return + } + + q := bstore.QueryTx[store.Message](tx) + q.FilterNonzero(store.Message{MailboxID: c.mailboxID}) + q.FilterEqual("UID", uidargs...) + q.FilterFn(func(m store.Message) bool { + // We use this filter just to get the pre-update flags... 
+ oflags = append(oflags, m.Flags) + return true + }) + if len(updates) == 0 { + var err error + updated, err = q.List() + xcheckf(err, "listing for flags") + } else { + q.Gather(&updated) + _, err := q.UpdateFields(updates) + xcheckf(err, "updating flags") + } + }) + + conf, _ := c.account.Conf() + if mb.Name != conf.RejectsMailbox { + jf, _, err := c.account.OpenJunkFilter(c.log) + if err == nil { + defer func() { + if jf != nil { + err := jf.Close() + c.xsanity(err, "closing junkfilter") + } + }() + for i, m := range updated { + err := c.account.Retrain(c.log, jf, oflags[i], m) + xcheckf(err, "retraining message") + } + err = jf.Close() + jf = nil + xcheckf(err, "closing junkfilter") + } else if !errors.Is(err, store.ErrNoJunkFilter) { + xcheckf(err, "open junk filter for retraining") + } + } + + // Broadcast changes to other connections. + changes := make([]store.Change, len(updated)) + for i, m := range updated { + changes[i] = store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: mask, Flags: m.Flags} + } + c.broadcast(changes) + }) + + for _, m := range updated { + if !silent { + // ../rfc/9051:6749 ../rfc/3501:4869 + c.bwritelinef("* %d FETCH (UID %d FLAGS %s)", c.xsequence(m.UID), m.UID, flaglist(m.Flags).pack(c)) + } + } + + c.ok(tag, cmd) +} diff --git a/imapserver/server_test.go b/imapserver/server_test.go new file mode 100644 index 0000000..f3ff1b4 --- /dev/null +++ b/imapserver/server_test.go @@ -0,0 +1,646 @@ +package imapserver + +import ( + "context" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "fmt" + "math/big" + "net" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/mjl-/mox/imapclient" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/store" +) + +func init() { + sanityChecks = true +} + +func tocrlf(s string) string { + return strings.ReplaceAll(s, "\n", "\r\n") +} + +// From ../rfc/3501:2589 +var exampleMsg = tocrlf(`Date: Mon, 7 Feb 1994 21:52:25 
-0800 (PST) +From: Fred Foobar +Subject: afternoon meeting +To: mooch@owatagu.siam.edu.example +Message-Id: +MIME-Version: 1.0 +Content-Type: TEXT/PLAIN; CHARSET=US-ASCII + +Hello Joe, do you think we can meet at 3:30 tomorrow? + +`) + +/* +From ../rfc/2049:801 + +Message structure: + +Message - multipart/mixed +Part 1 - no content-type +Part 2 - text/plain +Part 3 - multipart/parallel +Part 3.1 - audio/basic (base64) +Part 3.2 - image/jpeg (base64, empty) +Part 4 - text/enriched +Part 5 - message/rfc822 +Part 5.1 - text/plain (quoted-printable) +*/ +var nestedMessage = tocrlf(`MIME-Version: 1.0 +From: Nathaniel Borenstein +To: Ned Freed +Date: Fri, 07 Oct 1994 16:15:05 -0700 (PDT) +Subject: A multipart example +Content-Type: multipart/mixed; + boundary=unique-boundary-1 + +This is the preamble area of a multipart message. +Mail readers that understand multipart format +should ignore this preamble. + +If you are reading this text, you might want to +consider changing to a mail reader that understands +how to properly display multipart messages. + +--unique-boundary-1 + + ... Some text appears here ... + +[Note that the blank between the boundary and the start + of the text in this part means no header fields were + given and this is text in the US-ASCII character set. + It could have been done with explicit typing as in the + next part.] + +--unique-boundary-1 +Content-type: text/plain; charset=US-ASCII + +This could have been part of the previous part, but +illustrates explicit versus implicit typing of body +parts. + +--unique-boundary-1 +Content-Type: multipart/parallel; boundary=unique-boundary-2 + +--unique-boundary-2 +Content-Type: audio/basic +Content-Transfer-Encoding: base64 + +aGVsbG8NCndvcmxkDQo= + +--unique-boundary-2 +Content-Type: image/jpeg +Content-Transfer-Encoding: base64 + + +--unique-boundary-2-- + +--unique-boundary-1 +Content-type: text/enriched + +This is enriched. +as defined in RFC 1896 + +Isn't it +cool? 
+ +--unique-boundary-1 +Content-Type: message/rfc822 + +From: info@mox.example +To: mox +Subject: (subject in US-ASCII) +Content-Type: Text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: Quoted-printable + + ... Additional text in ISO-8859-1 goes here ... + +--unique-boundary-1-- +`) + +func tcheck(t *testing.T, err error, msg string) { + t.Helper() + if err != nil { + t.Fatalf("%s: %s", msg, err) + } +} + +func mockUIDValidity() func() { + orig := store.InitialUIDValidity + store.InitialUIDValidity = func() uint32 { + return 1 + } + return func() { + store.InitialUIDValidity = orig + } +} + +type testconn struct { + t *testing.T + conn net.Conn + client *imapclient.Conn + done chan struct{} + serverConn net.Conn + + // Result of last command. + lastUntagged []imapclient.Untagged + lastResult imapclient.Result + lastErr error +} + +func (tc *testconn) check(err error, msg string) { + tc.t.Helper() + if err != nil { + tc.t.Fatalf("%s: %s", msg, err) + } +} + +func (tc *testconn) last(l []imapclient.Untagged, r imapclient.Result, err error) { + tc.lastUntagged = l + tc.lastResult = r + tc.lastErr = err +} + +func (tc *testconn) xcode(s string) { + tc.t.Helper() + if tc.lastResult.Code != s { + tc.t.Fatalf("got last code %q, expected %q", tc.lastResult.Code, s) + } +} + +func (tc *testconn) xcodeArg(v any) { + tc.t.Helper() + if !reflect.DeepEqual(tc.lastResult.CodeArg, v) { + tc.t.Fatalf("got last code argument %v, expected %v", tc.lastResult.CodeArg, v) + } +} + +func (tc *testconn) xuntagged(exps ...any) { + tc.t.Helper() + last := append([]imapclient.Untagged{}, tc.lastUntagged...) 
+next: + for ei, exp := range exps { + for i, l := range last { + if reflect.TypeOf(l) != reflect.TypeOf(exp) { + continue + } + if !reflect.DeepEqual(l, exp) { + tc.t.Fatalf("untagged data mismatch, got:\n\t%T %#v\nexpected:\n\t%T %#v", l, l, exp, exp) + } + copy(last[i:], last[i+1:]) + last = last[:len(last)-1] + continue next + } + var next string + if len(tc.lastUntagged) > 0 { + next = fmt.Sprintf(", next %#v", tc.lastUntagged[0]) + } + tc.t.Fatalf("did not find untagged response %#v %T (%d) in %v%s", exp, exp, ei, tc.lastUntagged, next) + } + if len(last) > 0 { + tc.t.Fatalf("leftover untagged responses %v", last) + } +} + +func tuntagged(t *testing.T, got imapclient.Untagged, dst any) { + t.Helper() + gotv := reflect.ValueOf(got) + dstv := reflect.ValueOf(dst) + if gotv.Type() != dstv.Type().Elem() { + t.Fatalf("got %v, expected %v", gotv.Type(), dstv.Type().Elem()) + } + dstv.Elem().Set(gotv) +} + +func (tc *testconn) xnountagged() { + tc.t.Helper() + if len(tc.lastUntagged) != 0 { + tc.t.Fatalf("got %v untagged, expected 0", tc.lastUntagged) + } +} + +func (tc *testconn) transactf(status, format string, args ...any) { + tc.t.Helper() + tc.cmdf("", format, args...) + tc.response(status) +} + +func (tc *testconn) response(status string) { + tc.t.Helper() + tc.lastUntagged, tc.lastResult, tc.lastErr = tc.client.Response() + tcheck(tc.t, tc.lastErr, "read imap response") + if strings.ToUpper(status) != string(tc.lastResult.Status) { + tc.t.Fatalf("got status %q, expected %q", tc.lastResult.Status, status) + } +} + +func (tc *testconn) cmdf(tag, format string, args ...any) { + tc.t.Helper() + err := tc.client.Commandf(tag, format, args...) 
+ tcheck(tc.t, err, "writing imap command") +} + +func (tc *testconn) readstatus(status string) { + tc.t.Helper() + tc.response(status) +} + +func (tc *testconn) readprefixline(pre string) { + tc.t.Helper() + line, err := tc.client.Readline() + tcheck(tc.t, err, "read line") + if !strings.HasPrefix(line, pre) { + tc.t.Fatalf("expected prefix %q, got %q", pre, line) + } +} + +func (tc *testconn) writelinef(format string, args ...any) { + tc.t.Helper() + err := tc.client.Writelinef(format, args...) + tcheck(tc.t, err, "write line") +} + +// wait at most 1 second for server to quit. +func (tc *testconn) waitDone() { + tc.t.Helper() + t := time.NewTimer(time.Second) + select { + case <-tc.done: + t.Stop() + case <-t.C: + tc.t.Fatalf("server not done within 1s") + } +} + +func (tc *testconn) close() { + tc.client.Close() + tc.serverConn.Close() + tc.waitDone() +} + +var connCounter int64 + +func start(t *testing.T) *testconn { + return startArgs(t, true, false, true) +} + +func startNoSwitchboard(t *testing.T) *testconn { + return startArgs(t, false, false, true) +} + +func startArgs(t *testing.T, first, isTLS, allowLoginWithoutTLS bool) *testconn { + if first { + os.RemoveAll("../testdata/imap/data") + } + mox.Context = context.Background() + mox.ConfigStaticPath = "../testdata/imap/mox.conf" + mox.MustLoadConfig() + acc, err := store.OpenAccount("mjl") + tcheck(t, err, "open account") + if first { + err = acc.SetPassword("testtest") + tcheck(t, err, "set password") + } + err = acc.Close() + tcheck(t, err, "close account") + var switchDone chan struct{} + if first { + switchDone = store.Switchboard() + } else { + switchDone = make(chan struct{}) // Dummy, that can be closed. 
+ } + + serverConn, clientConn := net.Pipe() + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{fakeCert(t)}, + } + if isTLS { + serverConn = tls.Server(serverConn, tlsConfig) + clientConn = tls.Client(clientConn, &tls.Config{InsecureSkipVerify: true}) + } + + done := make(chan struct{}) + connCounter++ + cid := connCounter + go func() { + serve("test", cid, tlsConfig, serverConn, isTLS, allowLoginWithoutTLS) + close(switchDone) + close(done) + }() + client, err := imapclient.New(clientConn, true) + tcheck(t, err, "new client") + return &testconn{t: t, conn: clientConn, client: client, done: done, serverConn: serverConn} +} + +func fakeCert(t *testing.T) tls.Certificate { + privKey := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real! + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), // Required field... + } + localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey) + if err != nil { + t.Fatalf("making certificate: %s", err) + } + cert, err := x509.ParseCertificate(localCertBuf) + if err != nil { + t.Fatalf("parsing generated certificate: %s", err) + } + c := tls.Certificate{ + Certificate: [][]byte{localCertBuf}, + PrivateKey: privKey, + Leaf: cert, + } + return c +} + +func TestLogin(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.transactf("bad", "login too many args") + tc.transactf("bad", "login") // no args + tc.transactf("no", "login mjl@mox.example badpass") + tc.transactf("no", "login mjl testtest") // must use email, not account + tc.transactf("no", "login mjl@mox.example test") + tc.transactf("no", "login mjl@mox.example testtesttest") + tc.transactf("no", `login "mjl@mox.example" "testtesttest"`) + tc.transactf("no", "login \"m\xf8x@mox.example\" \"testtesttest\"") + tc.transactf("ok", "login mjl@mox.example testtest") + tc.close() + + tc = start(t) + defer tc.close() + tc.transactf("ok", `login "mjl@mox.example" 
"testtest"`) + + tc.transactf("bad", "logout badarg") + tc.transactf("ok", "logout") +} + +// Test that commands don't work in the states they are not supposed to. +func TestState(t *testing.T) { + tc := start(t) + + tc.transactf("bad", "boguscommand") + + notAuthenticated := []string{"starttls", "authenticate", "login"} + authenticatedOrSelected := []string{"enable", "select", "examine", "create", "delete", "rename", "subscribe", "unsubscribe", "list", "namespace", "status", "append", "idle", "lsub"} + selected := []string{"close", "unselect", "expunge", "search", "fetch", "store", "copy", "move", "uid expunge"} + + // Always allowed. + tc.transactf("ok", "capability") + tc.transactf("ok", "noop") + tc.transactf("ok", "logout") + tc.close() + tc = start(t) + defer tc.close() + + // Not authenticated, lots of commands not allowed. + for _, cmd := range append(append([]string{}, authenticatedOrSelected...), selected...) { + tc.transactf("no", "%s", cmd) + } + + // Some commands not allowed when authenticated. + tc.transactf("ok", "login mjl@mox.example testtest") + for _, cmd := range append(append([]string{}, notAuthenticated...), selected...) { + tc.transactf("no", "%s", cmd) + } +} + +func TestLiterals(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Create("tmpbox") + + tc.transactf("ok", "rename {6+}\r\ntmpbox {7+}\r\nntmpbox") + + from := "ntmpbox" + to := "tmpbox" + fmt.Fprint(tc.client, "xtag rename ") + tc.client.WriteSyncLiteral(from) + fmt.Fprint(tc.client, " ") + tc.client.WriteSyncLiteral(to) + fmt.Fprint(tc.client, "\r\n") + tc.client.LastTag = "xtag" + tc.last(tc.client.Response()) + if tc.lastResult.Status != "OK" { + tc.t.Fatalf(`got %q, expected "OK"`, tc.lastResult.Status) + } +} + +// Test longer scenario with login, lists, subscribes, status, selects, etc. 
// Test longer scenario with login, lists, subscribes, status, selects, etc.
// This is a smoke test of one session walking through most mailbox and message
// commands in sequence; detailed per-command behavior is covered by the
// dedicated tests elsewhere in this package.
func TestScenario(t *testing.T) {
	tc := start(t)
	defer tc.close()
	tc.transactf("ok", "login mjl@mox.example testtest")

	// Leading space makes the command unparseable.
	tc.transactf("bad", " missingcommand")

	// Open read-only, then leave the mailbox via unselect and via close.
	tc.transactf("ok", "examine inbox")
	tc.transactf("ok", "unselect")

	tc.transactf("ok", "examine inbox")
	tc.transactf("ok", "close")

	// Same, read-write.
	tc.transactf("ok", "select inbox")
	tc.transactf("ok", "close")

	tc.transactf("ok", "select inbox")
	tc.transactf("ok", "expunge")
	tc.transactf("ok", "check")

	// Subscriptions are idempotent state toggles.
	tc.transactf("ok", "subscribe inbox")
	tc.transactf("ok", "unsubscribe inbox")
	tc.transactf("ok", "subscribe inbox")

	tc.transactf("ok", `lsub "" "*"`)

	tc.transactf("ok", `list "" ""`)
	tc.transactf("ok", `namespace`)

	// Enabling an already-enabled extension again must be accepted.
	tc.transactf("ok", "enable utf8=accept")
	tc.transactf("ok", "enable imap4rev2 utf8=accept")

	// Mailbox lifecycle; inbox cannot be created again.
	tc.transactf("no", "create inbox")
	tc.transactf("ok", "create tmpbox")
	tc.transactf("ok", "rename tmpbox ntmpbox")
	tc.transactf("ok", "delete ntmpbox")

	tc.transactf("ok", "status inbox (uidnext messages uidvalidity deleted size unseen recent)")

	// Append with a non-synchronizing literal ({n+}), then with a
	// synchronizing literal ({n}) that waits for the "+" continuation.
	tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(exampleMsg), exampleMsg)
	tc.transactf("no", "append bogus () {%d}", len(exampleMsg))
	tc.cmdf("", "append inbox () {%d}", len(exampleMsg))
	tc.readprefixline("+")
	_, err := tc.conn.Write([]byte(exampleMsg + "\r\n"))
	tc.check(err, "write message")
	tc.response("ok")

	tc.transactf("ok", "fetch 1 all")
	tc.transactf("ok", "fetch 1 body")
	tc.transactf("ok", "fetch 1 binary[]")

	// Flag updates, including the junk-filter training side effects.
	tc.transactf("ok", `store 1 flags (\seen \answered)`)
	tc.transactf("ok", `store 1 +flags ($junk)`) // should train as junk.
	tc.transactf("ok", `store 1 -flags ($junk)`) // should retrain as non-junk.
	tc.transactf("ok", `store 1 -flags (\seen)`) // should untrain completely.
	tc.transactf("ok", `store 1 -flags (\answered)`)
	tc.transactf("ok", `store 1 +flags (\answered)`)
	tc.transactf("ok", `store 1 flags.silent (\seen \answered)`)
	tc.transactf("ok", `store 1 -flags.silent (\answered)`)
	tc.transactf("ok", `store 1 +flags.silent (\answered)`)
	tc.transactf("no", `store 1 flags (\badflag)`)
	tc.transactf("ok", "noop")

	// Copy is repeatable; move removes the message from the source mailbox.
	tc.transactf("ok", "copy 1 Trash")
	tc.transactf("ok", "copy 1 Trash")
	tc.transactf("ok", "move 1 Trash")

	tc.transactf("ok", "close")
	tc.transactf("ok", "select Trash")
	tc.transactf("ok", `store 1 flags (\deleted)`)
	tc.transactf("ok", "expunge")
	tc.transactf("ok", "noop")

	// Deleting a mailbox with a \Deleted message still pending is allowed.
	tc.transactf("ok", `store 1 flags (\deleted)`)
	tc.transactf("ok", "close")
	tc.transactf("ok", "delete Trash")
}
// TestID exercises the ID command: the server always reports its own
// name/version regardless of what the client sends, and rejects parameter
// lists with duplicate field names.
func TestID(t *testing.T) {
	tc := start(t)
	defer tc.close()
	tc.client.Login("mjl@mox.example", "testtest")

	// Client sends no parameters ("nil"); server still answers with its identity.
	tc.transactf("ok", "id nil")
	tc.xuntagged(imapclient.UntaggedID{"name": "mox", "version": moxvar.Version})

	// Client-supplied fields (including a nil value) are accepted but the
	// server's response is unchanged.
	tc.transactf("ok", `id ("name" "mox" "version" "1.2.3" "other" "test" "test" nil)`)
	tc.xuntagged(imapclient.UntaggedID{"name": "mox", "version": moxvar.Version})

	tc.transactf("bad", `id ("name" "mox" "name" "mox")`) // Duplicate field.
}
// Test that a message that is expunged by another session can be read as long as a
// reference is held by a session. New sessions do not see the expunged message.
// todo: possibly implement the additional reference counting. so far it hasn't been worth the trouble.
//
// Deliberately not picked up by "go test": the lowercase "disabled" prefix
// keeps it out of the Test* naming convention until reference counting exists.
func disabledTestReference(t *testing.T) {
	tc := start(t)
	defer tc.close()
	tc.client.Login("mjl@mox.example", "testtest")
	tc.client.Select("inbox")
	tc.client.Append("inbox", nil, nil, []byte(exampleMsg))

	// Second session with the message visible before it is expunged.
	tc2 := startNoSwitchboard(t)
	defer tc2.close()
	tc2.client.Login("mjl@mox.example", "testtest")
	tc2.client.Select("inbox")

	// First session deletes and expunges the message.
	tc.client.StoreFlagsSet("1", true, `\Deleted`)
	tc.client.Expunge()

	// A session started after the expunge must not see the message at all.
	tc3 := startNoSwitchboard(t)
	defer tc3.close()
	tc3.client.Login("mjl@mox.example", "testtest")
	tc3.transactf("ok", `list "" "inbox" return (status (messages))`)
	tc3.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox"}, imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[string]int64{"MESSAGES": 0}})

	// The session holding a reference should still be able to fetch it.
	// NOTE(review): the assertion below checks tc's untagged responses, but the
	// fetch was issued on tc2 — looks like this should be tc2.xuntagged; confirm
	// before enabling this test.
	tc2.transactf("ok", "fetch 1 rfc822.size")
	tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchRFC822Size(len(exampleMsg))}})
}
+ tc.close() + + tc = startArgs(t, true, false, false) + tc.transactf("no", `login "mjl@mox.example" "testtest"`) + tc.xcode("PRIVACYREQUIRED") + tc.transactf("no", "authenticate PLAIN %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000testtest"))) + tc.xcode("PRIVACYREQUIRED") + tc.client.Starttls(&tls.Config{InsecureSkipVerify: true}) + tc.client.Login("mjl@mox.example", "testtest") + tc.close() +} diff --git a/imapserver/status_test.go b/imapserver/status_test.go new file mode 100644 index 0000000..1ba4b49 --- /dev/null +++ b/imapserver/status_test.go @@ -0,0 +1,34 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestStatus(t *testing.T) { + defer mockUIDValidity()() + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "status") // Missing param. + tc.transactf("bad", "status inbox") // Missing param. + tc.transactf("bad", "status inbox ()") // At least one attribute required. + tc.transactf("bad", "status inbox (uidvalidity) ") // Leftover data. + tc.transactf("bad", "status inbox (unknown)") // Unknown attribute. + + tc.transactf("ok", "status inbox (messages uidnext uidvalidity unseen deleted size recent appendlimit)") + tc.xuntagged(imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[string]int64{"MESSAGES": 0, "UIDVALIDITY": 1, "UIDNEXT": 1, "UNSEEN": 0, "DELETED": 0, "SIZE": 0, "RECENT": 0, "APPENDLIMIT": 0}}) + + // Again, now with a message in the mailbox. 
+ tc.transactf("ok", "append inbox {4+}\r\ntest") + tc.transactf("ok", "status inbox (messages uidnext uidvalidity unseen deleted size recent appendlimit)") + tc.xuntagged(imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[string]int64{"MESSAGES": 1, "UIDVALIDITY": 1, "UIDNEXT": 2, "UNSEEN": 1, "DELETED": 0, "SIZE": 4, "RECENT": 0, "APPENDLIMIT": 0}}) + + tc.client.Select("inbox") + tc.client.StoreFlagsSet("1", true, `\Deleted`) + tc.transactf("ok", "status inbox (messages uidnext uidvalidity unseen deleted size recent appendlimit)") + tc.xuntagged(imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[string]int64{"MESSAGES": 1, "UIDVALIDITY": 1, "UIDNEXT": 2, "UNSEEN": 1, "DELETED": 1, "SIZE": 4, "RECENT": 0, "APPENDLIMIT": 0}}) +} diff --git a/imapserver/store_test.go b/imapserver/store_test.go new file mode 100644 index 0000000..9c73815 --- /dev/null +++ b/imapserver/store_test.go @@ -0,0 +1,68 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestStore(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Enable("imap4rev2") + + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.Select("inbox") + + uid1 := imapclient.FetchUID(1) + noflags := imapclient.FetchFlags(nil) + + tc.transactf("ok", "store 1 flags.silent ()") + tc.xuntagged() + + tc.transactf("ok", `store 1 flags ()`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + tc.transactf("ok", `fetch 1 flags`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + + tc.transactf("ok", `store 1 flags.silent (\Seen)`) + tc.xuntagged() + tc.transactf("ok", `fetch 1 flags`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{`\Seen`}}}) + + tc.transactf("ok", `store 1 flags ($Junk)`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: 
[]imapclient.FetchAttr{uid1, imapclient.FetchFlags{`$Junk`}}}) + tc.transactf("ok", `fetch 1 flags`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{`$Junk`}}}) + + tc.transactf("ok", `store 1 +flags ()`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{`$Junk`}}}) + tc.transactf("ok", `store 1 +flags (\Deleted)`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{`\Deleted`, `$Junk`}}}) + tc.transactf("ok", `fetch 1 flags`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, imapclient.FetchFlags{`\Deleted`, `$Junk`}}}) + + tc.transactf("ok", `store 1 -flags \Deleted $Junk`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + tc.transactf("ok", `fetch 1 flags`) + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + + tc.transactf("bad", "store 2 flags ()") // ../rfc/9051:7018 + + tc.transactf("ok", "uid store 1 flags ()") + tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}}) + + tc.transactf("bad", "store") // Need numset, flags and args. + tc.transactf("bad", "store 1") // Need flags. + tc.transactf("bad", "store 1 +") // Need flags. + tc.transactf("bad", "store 1 -") // Need flags. + tc.transactf("bad", "store 1 flags ") // Need flags. + tc.transactf("bad", "store 1 flags ") // Need flags. + tc.transactf("bad", "store 1 flags (bogus)") // Unknown flag. + + tc.client.Unselect() + tc.client.Examine("inbox") // Open read-only. + tc.transactf("no", `store 1 flags ()`) // No permission to set flags. 
+} diff --git a/imapserver/subscribe_test.go b/imapserver/subscribe_test.go new file mode 100644 index 0000000..b12a63c --- /dev/null +++ b/imapserver/subscribe_test.go @@ -0,0 +1,32 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestSubscribe(t *testing.T) { + tc := start(t) + defer tc.close() + + tc2 := startNoSwitchboard(t) + defer tc2.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc2.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "subscribe") // Missing param. + tc.transactf("bad", "subscribe ") // Missing param. + tc.transactf("bad", "subscribe fine ") // Leftover data. + + tc.transactf("ok", "subscribe a/b") + tc2.transactf("ok", "noop") + tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "a/b"}) + tc.transactf("ok", "subscribe a/b") // Already subscribed, which is fine. + tc2.transactf("ok", "noop") + tc2.xuntagged() // But no new changes. + + tc.transactf("ok", `list (subscribed) "" "a*" return (subscribed)`) + tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`, `\NonExistent`}, Separator: '/', Mailbox: "a/b"}) +} diff --git a/imapserver/unselect_test.go b/imapserver/unselect_test.go new file mode 100644 index 0000000..9ebf4bb --- /dev/null +++ b/imapserver/unselect_test.go @@ -0,0 +1,26 @@ +package imapserver + +import ( + "testing" + + "github.com/mjl-/mox/imapclient" +) + +func TestUnselect(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + tc.client.Select("inbox") + + tc.transactf("bad", "unselect bogus") // Leftover data. + tc.transactf("ok", "unselect") + tc.transactf("no", "fetch 1 all") // Invalid when not selected. 
+ + tc.client.Select("inbox") + tc.client.Append("inbox", nil, nil, []byte(exampleMsg)) + tc.client.StoreFlagsAdd("1", true, `\Deleted`) + tc.transactf("ok", "unselect") + tc.transactf("ok", "status inbox (messages)") + tc.xuntagged(imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[string]int64{"MESSAGES": 1}}) // Message not removed. +} diff --git a/imapserver/unsubscribe_test.go b/imapserver/unsubscribe_test.go new file mode 100644 index 0000000..939efba --- /dev/null +++ b/imapserver/unsubscribe_test.go @@ -0,0 +1,23 @@ +package imapserver + +import ( + "testing" +) + +func TestUnsubscribe(t *testing.T) { + tc := start(t) + defer tc.close() + + tc.client.Login("mjl@mox.example", "testtest") + + tc.transactf("bad", "unsubscribe") // Missing param. + tc.transactf("bad", "unsubscribe ") // Missing param. + tc.transactf("bad", "unsubscribe fine ") // Leftover data. + + tc.transactf("no", "unsubscribe a/b") // Does not exist and is not subscribed. + tc.transactf("ok", "create a/b") + tc.transactf("ok", "unsubscribe a/b") + tc.transactf("ok", "unsubscribe a/b") // Can unsubscribe even if it does not exist. 
+ tc.transactf("ok", "subscribe a/b") + tc.transactf("ok", "unsubscribe a/b") +} diff --git a/imapserver/utf7.go b/imapserver/utf7.go new file mode 100644 index 0000000..b8236d9 --- /dev/null +++ b/imapserver/utf7.go @@ -0,0 +1,83 @@ +package imapserver + +import ( + "encoding/base64" + "errors" + "fmt" +) + +const utf7chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+," + +var utf7encoding = base64.NewEncoding(utf7chars).WithPadding(base64.NoPadding) + +var ( + errUTF7SuperfluousShift = errors.New("utf7: superfluous unshift+shift") + errUTF7Base64 = errors.New("utf7: bad base64") + errUTF7OddSized = errors.New("utf7: odd-sized data") + errUTF7UnneededShift = errors.New("utf7: unneeded shift") + errUTF7UnfinishedShift = errors.New("utf7: unfinished shift") +) + +func utf7decode(s string) (string, error) { + var r string + var shifted bool + var b string + lastunshift := -2 + + for i, c := range s { + if !shifted { + if c == '&' { + if lastunshift == i-1 { + return "", errUTF7SuperfluousShift + } + shifted = true + } else { + r += string(c) + } + continue + } + + if c != '-' { + b += string(c) + continue + } + + shifted = false + lastunshift = i + if b == "" { + r += "&" + continue + } + buf, err := utf7encoding.DecodeString(b) + if err != nil { + return "", fmt.Errorf("%w: %q: %v", errUTF7Base64, b, err) + } + b = "" + + if len(buf)%2 != 0 { + return "", errUTF7OddSized + } + + x := make([]rune, len(buf)/2) + j := 0 + for i := 0; i < len(buf); i += 2 { + x[j] = rune(buf[i])<<8 | rune(buf[i+1]) + j++ + } + + need := false + for _, c := range x { + if c < 0x20 || c > 0x7e || c == '&' { + need = true + } + r += string(c) + } + if !need { + return "", errUTF7UnneededShift + } + } + if shifted { + return "", errUTF7UnfinishedShift + } + return r, nil +} diff --git a/imapserver/utf7_test.go b/imapserver/utf7_test.go new file mode 100644 index 0000000..a60ea09 --- /dev/null +++ b/imapserver/utf7_test.go @@ -0,0 +1,33 @@ +package imapserver + +import 
( + "errors" + "testing" +) + +func TestUTF7(t *testing.T) { + check := func(input string, output string, expErr error) { + t.Helper() + + r, err := utf7decode(input) + if r != output { + t.Fatalf("got %q, expected %q (err %v), for input %q", r, output, err, input) + } + if (expErr == nil) != (err == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected %v", err, expErr) + } + } + + check("plain", "plain", nil) + check("&Jjo-", "☺", nil) + check("test&Jjo-", "test☺", nil) + check("&Jjo-test&Jjo-", "☺test☺", nil) + check("&Jjo-test", "☺test", nil) + check("&-", "&", nil) + check("&-", "&", nil) + check("&Jjo", "", errUTF7UnfinishedShift) // missing closing - + check("&Jjo-&-", "", errUTF7SuperfluousShift) // shift just after unshift not allowed, should have been a single shift. + check("&AGE-", "", errUTF7UnneededShift) // Just 'a', does not need utf7. + check("&☺-", "", errUTF7Base64) + check("&YQ-", "", errUTF7OddSized) // Just a single byte 'a' +} diff --git a/import.go b/import.go new file mode 100644 index 0000000..ac802cb --- /dev/null +++ b/import.go @@ -0,0 +1,700 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "runtime/debug" + "strconv" + "strings" + "time" + + "github.com/mjl-/mox/junk" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/store" +) + +// todo: implement export of all maildirs to a zip file and also import of such a zip file. +// todo: add option to trust imported messages, causing us to look at Authentication-Results and Received-SPF headers and add eg verified spf/dkim/dmarc domains to our store, to jumpstart reputation. + +const importCommonHelp = `By default, messages will train the junk filter based on their flags and +mailbox naming. 
// cmdImportMaildir implements the "import maildir" subcommand: set up the
// command's usage/help text and flags, then hand off to xcmdImport, which
// relays the request over the ctl connection.
func cmdImportMaildir(c *cmd) {
	c.params = "accountname mailboxname maildir"
	c.help = `Import a maildir into an account.

` + importCommonHelp + `
Mailbox flags, like "seen", "answered", "forwarded", will be imported. An
attempt is made to parse dovecot keyword files.

The maildir files/directories are read by the mox process, so make sure it has
access to the maildir directories/files.
`

	// -train controls junk-filter training; -markread forces the seen flag on
	// every imported message.
	var train bool
	var markRead bool
	c.flag.BoolVar(&train, "train", true, "train junkfilter with messages")
	c.flag.BoolVar(&markRead, "markread", false, "mark all imported messages as read")

	args := c.Parse()
	// false: maildir, not mbox.
	xcmdImport(false, train, markRead, args, c)
}
// xcmdImport is the shared implementation of the "import maildir" and
// "import mbox" subcommands. It sends an import request over the ctl
// connection to the running mox instance (which does the actual reading and
// delivering; see importctl for the wire protocol), prints progress lines to
// stderr, and finally prints the number of imported messages.
//
// args is (accountname, mailboxname, source-path); mbox selects mbox vs
// maildir mode.
func xcmdImport(mbox, train, markRead bool, args []string, c *cmd) {
	if len(args) != 3 {
		c.Usage()
	}

	mox.MustLoadConfig()

	account := args[0]
	mailbox := args[1]
	// The inbox is special: normalize any case variant to "Inbox".
	if strings.EqualFold(mailbox, "inbox") {
		mailbox = "Inbox"
	}
	src := args[2]

	var ctlcmd string
	if mbox {
		ctlcmd = "importmbox"
	} else {
		ctlcmd = "importmaildir"
	}

	// Send the request: command name followed by its parameters, each as one
	// ctl line. The boolean options travel as fixed keywords.
	ctl := xctl()
	ctl.xwrite(ctlcmd)
	ctl.xwrite(account)
	ctl.xwrite(mailbox)
	ctl.xwrite(src)
	if train {
		ctl.xwrite("train")
	} else {
		ctl.xwrite("notrain")
	}
	if markRead {
		ctl.xwrite("markread")
	} else {
		ctl.xwrite("nomarkread")
	}
	ctl.xreadok()
	fmt.Fprintln(os.Stderr, "importing...")
	// Read "progress N" lines until the final "ok"; anything else is fatal.
	for {
		line := ctl.xread()
		if strings.HasPrefix(line, "progress ") {
			n := line[len("progress "):]
			fmt.Fprintf(os.Stderr, "%s...\n", n)
			continue
		}
		if line != "ok" {
			log.Fatalf("import, expected ok, got %q", line)
		}
		break
	}
	// Final line is the total message count.
	count := ctl.xread()
	fmt.Fprintf(os.Stderr, "%s imported\n", count)
}
case "markread": + markRead = true + case "nomarkread": + markRead = false + default: + ctl.xerror("bad value for markread: " + xmarkread) + } + + kind := "maildir" + if mbox { + kind = "mbox" + } + ctl.log.Info("importing messages", mlog.Field("kind", kind), mlog.Field("account", account), mlog.Field("mailbox", mailbox), mlog.Field("source", src)) + + var err error + var mboxf *os.File + var mdnewf, mdcurf *os.File + var msgreader msgReader + + defer func() { + if mboxf != nil { + if err := mboxf.Close(); err != nil { + ctl.log.Infox("closing mbox file after import", err) + } + } + if mdnewf != nil { + if err := mdnewf.Close(); err != nil { + ctl.log.Infox("closing maildir new after import", err) + } + } + if mdcurf != nil { + if err := mdcurf.Close(); err != nil { + ctl.log.Infox("closing maildir cur after import", err) + } + } + }() + + // Open account, creating a database file if it doesn't exist yet. It must be known + // in the configuration file. + a, err := store.OpenAccount(account) + ctl.xcheck(err, "opening account") + defer func() { + if a != nil { + if err := a.Close(); err != nil { + ctl.log.Errorx("closing account after import", err) + } + } + }() + + // Messages don't always have a junk flag set. We'll assume anything in a mailbox + // starting with junk or spam is junk mail. + isjunk := strings.HasPrefix(strings.ToLower(mailbox), "junk") || strings.HasPrefix(strings.ToLower(mailbox), "spam") + + // First check if we can access the mbox/maildir. + // Mox needs to be able to access those files, the user running the import command + // may be a different user who can access the files. 
+ if mbox { + mboxf, err = os.Open(src) + ctl.xcheck(err, "open mbox file") + msgreader = newMboxReader(isjunk, store.CreateMessageTemp, mboxf, ctl.log) + } else { + mdnewf, err = os.Open(filepath.Join(src, "new")) + ctl.xcheck(err, "open subdir new of maildir") + mdcurf, err = os.Open(filepath.Join(src, "cur")) + ctl.xcheck(err, "open subdir cur of maildir") + msgreader = newMaildirReader(isjunk, store.CreateMessageTemp, mdnewf, mdcurf, ctl.log) + } + + tx, err := a.DB.Begin(true) + ctl.xcheck(err, "begin transaction") + defer func() { + if tx != nil { + tx.Rollback() + } + }() + + // All preparations done. Good to go. + ctl.xwriteok() + + // We will be delivering messages. If we fail halfway, we need to remove the created msg files. + var deliveredIDs []int64 + + // Handle errors from store.*X calls. + defer func() { + x := recover() + if x == nil { + return + } + + ctl.log.Error("store error", mlog.Field("panic", x)) + debug.PrintStack() + metrics.PanicInc("import") + + for _, id := range deliveredIDs { + p := a.MessagePath(id) + if err := os.Remove(p); err != nil { + ctl.log.Errorx("closing message file after import error", err, mlog.Field("path", p)) + } + } + + ctl.xerror(fmt.Sprintf("%v", x)) + }() + + var changes []store.Change + + xdeliver := func(m *store.Message, mf *os.File) { + // todo: possibly set dmarcdomain to the domain of the from address? at least for non-spams that have been seen. otherwise user would start without any reputations. the assumption would be that the user has accepted email and deemed it legit, coming from the indicated sender. 
+ + const consumeFile = true + isSent := mailbox == "Sent" + const sync = false + const train = false + a.DeliverX(ctl.log, tx, m, mf, consumeFile, isSent, sync, train) + deliveredIDs = append(deliveredIDs, m.ID) + ctl.log.Debug("delivered message", mlog.Field("id", m.ID)) + changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags}) + } + + // todo: one goroutine for reading messages, one for parsing the message, one adding to database, one for junk filter training. + n := 0 + a.WithWLock(func() { + // Ensure mailbox exists. + var mb store.Mailbox + mb, changes = a.MailboxEnsureX(tx, mailbox, true) + + var jf *junk.Filter + if train { + jf, _, err = a.OpenJunkFilter(ctl.log) + if err != nil && !errors.Is(err, store.ErrNoJunkFilter) { + ctl.xcheck(err, "open junk filter") + } + defer func() { + if jf != nil { + err = jf.Close() + ctl.xcheck(err, "close junk filter") + } + }() + } + + process := func(m *store.Message, msgf *os.File, origPath string) { + defer func() { + if msgf == nil { + return + } + if err := os.Remove(msgf.Name()); err != nil { + ctl.log.Errorx("removing temporary message after failing to import", err) + } + msgf.Close() + }() + + if markRead { + m.Seen = true + } + + // todo: if message does not contain a date header, but this was a maildir file, add a Date header based on the time in the filename? + + // Parse message and store parsed information for later fast retrieval. 
+ p, err := message.EnsurePart(msgf, m.Size) + if err != nil { + ctl.log.Infox("parsing message, continuing", err, mlog.Field("path", origPath)) + } + m.ParsedBuf, err = json.Marshal(p) + ctl.xcheck(err, "marshal parsed message structure") + + if m.Received.IsZero() { + if p.Envelope != nil && !p.Envelope.Date.IsZero() { + m.Received = p.Envelope.Date + } else { + m.Received = time.Now() + } + } + + if jf != nil && (m.Seen || m.Junk) { + if words, err := jf.ParseMessage(p); err != nil { + ctl.log.Infox("parsing message for updating junk filter", err, mlog.Field("parse", ""), mlog.Field("path", origPath)) + } else { + err = jf.Train(!m.Junk, words) + ctl.xcheck(err, "training junk filter") + } + } + + m.MailboxID = mb.ID + m.MailboxOrigID = mb.ID + xdeliver(m, msgf) + msgf.Close() + msgf = nil + + n++ + if n%1000 == 0 { + ctl.xwrite(fmt.Sprintf("progress %d", n)) + } + } + + for { + m, msgf, origPath, err := msgreader.Next() + if err == io.EOF { + break + } + ctl.xcheck(err, "reading next message") + + process(m, msgf, origPath) + } + + err = tx.Commit() + ctl.xcheck(err, "commit") + tx = nil + ctl.log.Info("delivered messages through import", mlog.Field("count", len(deliveredIDs))) + deliveredIDs = nil + + comm := store.RegisterComm(a) + defer comm.Unregister() + comm.Broadcast(changes) + }) + + err = a.Close() + ctl.xcheck(err, "closing account") + a = nil + + ctl.xwriteok() + ctl.xwrite(fmt.Sprintf("%d", n)) +} + +type msgReader interface { + // Return next message, or io.EOF when there are no more. 
+ Next() (*store.Message, *os.File, string, error) +} + +type mboxReader struct { + createTemp func(pattern string) (*os.File, error) + path string + line int + r *bufio.Reader + prevempty bool + nonfirst bool + log *mlog.Log + eof bool + junk bool +} + +func newMboxReader(isjunk bool, createTemp func(pattern string) (*os.File, error), f *os.File, log *mlog.Log) *mboxReader { + return &mboxReader{createTemp: createTemp, path: f.Name(), line: 1, r: bufio.NewReader(f), log: log, junk: isjunk} +} + +func (mr *mboxReader) position() string { + return fmt.Sprintf("%s:%d", mr.path, mr.line) +} + +func (mr *mboxReader) Next() (*store.Message, *os.File, string, error) { + if mr.eof { + return nil, nil, "", io.EOF + } + + from := []byte("From ") + + if !mr.nonfirst { + // First read, we're at the beginning of the file. + line, err := mr.r.ReadBytes('\n') + if err == io.EOF { + return nil, nil, "", io.EOF + } + mr.line++ + + if !bytes.HasPrefix(line, from) { + return nil, nil, mr.position(), fmt.Errorf(`first line does not start with "From "`) + } + mr.nonfirst = true + } + + f, err := mr.createTemp("mboxreader") + if err != nil { + return nil, nil, mr.position(), err + } + defer func() { + if f != nil { + f.Close() + if err := os.Remove(f.Name()); err != nil { + mr.log.Errorx("removing temporary message file after mbox read error", err, mlog.Field("path", f.Name())) + } + } + }() + + bf := bufio.NewWriter(f) + + var size int64 + for { + line, err := mr.r.ReadBytes('\n') + if err != nil && err != io.EOF { + return nil, nil, mr.position(), fmt.Errorf("reading from mbox: %v", err) + } + if len(line) > 0 { + mr.line++ + // We store data with crlf, adjust any imported messages with bare newlines. + if !bytes.HasSuffix(line, []byte("\r\n")) { + line = append(line[:len(line)-1], "\r\n"...) + } + + // Next mail message starts at bare From word. 
+ if mr.prevempty && bytes.HasPrefix(line, from) { + break + } + if bytes.HasPrefix(line, []byte(">")) && bytes.HasPrefix(bytes.TrimLeft(line, ">"), []byte("From ")) { + line = line[1:] + } + n, err := bf.Write(line) + if err != nil { + return nil, nil, mr.position(), fmt.Errorf("writing message to file: %v", err) + } + size += int64(n) + mr.prevempty = bytes.Equal(line, []byte("\r\n")) + } + if err == io.EOF { + mr.eof = true + break + } + } + if err := bf.Flush(); err != nil { + return nil, nil, mr.position(), fmt.Errorf("flush: %v", err) + } + + // todo: look at Status or X-Status header in message? + // todo: take Received from the "From " line if present? + flags := store.Flags{Seen: true, Junk: mr.junk} + m := &store.Message{Flags: flags, Size: size} + + // Prevent cleanup by defer. + mf := f + f = nil + + return m, mf, mr.position(), nil +} + +type maildirReader struct { + createTemp func(pattern string) (*os.File, error) + newf, curf *os.File + f *os.File // File we are currently reading from. We first read newf, then curf. + dir string // Name of directory for f. Can be empty on first call. + entries []os.DirEntry + dovecotKeywords []string + log *mlog.Log + junk bool +} + +func newMaildirReader(isjunk bool, createTemp func(pattern string) (*os.File, error), newf, curf *os.File, log *mlog.Log) *maildirReader { + mr := &maildirReader{createTemp: createTemp, newf: newf, curf: curf, f: newf, log: log, junk: isjunk} + + // Best-effort parsing of dovecot keywords. 
+ kf, err := os.Open(filepath.Join(filepath.Dir(newf.Name()), "dovecot-keywords")) + if err == nil { + mr.dovecotKeywords = tryParseDovecotKeywords(kf, log) + kf.Close() + } + + return mr +} + +func (mr *maildirReader) Next() (*store.Message, *os.File, string, error) { + if mr.dir == "" { + mr.dir = mr.f.Name() + } + + if len(mr.entries) == 0 { + var err error + mr.entries, err = mr.f.ReadDir(100) + if err != nil && err != io.EOF { + return nil, nil, "", err + } + if len(mr.entries) == 0 { + if mr.f == mr.curf { + return nil, nil, "", io.EOF + } + mr.f = mr.curf + mr.dir = "" + return mr.Next() + } + } + + p := filepath.Join(mr.dir, mr.entries[0].Name()) + mr.entries = mr.entries[1:] + sf, err := os.Open(p) + if err != nil { + return nil, nil, p, fmt.Errorf("open message in maildir: %s", err) + } + defer sf.Close() + f, err := mr.createTemp("maildirreader") + if err != nil { + return nil, nil, p, err + } + defer func() { + if f != nil { + f.Close() + if err := os.Remove(f.Name()); err != nil { + mr.log.Errorx("removing temporary message file after maildir read error", err, mlog.Field("path", f.Name())) + } + } + }() + + // Copy data, changing bare \n into \r\n. + r := bufio.NewReader(sf) + w := bufio.NewWriter(f) + var size int64 + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + return nil, nil, p, fmt.Errorf("reading message: %v", err) + } + if len(line) > 0 { + if !bytes.HasSuffix(line, []byte("\r\n")) { + line = append(line[:len(line)-1], "\r\n"...) + } + + if n, err := w.Write(line); err != nil { + return nil, nil, p, fmt.Errorf("writing message: %v", err) + } else { + size += int64(n) + } + } + if err == io.EOF { + break + } + } + if err := w.Flush(); err != nil { + return nil, nil, p, fmt.Errorf("writing message: %v", err) + } + + // Take received time from filename. 
+ var received time.Time + t := strings.SplitN(filepath.Base(sf.Name()), ".", 2) + if v, err := strconv.ParseInt(t[0], 10, 64); err == nil { + received = time.Unix(v, 0) + } + + // Parse flags. See https://cr.yp.to/proto/maildir.html. + flags := store.Flags{} + t = strings.SplitN(filepath.Base(sf.Name()), ":2,", 2) + if len(t) == 2 { + for _, c := range t[1] { + switch c { + case 'P': + // Passed, doesn't map to a common IMAP flag. + case 'R': + flags.Answered = true + case 'S': + flags.Seen = true + case 'T': + flags.Deleted = true + case 'D': + flags.Draft = true + case 'F': + flags.Flagged = true + default: + if c >= 'a' && c <= 'z' { + index := int(c - 'a') + if index >= len(mr.dovecotKeywords) { + continue + } + kw := mr.dovecotKeywords[index] + switch kw { + case "$Forwarded", "Forwarded": + flags.Forwarded = true + case "$Junk", "Junk": + flags.Junk = true + case "$NotJunk", "NotJunk", "NonJunk": + flags.Notjunk = true + case "$MDNSent": + flags.MDNSent = true + case "$Phishing", "Phishing": + flags.Phishing = true + } + // todo: custom labels, e.g. $label1, JunkRecorded? + } + } + } + } + + if mr.junk { + flags.Junk = true + } + + m := &store.Message{Received: received, Flags: flags, Size: size} + + // Prevent cleanup by defer. 
+ mf := f + f = nil + + return m, mf, p, nil +} + +func tryParseDovecotKeywords(r io.Reader, log *mlog.Log) []string { + /* + If the dovecot-keywords file is present, we parse its additional flags, see + https://doc.dovecot.org/admin_manual/mailbox_formats/maildir/ + + 0 Old + 1 Junk + 2 NonJunk + 3 $Forwarded + 4 $Junk + */ + keywords := make([]string, 26) + end := 0 + scanner := bufio.NewScanner(r) + for scanner.Scan() { + s := scanner.Text() + t := strings.SplitN(s, " ", 2) + if len(t) != 2 { + log.Info("unexpected dovecot keyword line", mlog.Field("line", s)) + continue + } + v, err := strconv.ParseInt(t[0], 10, 32) + if err != nil { + log.Infox("unexpected dovecot keyword index", err, mlog.Field("line", s)) + continue + } + if v < 0 || v >= int64(len(keywords)) { + log.Info("dovecot keyword index too big", mlog.Field("line", s)) + continue + } + index := int(v) + if keywords[index] != "" { + log.Info("duplicate dovecot keyword", mlog.Field("line", s)) + continue + } + keywords[index] = t[1] + if index >= end { + end = index + 1 + } + } + if err := scanner.Err(); err != nil { + log.Infox("reading dovecot keywords file", err) + } + return keywords[:end] +} diff --git a/import_test.go b/import_test.go new file mode 100644 index 0000000..61a4eeb --- /dev/null +++ b/import_test.go @@ -0,0 +1,77 @@ +package main + +import ( + "io" + "os" + "testing" + + "github.com/mjl-/mox/mlog" +) + +func TestMboxReader(t *testing.T) { + createTemp := func(pattern string) (*os.File, error) { + return os.CreateTemp("", pattern) + } + mboxf, err := os.Open("testdata/importtest.mbox") + if err != nil { + t.Fatalf("open mbox: %v", err) + } + defer mboxf.Close() + + mr := newMboxReader(false, createTemp, mboxf, mlog.New("mboxreader")) + _, mf0, _, err := mr.Next() + if err != nil { + t.Fatalf("next mbox message: %v", err) + } + defer mf0.Close() + defer os.Remove(mf0.Name()) + + _, mf1, _, err := mr.Next() + if err != nil { + t.Fatalf("next mbox message: %v", err) + } + defer 
mf1.Close() + defer os.Remove(mf1.Name()) + + _, _, _, err = mr.Next() + if err != io.EOF { + t.Fatalf("got err %v, expected eof for next mbox message", err) + } +} + +func TestMaildirReader(t *testing.T) { + createTemp := func(pattern string) (*os.File, error) { + return os.CreateTemp("", pattern) + } + newf, err := os.Open("testdata/importtest.maildir/new") + if err != nil { + t.Fatalf("open maildir new: %v", err) + } + defer newf.Close() + + curf, err := os.Open("testdata/importtest.maildir/cur") + if err != nil { + t.Fatalf("open maildir cur: %v", err) + } + defer curf.Close() + + mr := newMaildirReader(false, createTemp, newf, curf, mlog.New("maildirreader")) + _, mf0, _, err := mr.Next() + if err != nil { + t.Fatalf("next maildir message: %v", err) + } + defer mf0.Close() + defer os.Remove(mf0.Name()) + + _, mf1, _, err := mr.Next() + if err != nil { + t.Fatalf("next maildir message: %v", err) + } + defer mf1.Close() + defer os.Remove(mf1.Name()) + + _, _, _, err = mr.Next() + if err != io.EOF { + t.Fatalf("got err %v, expected eof for next maildir message", err) + } +} diff --git a/integration_test.go b/integration_test.go new file mode 100644 index 0000000..844c2fb --- /dev/null +++ b/integration_test.go @@ -0,0 +1,144 @@ +//go:build integration + +// Run this using docker-compose.yml, see Makefile. + +package main + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "log" + "net" + "os" + "strings" + "testing" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtpclient" + "github.com/mjl-/mox/store" +) + +func tcheck(t *testing.T, err error, msg string) { + t.Helper() + if err != nil { + t.Fatalf("%s: %s", msg, err) + } +} + +// Submit a message to mox, which sends it to postfix, which forwards back to mox. +// We check if we receive the message. +func TestDeliver(t *testing.T) { + mlog.Logfmt = true + mox.Context = context.Background() + + // Remove state. 
+ os.RemoveAll("testdata/integration/run") + os.MkdirAll("testdata/integration/run", 0750) + + // Load mox config. + mox.ConfigStaticPath = "testdata/integration/mox.conf" + if errs := mox.LoadConfig(mox.Context); len(errs) > 0 { + t.Fatalf("loading mox config: %v", errs) + } + + // Create new accounts + createAccount := func(email, password string) { + t.Helper() + acc, _, err := store.OpenEmail(email) + tcheck(t, err, "open account") + err = acc.SetPassword(password) + tcheck(t, err, "setting password") + err = acc.Close() + tcheck(t, err, "closing account") + } + + createAccount("moxtest1@mox1.example", "pass1234") + createAccount("moxtest2@mox2.example", "pass1234") + createAccount("moxtest3@mox3.example", "pass1234") + + // Start mox. + mtastsdbRefresher := false + err := start(mtastsdbRefresher) + tcheck(t, err, "starting mox") + + // todo: we should probably hook store.Comm to get updates. + latestMsgID := func(username string) int64 { + // We open the account index database created by mox for the test user. And we keep looking for the email we sent. 
+ dbpath := fmt.Sprintf("testdata/integration/run/accounts/%s/index.db", username) + db, err := bstore.Open(dbpath, &bstore.Options{Timeout: 5 * time.Second}, store.Message{}, store.Recipient{}, store.Mailbox{}, store.Password{}) + tcheck(t, err, "open test account database") + defer db.Close() + + q := bstore.QueryDB[store.Mailbox](db) + q.FilterNonzero(store.Mailbox{Name: "Inbox"}) + inbox, err := q.Get() + if err != nil { + log.Printf("inbox for finding latest message id: %v", err) + return 0 + } + + qm := bstore.QueryDB[store.Message](db) + qm.FilterNonzero(store.Message{MailboxID: inbox.ID}) + qm.SortDesc("ID") + qm.Limit(1) + m, err := qm.Get() + if err != nil { + log.Printf("finding latest message id: %v", err) + return 0 + } + return m.ID + } + + waitForMsg := func(prevMsgID int64, username string) int64 { + t.Helper() + + for i := 0; i < 10; i++ { + msgID := latestMsgID(username) + if msgID > prevMsgID { + return msgID + } + time.Sleep(500 * time.Millisecond) + } + t.Fatalf("timeout waiting for message") + return 0 // not reached + } + + deliver := func(username, desthost, mailfrom, password, rcptto string) { + t.Helper() + + prevMsgID := latestMsgID(username) + + conn, err := net.Dial("tcp", desthost+":587") + tcheck(t, err, "dial submission") + defer conn.Close() + + // todo: this is "aware" (hopefully) of the config smtpclient/client.go sets up... tricky + mox.Conf.Static.HostnameDomain.ASCII = desthost + msg := fmt.Sprintf(`From: <%s> +To: <%s> +Subject: test message + +This is the message. 
+`, mailfrom, rcptto) + msg = strings.ReplaceAll(msg, "\n", "\r\n") + auth := bytes.Join([][]byte{nil, []byte(mailfrom), []byte(password)}, []byte{0}) + authLine := fmt.Sprintf("AUTH PLAIN %s", base64.StdEncoding.EncodeToString(auth)) + c, err := smtpclient.New(mox.Context, mlog.New("test"), conn, smtpclient.TLSOpportunistic, desthost, authLine) + tcheck(t, err, "smtp hello") + err = c.Deliver(mox.Context, mailfrom, rcptto, int64(len(msg)), strings.NewReader(msg), false, false) + tcheck(t, err, "deliver with smtp") + err = c.Close() + tcheck(t, err, "close smtpclient") + + waitForMsg(prevMsgID, username) + } + + deliver("moxtest1", "moxmail1.mox1.example", "moxtest1@mox1.example", "pass1234", "root@postfix.example") + deliver("moxtest3", "moxmail2.mox2.example", "moxtest2@mox2.example", "pass1234", "moxtest3@mox3.example") +} diff --git a/iprev/iprev.go b/iprev/iprev.go new file mode 100644 index 0000000..516dd6f --- /dev/null +++ b/iprev/iprev.go @@ -0,0 +1,90 @@ +// Package iprev checks if an IP has a reverse DNS name configured and that the +// reverse DNS name resolves back to the IP (RFC 8601, Section 3). +package iprev + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("iprev") + +var ( + metricIPRev = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_iprev_lookup_total", + Help: "Number of iprev lookups.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{"status"}, + ) +) + +// Lookup errors. +var ( + ErrNoRecord = errors.New("iprev: no reverse dns record") + ErrDNS = errors.New("iprev: dns lookup") +) + +// ../rfc/8601:1082 + +// Status is the result of a lookup. +type Status string + +const ( + StatusPass Status = "pass" // Reverse and forward lookup results were in agreement. 
+ StatusFail Status = "fail" // Reverse and forward lookup results were not in agreement, but at least the reverse name does exist. + StatusTemperror Status = "temperror" // Temporary error, e.g. DNS timeout. + StatusPermerror Status = "permerror" // Permanent error and later retry is unlikely to succeed. E.g. no PTR record. +) + +// Lookup checks whether an IP has a proper reverse & forward +// DNS configuration. I.e. that it is explicitly associated with its domain name. +// +// A PTR lookup is done on the IP, resulting in zero or more names. These names are +// forward resolved (A or AAAA) until the original IP address is found. The first +// matching name is returned as "name". All names, matching or not, are returned as +// "names". +// +// If a temporary error occurred, rerr is set. +func Lookup(ctx context.Context, resolver dns.Resolver, ip net.IP) (rstatus Status, name string, names []string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + metricIPRev.WithLabelValues(string(rstatus)).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("iprev lookup result", rerr, mlog.Field("ip", ip), mlog.Field("status", rstatus), mlog.Field("duration", time.Since(start))) + }() + + revNames, revErr := dns.WithPackage(resolver, "iprev").LookupAddr(ctx, ip.String()) + if dns.IsNotFound(revErr) { + return StatusPermerror, "", nil, ErrNoRecord + } else if revErr != nil { + return StatusTemperror, "", nil, fmt.Errorf("%w: %s", ErrDNS, revErr) + } + + var lastErr error + for _, rname := range revNames { + ips, err := dns.WithPackage(resolver, "iprev").LookupIP(ctx, "ip", rname) + for _, fwdIP := range ips { + if ip.Equal(fwdIP) { + return StatusPass, rname, revNames, nil + } + } + if err != nil && !dns.IsNotFound(err) { + lastErr = err + } + } + if lastErr != nil { + return StatusTemperror, "", revNames, fmt.Errorf("%w: %s", ErrDNS, lastErr) + } + return StatusFail, "", revNames, nil +} diff --git a/iprev/iprev_test.go 
b/iprev/iprev_test.go new file mode 100644 index 0000000..a617b34 --- /dev/null +++ b/iprev/iprev_test.go @@ -0,0 +1,68 @@ +package iprev + +import ( + "context" + "errors" + "net" + "strings" + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestIPRev(t *testing.T) { + resolver := dns.MockResolver{ + PTR: map[string][]string{ + "10.0.0.1": {"basic.example."}, + "10.0.0.4": {"absent.example.", "b.example."}, + "10.0.0.5": {"other.example.", "c.example."}, + "10.0.0.6": {"temperror.example.", "d.example."}, + "10.0.0.7": {"temperror.example.", "temperror2.example."}, + "10.0.0.8": {"other.example."}, + "2001:db8::1": {"basic6.example."}, + }, + A: map[string][]string{ + "basic.example.": {"10.0.0.1"}, + "b.example.": {"10.0.0.4"}, + "c.example.": {"10.0.0.5"}, + "d.example.": {"10.0.0.6"}, + "other.example.": {"10.9.9.9"}, + "temperror.example.": {"10.0.0.99"}, + "temperror2.example.": {"10.0.0.99"}, + }, + AAAA: map[string][]string{ + "basic6.example.": {"2001:db8::1"}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "ptr", Name: "10.0.0.3"}: {}, + {Type: "ptr", Name: "2001:db8::3"}: {}, + {Type: "ip", Name: "temperror.example."}: {}, + {Type: "ip", Name: "temperror2.example."}: {}, + }, + } + + test := func(ip string, expStatus Status, expName string, expNames string, expErr error) { + t.Helper() + + status, name, names, err := Lookup(context.Background(), resolver, net.ParseIP(ip)) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected err %v", err, expErr) + } else if err != nil { + return + } else if status != expStatus || name != expName || strings.Join(names, ",") != expNames { + t.Fatalf("got status %q, name %q, expNames %v, expected %q %q %v", status, name, names, expStatus, expName, expNames) + } + } + + test("10.0.0.1", StatusPass, "basic.example.", "basic.example.", nil) + test("10.0.0.2", StatusPermerror, "", "", ErrNoRecord) + test("10.0.0.3", StatusTemperror, "", "", ErrDNS) + 
test("10.0.0.4", StatusPass, "b.example.", "absent.example.,b.example.", nil) + test("10.0.0.5", StatusPass, "c.example.", "other.example.,c.example.", nil) + test("10.0.0.6", StatusPass, "d.example.", "temperror.example.,d.example.", nil) + test("10.0.0.7", StatusTemperror, "", "temperror.example.,temperror2.example.", ErrDNS) + test("10.0.0.8", StatusFail, "", "other.example.", nil) + test("2001:db8::1", StatusPass, "basic6.example.", "basic6.example.", nil) + test("2001:db8::2", StatusPermerror, "", "", ErrNoRecord) + test("2001:db8::3", StatusTemperror, "", "", ErrDNS) +} diff --git a/junk.go b/junk.go new file mode 100644 index 0000000..05d2bc0 --- /dev/null +++ b/junk.go @@ -0,0 +1,440 @@ +package main + +/* +note: these testdata paths are not in the repo, you should gather some of your +own ham/spam emails. + +./mox junk train testdata/train/ham testdata/train/spam +./mox junk train -sent-dir testdata/sent testdata/train/ham testdata/train/spam +./mox junk check 'testdata/check/ham/mail1' +./mox junk test testdata/check/ham testdata/check/spam +./mox junk analyze testdata/train/ham testdata/train/spam +./mox junk analyze -top-words 10 -train-ratio 0.5 -spam-threshold 0.85 -max-power 0.01 -sent-dir testdata/sent testdata/train/ham testdata/train/spam +./mox junk play -top-words 10 -train-ratio 0.5 -spam-threshold 0.85 -max-power 0.01 -sent-dir testdata/sent testdata/train/ham testdata/train/spam +*/ + +import ( + "flag" + "fmt" + "log" + mathrand "math/rand" + "os" + "runtime" + "runtime/pprof" + "sort" + "time" + + "github.com/mjl-/mox/junk" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" +) + +type junkArgs struct { + params junk.Params + cpuprofile, memprofile string + spamThreshold float64 + trainRatio float64 + seed bool + sentDir string + databasePath, bloomfilterPath string + debug bool +} + +func (a junkArgs) Memprofile() { + if a.memprofile == "" { + return + } + + f, err := os.Create(a.memprofile) + 
xcheckf(err, "creating memory profile") + defer f.Close() + runtime.GC() // get up-to-date statistics + err = pprof.WriteHeapProfile(f) + xcheckf(err, "writing memory profile") +} + +func (a junkArgs) Profile() func() { + if a.cpuprofile == "" { + return func() { + a.Memprofile() + } + } + + f, err := os.Create(a.cpuprofile) + xcheckf(err, "creating CPU profile") + err = pprof.StartCPUProfile(f) + xcheckf(err, "start CPU profile") + return func() { + pprof.StopCPUProfile() + f.Close() + a.Memprofile() + } +} + +func (a junkArgs) SetLogLevel() { + mox.Conf.Log[""] = mlog.LevelInfo + if a.debug { + mox.Conf.Log[""] = mlog.LevelDebug + } + mlog.SetConfig(mox.Conf.Log) +} + +func junkFlags(fs *flag.FlagSet) (a junkArgs) { + fs.BoolVar(&a.params.Onegrams, "one-grams", false, "use 1-grams, i.e. single words, for scoring") + fs.BoolVar(&a.params.Twograms, "two-grams", true, "use 2-grams, i.e. word pairs, for scoring") + fs.BoolVar(&a.params.Threegrams, "three-grams", false, "use 3-grams, i.e. word triplets, for scoring") + fs.Float64Var(&a.params.MaxPower, "max-power", 0.05, "maximum word power, e.g. 
min 0.05/max 0.95") + fs.Float64Var(&a.params.IgnoreWords, "ignore-words", 0.1, "ignore words with ham/spaminess within this distance from 0.5") + fs.IntVar(&a.params.TopWords, "top-words", 10, "number of top spam and number of top ham words from email to use") + fs.IntVar(&a.params.RareWords, "rare-words", 1, "words are rare if encountered this number during training, and skipped for scoring") + fs.BoolVar(&a.debug, "debug", false, "print debug logging when calculating spam probability") + + fs.Float64Var(&a.spamThreshold, "spam-threshold", 0.95, "probability where message is seen as spam") + fs.Float64Var(&a.trainRatio, "train-ratio", 0.5, "part of data to use for training versus analyzing (for analyze only)") + fs.StringVar(&a.sentDir, "sent-dir", "", "directory with sent mails, for training") + fs.BoolVar(&a.seed, "seed", false, "seed prng before analysis") + fs.StringVar(&a.databasePath, "dbpath", "filter.db", "database file for ham/spam words") + fs.StringVar(&a.bloomfilterPath, "bloompath", "filter.bloom", "bloom filter for ignoring unique strings") + + fs.StringVar(&a.cpuprofile, "cpuprof", "", "store cpu profile to file") + fs.StringVar(&a.memprofile, "memprof", "", "store mem profile to file") + return +} + +func listDir(dir string) (l []string) { + files, err := os.ReadDir(dir) + xcheckf(err, "listing directory %q", dir) + for _, f := range files { + l = append(l, f.Name()) + } + return l +} + +func must(f *junk.Filter, err error) *junk.Filter { + xcheckf(err, "filter") + return f +} + +func cmdJunkTrain(c *cmd) { + c.unlisted = true + c.params = "hamdir spamdir" + c.help = "Train a junk filter with messages from hamdir and spamdir." 
+ a := junkFlags(c.flag) + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + defer a.Profile()() + a.SetLogLevel() + + f := must(junk.NewFilter(mlog.New("junktrain"), a.params, a.databasePath, a.bloomfilterPath)) + defer f.Close() + + hamFiles := listDir(args[0]) + spamFiles := listDir(args[1]) + var sentFiles []string + if a.sentDir != "" { + sentFiles = listDir(a.sentDir) + } + + err := f.TrainDirs(args[0], a.sentDir, args[1], hamFiles, sentFiles, spamFiles) + xcheckf(err, "train") +} + +func cmdJunkCheck(c *cmd) { + c.unlisted = true + c.params = "mailfile" + c.help = "Check an email message against a junk filter, printing the probability of spam on a scale from 0 to 1." + a := junkFlags(c.flag) + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + defer a.Profile()() + a.SetLogLevel() + + f := must(junk.OpenFilter(mlog.New("junkcheck"), a.params, a.databasePath, a.bloomfilterPath, false)) + defer f.Close() + + prob, _, _, _, err := f.ClassifyMessagePath(args[0]) + xcheckf(err, "testing mail") + + fmt.Printf("%.6f\n", prob) +} + +func cmdJunkTest(c *cmd) { + c.unlisted = true + c.params = "hamdir spamdir" + c.help = "Check a directory with hams and one with spams against the junk filter, and report the success ratio." 
+ a := junkFlags(c.flag) + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + defer a.Profile()() + a.SetLogLevel() + + f := must(junk.OpenFilter(mlog.New("junktest"), a.params, a.databasePath, a.bloomfilterPath, false)) + defer f.Close() + + testDir := func(dir string, ham bool) (int, int) { + ok, bad := 0, 0 + files, err := os.ReadDir(dir) + xcheckf(err, "readdir %q", dir) + for _, fi := range files { + path := dir + "/" + fi.Name() + prob, _, _, _, err := f.ClassifyMessagePath(path) + if err != nil { + log.Printf("classify message %q: %s", path, err) + continue + } + if ham && prob < a.spamThreshold || !ham && prob > a.spamThreshold { + ok++ + } else { + bad++ + } + if ham && prob > a.spamThreshold { + fmt.Printf("ham %q: %.4f\n", path, prob) + } + if !ham && prob < a.spamThreshold { + fmt.Printf("spam %q: %.4f\n", path, prob) + } + } + return ok, bad + } + + nhamok, nhambad := testDir(args[0], true) + nspamok, nspambad := testDir(args[1], false) + fmt.Printf("total ham, ok %d, bad %d\n", nhamok, nhambad) + fmt.Printf("total spam, ok %d, bad %d\n", nspamok, nspambad) + fmt.Printf("specifity (true negatives, hams identified): %.6f\n", float64(nhamok)/(float64(nhamok+nhambad))) + fmt.Printf("sensitivity (true positives, spams identified): %.6f\n", float64(nspamok)/(float64(nspamok+nspambad))) + fmt.Printf("accuracy: %.6f\n", float64(nhamok+nspamok)/float64(nhamok+nhambad+nspamok+nspambad)) +} + +func cmdJunkAnalyze(c *cmd) { + c.unlisted = true + c.params = "hamdir spamdir" + c.help = `Analyze a directory with ham messages and one with spam messages. + +A part of the messages is used for training, and remaining for testing. 
The +messages are shuffled, with optional random seed.` + a := junkFlags(c.flag) + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + defer a.Profile()() + a.SetLogLevel() + + f := must(junk.NewFilter(mlog.New("junkanalyze"), a.params, a.databasePath, a.bloomfilterPath)) + defer f.Close() + + hamDir := args[0] + spamDir := args[1] + hamFiles := listDir(hamDir) + spamFiles := listDir(spamDir) + + var rand *mathrand.Rand + if a.seed { + rand = mathrand.New(mathrand.NewSource(time.Now().UnixMilli())) + } else { + rand = mathrand.New(mathrand.NewSource(0)) + } + + shuffle := func(l []string) { + count := len(l) + for i := range l { + n := rand.Intn(count) + l[i], l[n] = l[n], l[i] + } + } + + shuffle(hamFiles) + shuffle(spamFiles) + + ntrainham := int(a.trainRatio * float64(len(hamFiles))) + ntrainspam := int(a.trainRatio * float64(len(spamFiles))) + + trainHam := hamFiles[:ntrainham] + trainSpam := spamFiles[:ntrainspam] + testHam := hamFiles[ntrainham:] + testSpam := spamFiles[ntrainspam:] + + var trainSent []string + if a.sentDir != "" { + trainSent = listDir(a.sentDir) + } + + err := f.TrainDirs(hamDir, a.sentDir, spamDir, trainHam, trainSent, trainSpam) + xcheckf(err, "train") + + testDir := func(dir string, files []string, ham bool) (ok, bad, malformed int) { + for _, name := range files { + path := dir + "/" + name + prob, _, _, _, err := f.ClassifyMessagePath(path) + if err != nil { + // log.Infof("%s: %s", path, err) + malformed++ + continue + } + if ham && prob < a.spamThreshold || !ham && prob > a.spamThreshold { + ok++ + } else { + bad++ + } + if ham && prob > a.spamThreshold { + fmt.Printf("ham %q: %.4f\n", path, prob) + } + if !ham && prob < a.spamThreshold { + fmt.Printf("spam %q: %.4f\n", path, prob) + } + } + return + } + + nhamok, nhambad, nmalformedham := testDir(args[0], testHam, true) + nspamok, nspambad, nmalformedspam := testDir(args[1], testSpam, false) + fmt.Printf("training done, nham %d, nsent %d, nspam %d\n", ntrainham, 
len(trainSent), ntrainspam) + fmt.Printf("total ham, ok %d, bad %d, malformed %d\n", nhamok, nhambad, nmalformedham) + fmt.Printf("total spam, ok %d, bad %d, malformed %d\n", nspamok, nspambad, nmalformedspam) + fmt.Printf("specifity (true negatives, hams identified): %.6f\n", float64(nhamok)/(float64(nhamok+nhambad))) + fmt.Printf("sensitivity (true positives, spams identified): %.6f\n", float64(nspamok)/(float64(nspamok+nspambad))) + fmt.Printf("accuracy: %.6f\n", float64(nhamok+nspamok)/float64(nhamok+nhambad+nspamok+nspambad)) +} + +func cmdJunkPlay(c *cmd) { + c.unlisted = true + c.params = "hamdir spamdir" + c.help = "Play messages from ham and spam directory according to their time of arrival and report on junk filter performance." + a := junkFlags(c.flag) + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + defer a.Profile()() + a.SetLogLevel() + + f := must(junk.NewFilter(mlog.New("junkplay"), a.params, a.databasePath, a.bloomfilterPath)) + defer f.Close() + + // We'll go through all emails to find their dates. + type msg struct { + dir, filename string + ham, sent bool + t time.Time + } + var msgs []msg + + var nbad, nnodate, nham, nspam, nsent int + + scanDir := func(dir string, ham, sent bool) { + for _, name := range listDir(dir) { + path := dir + "/" + name + mf, err := os.Open(path) + xcheckf(err, "open %q", path) + fi, err := mf.Stat() + xcheckf(err, "stat %q", path) + p, err := message.EnsurePart(mf, fi.Size()) + if err != nil { + nbad++ + mf.Close() + continue + } + if p.Envelope.Date.IsZero() { + nnodate++ + mf.Close() + continue + } + mf.Close() + msgs = append(msgs, msg{dir, name, ham, sent, p.Envelope.Date}) + if sent { + nsent++ + } else if ham { + nham++ + } else { + nspam++ + } + } + } + + hamDir := args[0] + spamDir := args[1] + scanDir(hamDir, true, false) + scanDir(spamDir, false, false) + if a.sentDir != "" { + scanDir(a.sentDir, true, true) + } + + // Sort the messages, earliest first. 
+ sort.Slice(msgs, func(i, j int) bool { + return msgs[i].t.Before(msgs[j].t) + }) + + // Play all messages as if they are coming in. We predict their spaminess, check if + // we are right. And we train the system with the result. + var nhamok, nhambad, nspamok, nspambad int + + play := func(msg msg) { + var words map[string]struct{} + path := msg.dir + "/" + msg.filename + if !msg.sent { + var prob float64 + var err error + prob, words, _, _, err = f.ClassifyMessagePath(path) + if err != nil { + nbad++ + return + } + if msg.ham { + if prob < a.spamThreshold { + nhamok++ + } else { + nhambad++ + } + } else { + if prob > a.spamThreshold { + nspamok++ + } else { + nspambad++ + } + } + } else { + mf, err := os.Open(path) + xcheckf(err, "open %q", path) + defer mf.Close() + fi, err := mf.Stat() + xcheckf(err, "stat %q", path) + p, err := message.EnsurePart(mf, fi.Size()) + if err != nil { + log.Printf("bad sent message %q: %s", path, err) + return + } + + words, err = f.ParseMessage(p) + if err != nil { + log.Printf("bad sent message %q: %s", path, err) + return + } + } + + if err := f.Train(msg.ham, words); err != nil { + log.Printf("train: %s", err) + } + } + + for _, m := range msgs { + play(m) + } + + err := f.Save() + xcheckf(err, "saving filter") + + fmt.Printf("completed, nham %d, nsent %d, nspam %d, nbad %d, nwithoutdate %d\n", nham, nsent, nspam, nbad, nnodate) + fmt.Printf("total ham, ok %d, bad %d\n", nhamok, nhambad) + fmt.Printf("total spam, ok %d, bad %d\n", nspamok, nspambad) + fmt.Printf("specifity (true negatives, hams identified): %.6f\n", float64(nhamok)/(float64(nhamok+nhambad))) + fmt.Printf("sensitivity (true positives, spams identified): %.6f\n", float64(nspamok)/(float64(nspamok+nspambad))) + fmt.Printf("accuracy: %.6f\n", float64(nhamok+nspamok)/float64(nhamok+nhambad+nspamok+nspambad)) +} diff --git a/junk/bloom.go b/junk/bloom.go new file mode 100644 index 0000000..50e6783 --- /dev/null +++ b/junk/bloom.go @@ -0,0 +1,165 @@ +package junk + 
+import ( + "errors" + "os" + + "golang.org/x/crypto/blake2b" +) + +// see https://en.wikipedia.org/wiki/Bloom_filter + +var errWidth = errors.New("k and width wider than 256 bits and width not more than 32") +var errPowerOfTwo = errors.New("data not a power of two") + +// Bloom is a bloom filter. +type Bloom struct { + data []byte + k int // Number of bits we store/lookup in the bloom filter per value. + w int // Number of bits needed to address a single bit position. + modified bool +} + +func bloomWidth(fileSize int) int { + w := 0 + for bits := uint32(fileSize * 8); bits > 1; bits >>= 1 { + w++ + } + return w +} + +// BloomValid returns an error if the bloom file parameters are not correct. +func BloomValid(fileSize int, k int) error { + _, err := bloomValid(fileSize, k) + return err +} + +func bloomValid(fileSize, k int) (int, error) { + w := bloomWidth(fileSize) + if 1< 256 || w > 32 { + return 0, errWidth + } + return w, nil +} + +// NewBloom returns a bloom filter with given initial data. +// +// The number of bits in data must be a power of 2. +// K is the number of "hashes" (bits) to store/lookup for each value stored. +// Width is calculated as the number of bits needed to represent a single bit/hash +// position in the data. +// +// For each value stored/looked up, a hash over the value is calculated. The hash +// is split into "k" values that are "width" bits wide, each used to lookup a bit. +// K * width must not exceed 256. 
+func NewBloom(data []byte, k int) (*Bloom, error) { + w, err := bloomValid(len(data), k) + if err != nil { + return nil, err + } + + return &Bloom{ + data: data, + k: k, + w: w, + }, nil +} + +func (b *Bloom) Add(s string) { + h := hash([]byte(s), b.w) + for i := 0; i < b.k; i++ { + b.set(h.nextPos()) + } +} + +func (b *Bloom) Has(s string) bool { + h := hash([]byte(s), b.w) + for i := 0; i < b.k; i++ { + if !b.has(h.nextPos()) { + return false + } + } + return true +} + +func (b *Bloom) Bytes() []byte { + return b.data +} + +func (b *Bloom) Modified() bool { + return b.modified +} + +// Ones returns the number of ones. +func (b *Bloom) Ones() (n int) { + for _, d := range b.data { + for i := 0; i < 8; i++ { + if d&1 != 0 { + n++ + } + d >>= 1 + } + } + return n +} + +func (b *Bloom) Write(path string) error { + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0660) + if err != nil { + return err + } + if _, err := f.Write(b.data); err != nil { + f.Close() + return err + } + if err := f.Close(); err != nil { + return err + } + b.modified = false + return nil +} + +func (b *Bloom) has(p int) bool { + v := b.data[p>>3] >> (7 - (p & 7)) + return v&1 != 0 +} + +func (b *Bloom) set(p int) { + by := p >> 3 + bi := p & 0x7 + var v byte = 1 << (7 - bi) + if b.data[by]&v == 0 { + b.data[by] |= v + b.modified = true + } +} + +type bits struct { + width int // Number of bits for each position. + buf []byte // Remaining bytes to use for next position. + cur uint64 // Bits to read next position from. Replenished from buf. + ncur int // Number of bits available in cur. We consume the highest bits first. +} + +func hash(v []byte, width int) *bits { + buf := blake2b.Sum256(v) + return &bits{width: width, buf: buf[:]} +} + +// nextPos returns the next bit position. 
+func (b *bits) nextPos() (v int) { + if b.width > b.ncur { + for len(b.buf) > 0 && b.ncur < 64-8 { + b.cur <<= 8 + b.cur |= uint64(b.buf[0]) + b.ncur += 8 + b.buf = b.buf[1:] + } + } + v = int((b.cur >> (b.ncur - b.width)) & ((1 << b.width) - 1)) + b.ncur -= b.width + return v +} diff --git a/junk/bloom_test.go b/junk/bloom_test.go new file mode 100644 index 0000000..96fa54b --- /dev/null +++ b/junk/bloom_test.go @@ -0,0 +1,136 @@ +package junk + +import ( + "fmt" + "testing" +) + +func TestBloom(t *testing.T) { + if err := BloomValid(3, 10); err == nil { + t.Fatalf("missing error for invalid bloom filter size") + } + + _, err := NewBloom(make([]byte, 3), 10) + if err == nil { + t.Fatalf("missing error for invalid bloom filter size") + } + + b, err := NewBloom(make([]byte, 256), 5) + if err != nil { + t.Fatalf("newbloom: %s", err) + } + + absent := func(v string) { + t.Helper() + if b.Has(v) { + t.Fatalf("should be absent: %q", v) + } + } + + present := func(v string) { + t.Helper() + if !b.Has(v) { + t.Fatalf("should be present: %q", v) + } + } + + absent("test") + if b.Modified() { + t.Fatalf("bloom filter already modified?") + } + b.Add("test") + present("test") + present("test") + words := []string{} + for i := 'a'; i <= 'z'; i++ { + words = append(words, fmt.Sprintf("%c", i)) + } + for _, w := range words { + absent(w) + b.Add(w) + present(w) + } + for _, w := range words { + present(w) + } + if !b.Modified() { + t.Fatalf("bloom filter was not modified?") + } + + //log.Infof("ones: %d, m %d", b.Ones(), len(b.Bytes())*8) +} + +func TestBits(t *testing.T) { + b := &bits{width: 1, buf: []byte{0xff, 0xff}} + for i := 0; i < 16; i++ { + if b.nextPos() != 1 { + t.Fatalf("pos not 1") + } + } + b = &bits{width: 2, buf: []byte{0xff, 0xff}} + for i := 0; i < 8; i++ { + if b.nextPos() != 0b11 { + t.Fatalf("pos not 0b11") + } + } + + b = &bits{width: 1, buf: []byte{0b10101010, 0b10101010}} + for i := 0; i < 16; i++ { + if b.nextPos() != ((i + 1) % 2) { + t.Fatalf("bad 
pos") + } + } + b = &bits{width: 2, buf: []byte{0b10101010, 0b10101010}} + for i := 0; i < 8; i++ { + if b.nextPos() != 0b10 { + t.Fatalf("pos not 0b10") + } + } +} + +func TestSet(t *testing.T) { + b := &Bloom{ + data: []byte{ + 0b10101010, + 0b00000000, + 0b11111111, + 0b01010101, + }, + } + for i := 0; i < 8; i++ { + v := b.has(i) + if v != (i%2 == 0) { + t.Fatalf("bad has") + } + } + for i := 8; i < 16; i++ { + if b.has(i) { + t.Fatalf("bad has") + } + } + for i := 16; i < 24; i++ { + if !b.has(i) { + t.Fatalf("bad has") + } + } + for i := 24; i < 32; i++ { + v := b.has(i) + if v != (i%2 != 0) { + t.Fatalf("bad has") + } + } +} + +func TestOnes(t *testing.T) { + ones := func(b *Bloom, x int) { + t.Helper() + n := b.Ones() + if n != x { + t.Fatalf("ones: got %d, expected %d", n, x) + } + } + ones(&Bloom{data: []byte{0b10101010}}, 4) + ones(&Bloom{data: []byte{0b01010101}}, 4) + ones(&Bloom{data: []byte{0b11111111}}, 8) + ones(&Bloom{data: []byte{0b00000000}}, 0) +} diff --git a/junk/filter.go b/junk/filter.go new file mode 100644 index 0000000..af197bd --- /dev/null +++ b/junk/filter.go @@ -0,0 +1,726 @@ +// Package junk implements a bayesian spam filter. +// +// A message can be parsed into words. Words (or pairs or triplets) can be used +// to train the filter or to classify the message as ham or spam. Training +// records the words in the database as ham/spam. Classifying consists of +// calculating the ham/spam probability by combining the words in the message +// with their ham/spam status. +package junk + +// todo: look at inverse chi-square function? 
see https://www.linuxjournal.com/article/6467 +// todo: perhaps: whether anchor text in links in html are different from the url + +import ( + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" +) + +var ( + xlog = mlog.New("junk") + + errBadContentType = errors.New("bad content-type") // sure sign of spam + errClosed = errors.New("filter is closed") +) + +type word struct { + Ham uint32 + Spam uint32 +} + +type wordscore struct { + Word string + Ham uint32 + Spam uint32 +} + +// Params holds parameters for the filter. Most are at test-time. The first are +// used during parsing and training. +type Params struct { + Onegrams bool `sconf:"optional" sconf-doc:"Track ham/spam ranking for single words."` + Twograms bool `sconf:"optional" sconf-doc:"Track ham/spam ranking for each two consecutive words."` + Threegrams bool `sconf:"optional" sconf-doc:"Track ham/spam ranking for each three consecutive words."` + MaxPower float64 `sconf-doc:"Maximum power a word (combination) can have. If spaminess is 0.99, and max power is 0.1, spaminess of the word will be set to 0.9. Similar for ham words."` + TopWords int `sconf-doc:"Number of most spammy/hammy words to use for calculating probability. E.g. 10."` + IgnoreWords float64 `sconf:"optional" sconf-doc:"Ignore words that are this much away from 0.5 haminess/spaminess. E.g. 0.1, causing word (combinations) of 0.4 to 0.6 to be ignored."` + RareWords int `sconf:"optional" sconf-doc:"Occurrences in word database until a word is considered rare and its influence in calculating probability reduced. E.g. 1 or 2."` +} + +type Filter struct { + Params + + log *mlog.Log // For logging cid. + closed bool + modified bool // Whether any modifications are pending. Cleared by Save. + hams, spams uint32 // Message count, stored in db under word "-". + cache map[string]word // Words read from database or during training. 
+ changed map[string]word // Words modified during training. + dbPath, bloomPath string + db *bstore.DB // Always open on a filter. + bloom *Bloom // Only opened when writing. + isNew bool // Set for new filters until their first sync to disk. For faster writing. +} + +func (f *Filter) ensureBloom() error { + if f.bloom != nil { + return nil + } + var err error + f.bloom, err = openBloom(f.bloomPath) + return err +} + +// Close first saves the filter if it has modifications, then closes the database +// connection and releases the bloom filter. +func (f *Filter) Close() error { + if f.closed { + return errClosed + } + var err error + if f.modified { + err = f.Save() + } + if err != nil { + f.db.Close() + } else { + err = f.db.Close() + } + *f = Filter{log: f.log, closed: true} + return err +} + +func OpenFilter(log *mlog.Log, params Params, dbPath, bloomPath string, loadBloom bool) (*Filter, error) { + var bloom *Bloom + if loadBloom { + var err error + bloom, err = openBloom(bloomPath) + if err != nil { + return nil, err + } + } else if fi, err := os.Stat(bloomPath); err == nil { + if err := BloomValid(int(fi.Size()), bloomK); err != nil { + return nil, fmt.Errorf("bloom: %s", err) + } + } + + db, err := openDB(dbPath) + if err != nil { + return nil, fmt.Errorf("open database: %s", err) + } + + f := &Filter{ + Params: params, + log: log, + cache: map[string]word{}, + changed: map[string]word{}, + dbPath: dbPath, + bloomPath: bloomPath, + db: db, + bloom: bloom, + } + err = f.db.Read(func(tx *bstore.Tx) error { + wc := wordscore{Word: "-"} + err := tx.Get(&wc) + f.hams = wc.Ham + f.spams = wc.Spam + return err + }) + if err != nil { + f.Close() + return nil, fmt.Errorf("looking up ham/spam message count: %s", err) + } + return f, nil +} + +// NewFilter creates a new filter with empty bloom filter and database files. The +// filter is marked as new until the first save, will be done automatically if +// TrainDirs is called. 
// If the bloom and/or database files exist, an error is returned.
func NewFilter(log *mlog.Log, params Params, dbPath, bloomPath string) (*Filter, error) {
	var err error
	if _, err := os.Stat(bloomPath); err == nil {
		return nil, fmt.Errorf("bloom filter already exists on disk: %s", bloomPath)
	} else if _, err := os.Stat(dbPath); err == nil {
		return nil, fmt.Errorf("database file already exists on disk: %s", dbPath)
	}

	// Create an all-zeroes bloom file of fixed size.
	bloomSizeBytes := 4 * 1024 * 1024
	if err := BloomValid(bloomSizeBytes, bloomK); err != nil {
		return nil, fmt.Errorf("bloom: %s", err)
	}
	bf, err := os.Create(bloomPath)
	if err != nil {
		return nil, fmt.Errorf("creating bloom file: %w", err)
	}
	// Same size as bloomSizeBytes above.
	if err := bf.Truncate(4 * 1024 * 1024); err != nil {
		bf.Close()
		os.Remove(bloomPath)
		return nil, fmt.Errorf("making empty bloom filter: %s", err)
	}
	bf.Close()

	db, err := newDB(dbPath)
	if err != nil {
		// Clean up both files so a retry starts fresh.
		os.Remove(bloomPath)
		os.Remove(dbPath)
		return nil, fmt.Errorf("open database: %s", err)
	}

	words := map[string]word{} // f.changed is set to new map after training
	f := &Filter{
		Params:    params,
		log:       log,
		modified:  true, // Ensure ham/spam message count is added for new filter.
		cache:     words,
		changed:   words,
		dbPath:    dbPath,
		bloomPath: bloomPath,
		db:        db,
		isNew:     true,
	}
	return f, nil
}

// bloomK is the number of bits stored/looked up in the bloom filter per word.
const bloomK = 10

// openBloom reads the bloom filter file at path into memory.
func openBloom(path string) (*Bloom, error) {
	buf, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("reading bloom file: %w", err)
	}
	return NewBloom(buf, bloomK)
}

// newDB creates a fresh database file at path, removing any pre-existing file
// first and cleaning up again on error.
func newDB(path string) (db *bstore.DB, rerr error) {
	// Remove any existing files.
	os.Remove(path)

	defer func() {
		if rerr != nil {
			if db != nil {
				db.Close()
			}
			db = nil
			os.Remove(path)
		}
	}()

	db, err := bstore.Open(path, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, wordscore{})
	if err != nil {
		return nil, fmt.Errorf("open new database: %w", err)
	}
	return db, nil
}

// openDB opens an existing database file at path.
func openDB(path string) (*bstore.DB, error) {
	if _, err := os.Stat(path); err != nil {
		return nil, fmt.Errorf("stat db file: %w", err)
	}
	return bstore.Open(path, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, wordscore{})
}

// Save stores modifications, e.g. from training, to the database and bloom
// filter files.
func (f *Filter) Save() error {
	if f.closed {
		return errClosed
	}
	if !f.modified {
		return nil
	}

	if f.bloom != nil && f.bloom.Modified() {
		if err := f.bloom.Write(f.bloomPath); err != nil {
			return fmt.Errorf("writing bloom filter: %w", err)
		}
	}

	// We need to insert sequentially for reasonable performance.
	words := make([]string, len(f.changed))
	i := 0
	for w := range f.changed {
		words[i] = w
		i++
	}
	sort.Slice(words, func(i, j int) bool {
		return words[i] < words[j]
	})

	f.log.Info("inserting words in junkfilter db", mlog.Field("words", len(f.changed)))
	// start := time.Now()
	if f.isNew {
		// A new filter only inserts fresh records; hint the database accordingly.
		if err := f.db.HintAppend(true, wordscore{}); err != nil {
			f.log.Errorx("hint appendonly", err)
		} else {
			defer f.db.HintAppend(false, wordscore{})
		}
	}
	err := f.db.Write(func(tx *bstore.Tx) error {
		// update adds the delta counts for word w to the stored record,
		// inserting a record if none exists yet.
		update := func(w string, ham, spam uint32) error {
			if f.isNew {
				return tx.Insert(&wordscore{w, ham, spam})
			}

			wc := wordscore{w, 0, 0}
			err := tx.Get(&wc)
			if err == bstore.ErrAbsent {
				return tx.Insert(&wordscore{w, ham, spam})
			} else if err != nil {
				return err
			}
			return tx.Update(&wordscore{w, wc.Ham + ham, wc.Spam + spam})
		}
		// The total ham/spam message count is stored under the special word "-".
		if err := update("-", f.hams, f.spams); err != nil {
			return fmt.Errorf("storing total ham/spam message count: %s", err)
		}

		for _, w := range words {
			c := f.changed[w]
			if err := update(w, c.Ham, c.Spam); err != nil {
				return fmt.Errorf("updating ham/spam count: %s", err)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("updating database: %w", err)
	}

	f.changed = map[string]word{}
	f.modified = false
	f.isNew = false
	// f.log.Info("wrote filter to db", mlog.Field("duration", time.Since(start)))
	return nil
}

// loadWords fetches the given words from the database into dst. Words without
// a database record are left out of dst.
func loadWords(db *bstore.DB, l []string, dst map[string]word) error {
	// Sorted for sequential database access; see the performance note in Save.
	sort.Slice(l, func(i, j int) bool {
		return l[i] < l[j]
	})

	err := db.Read(func(tx *bstore.Tx) error {
		for _, w := range l {
			wc := wordscore{Word: w}
			if err := tx.Get(&wc); err == nil {
				dst[w] = word{wc.Ham, wc.Spam}
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("fetching words: %s", err)
	}
	return nil
}

// ClassifyWords returns the spam probability for the given words, and number of recognized ham and spam words.
func (f *Filter) ClassifyWords(words map[string]struct{}) (probability float64, nham, nspam int, rerr error) {
	if f.closed {
		return 0, 0, 0, errClosed
	}

	// A word (combination) with its spaminess ratio R.
	type xword struct {
		Word string
		R    float64
	}

	var hamHigh float64 = 0 // Highest (least hammy) ratio currently in topHam.
	var spamLow float64 = 1 // Lowest (least spammy) ratio currently in topSpam.
	var topHam []xword
	var topSpam []xword

	// Find words that should be in the database.
	lookupWords := []string{}
	expect := map[string]struct{}{}
	unknowns := map[string]struct{}{}
	totalUnknown := 0
	for w := range words {
		// Words absent from the bloom filter were never trained; skip them.
		if f.bloom != nil && !f.bloom.Has(w) {
			totalUnknown++
			if len(unknowns) < 50 {
				unknowns[w] = struct{}{}
			}
			continue
		}
		if _, ok := f.cache[w]; ok {
			continue
		}
		lookupWords = append(lookupWords, w)
		expect[w] = struct{}{}
	}
	if len(unknowns) > 0 {
		f.log.Debug("unknown words in bloom filter, showing max 50", mlog.Field("words", unknowns), mlog.Field("totalunknown", totalUnknown), mlog.Field("totalwords", len(words)))
	}

	// Fetch words from database.
	fetched := map[string]word{}
	if len(lookupWords) > 0 {
		if err := loadWords(f.db, lookupWords, fetched); err != nil {
			return 0, 0, 0, err
		}
		for w, c := range fetched {
			delete(expect, w)
			f.cache[w] = c
		}
		f.log.Debug("unknown words in db", mlog.Field("words", expect), mlog.Field("totalunknown", len(expect)), mlog.Field("totalwords", len(words)))
	}

	// Compute a spaminess ratio r per known word and keep the most extreme ones.
	for w := range words {
		c, ok := f.cache[w]
		if !ok {
			continue
		}
		// Word frequency relative to the total spam/ham message counts.
		var wS, wH float64
		if f.spams > 0 {
			wS = float64(c.Spam) / float64(f.spams)
		}
		if f.hams > 0 {
			wH = float64(c.Ham) / float64(f.hams)
		}
		r := wS / (wS + wH)

		// Clamp to [MaxPower, 1-MaxPower] so no single word dominates.
		if r < f.MaxPower {
			r = f.MaxPower
		} else if r >= 1-f.MaxPower {
			r = 1 - f.MaxPower
		}

		if c.Ham+c.Spam <= uint32(f.RareWords) {
			// Reduce the power of rare words.
			r += float64(1+uint32(f.RareWords)-(c.Ham+c.Spam)) * (0.5 - r) / 10
		}
		// Words too close to neutral carry no signal.
		if math.Abs(0.5-r) < f.IgnoreWords {
			continue
		}
		if r < 0.5 {
			// Only collect when under TopWords, or when more hammy than the
			// current least-hammy entry; final truncation happens after sorting.
			if len(topHam) >= f.TopWords && r > hamHigh {
				continue
			}
			topHam = append(topHam, xword{w, r})
			if r > hamHigh {
				hamHigh = r
			}
		} else if r > 0.5 {
			if len(topSpam) >= f.TopWords && r < spamLow {
				continue
			}
			topSpam = append(topSpam, xword{w, r})
			if r < spamLow {
				spamLow = r
			}
		}
	}

	// Most hammy first; ties broken by preferring longer words.
	sort.Slice(topHam, func(i, j int) bool {
		a, b := topHam[i], topHam[j]
		if a.R == b.R {
			return len(a.Word) > len(b.Word)
		}
		return a.R < b.R
	})
	// Most spammy first; same tie-break.
	sort.Slice(topSpam, func(i, j int) bool {
		a, b := topSpam[i], topSpam[j]
		if a.R == b.R {
			return len(a.Word) > len(b.Word)
		}
		return a.R > b.R
	})

	// Keep at most TopWords entries on each side.
	nham = f.TopWords
	if nham > len(topHam) {
		nham = len(topHam)
	}
	nspam = f.TopWords
	if nspam > len(topSpam) {
		nspam = len(topSpam)
	}
	topHam = topHam[:nham]
	topSpam = topSpam[:nspam]

	// Combine the kept words: eta accumulates ln(1-r) - ln(r); ham words
	// (r<0.5) push eta up, spam words push it down.
	var eta float64
	for _, x := range topHam {
		eta += math.Log(1-x.R) - math.Log(x.R)
	}
	for _, x := range topSpam {
		eta += math.Log(1-x.R) - math.Log(x.R)
	}

	f.log.Debug("top words", mlog.Field("hams", topHam), mlog.Field("spams", topSpam))

	// Logistic of -eta: close to 1 means spam, close to 0 means ham.
	prob := 1 / (1 + math.Pow(math.E, eta))
	return prob, len(topHam), len(topSpam), nil
}

// ClassifyMessagePath is a convenience wrapper for calling ClassifyMessage on a file.
+func (f *Filter) ClassifyMessagePath(path string) (probability float64, words map[string]struct{}, nham, nspam int, rerr error) { + if f.closed { + return 0, nil, 0, 0, errClosed + } + + mf, err := os.Open(path) + if err != nil { + return 0, nil, 0, 0, err + } + defer mf.Close() + fi, err := mf.Stat() + if err != nil { + return 0, nil, 0, 0, err + } + return f.ClassifyMessageReader(mf, fi.Size()) +} + +func (f *Filter) ClassifyMessageReader(mf io.ReaderAt, size int64) (probability float64, words map[string]struct{}, nham, nspam int, rerr error) { + m, err := message.EnsurePart(mf, size) + if err != nil && errors.Is(err, message.ErrBadContentType) { + // Invalid content-type header is a sure sign of spam. + //f.log.Infox("parsing content", err) + return 1, nil, 0, 0, nil + } + return f.ClassifyMessage(m) +} + +// ClassifyMessage parses the mail message in r and returns the spam probability +// (between 0 and 1), along with the tokenized words found in the message, and the +// number of recognized ham and spam words. +func (f *Filter) ClassifyMessage(m message.Part) (probability float64, words map[string]struct{}, nham, nspam int, rerr error) { + var err error + words, err = f.ParseMessage(m) + if err != nil { + return 0, nil, 0, 0, err + } + + probability, nham, nspam, err = f.ClassifyWords(words) + return probability, words, nham, nspam, err +} + +// Train adds the words of a single message to the filter. 
func (f *Filter) Train(ham bool, words map[string]struct{}) error {
	if err := f.ensureBloom(); err != nil {
		return err
	}

	var lwords []string

	// First-seen words only enter the bloom filter; already-known words that
	// are not cached yet are collected for a database lookup.
	for w := range words {
		if !f.bloom.Has(w) {
			f.bloom.Add(w)
			continue
		}
		if _, ok := f.cache[w]; !ok {
			lwords = append(lwords, w)
		}
	}

	if err := f.loadCache(lwords); err != nil {
		return err
	}

	// Bump the total ham/spam message count.
	f.modified = true
	if ham {
		f.hams++
	} else {
		f.spams++
	}

	// Bump per-word counts; changed entries are persisted on Save.
	for w := range words {
		c := f.cache[w]
		if ham {
			c.Ham++
		} else {
			c.Spam++
		}
		f.cache[w] = c
		f.changed[w] = c
	}
	return nil
}

// TrainMessage parses the message from r and trains the filter with its words,
// as ham or spam.
func (f *Filter) TrainMessage(r io.ReaderAt, size int64, ham bool) error {
	p, _ := message.EnsurePart(r, size)
	words, err := f.ParseMessage(p)
	if err != nil {
		return fmt.Errorf("parsing mail contents: %v", err)
	}
	return f.Train(ham, words)
}

// UntrainMessage parses the message from r and undoes an earlier training with
// its words.
func (f *Filter) UntrainMessage(r io.ReaderAt, size int64, ham bool) error {
	p, _ := message.EnsurePart(r, size)
	words, err := f.ParseMessage(p)
	if err != nil {
		return fmt.Errorf("parsing mail contents: %v", err)
	}
	return f.Untrain(ham, words)
}

// loadCache fetches the given words from the database into the cache.
func (f *Filter) loadCache(lwords []string) error {
	if len(lwords) == 0 {
		return nil
	}

	return loadWords(f.db, lwords, f.cache)
}

// Untrain adjusts the filter to undo a previous training of the words.
func (f *Filter) Untrain(ham bool, words map[string]struct{}) error {
	if err := f.ensureBloom(); err != nil {
		return err
	}

	// Lookup any words from the db that aren't in the cache and put them in the cache for modification.
	var lwords []string
	for w := range words {
		if _, ok := f.cache[w]; !ok {
			lwords = append(lwords, w)
		}
	}
	if err := f.loadCache(lwords); err != nil {
		return err
	}

	// Modify the message count.
	f.modified = true
	if ham {
		f.hams--
	} else {
		f.spams--
	}

	// Decrease the word counts.
	for w := range words {
		c, ok := f.cache[w]
		if !ok {
			// Word has no stored counts (e.g. only ever seen once); nothing to undo.
			continue
		}
		if ham {
			c.Ham--
		} else {
			c.Spam--
		}
		f.cache[w] = c
		f.changed[w] = c
	}
	return nil
}

// TrainDir parses mail messages from files and trains the filter.
func (f *Filter) TrainDir(dir string, files []string, ham bool) (n, malformed uint32, rerr error) {
	if f.closed {
		return 0, 0, errClosed
	}
	if err := f.ensureBloom(); err != nil {
		return 0, 0, err
	}

	for _, name := range files {
		p := fmt.Sprintf("%s/%s", dir, name)
		valid, words, err := f.tokenizeMail(p)
		if err != nil {
			// f.log.Infox("tokenizing mail", err, mlog.Field("path", p))
			malformed++
			continue
		}
		if !valid {
			continue
		}
		n++
		for w := range words {
			// Words seen for the first time only enter the bloom filter; their
			// counts start being recorded when they are seen again.
			if !f.bloom.Has(w) {
				f.bloom.Add(w)
				continue
			}
			c := f.cache[w]
			f.modified = true
			if ham {
				c.Ham++
			} else {
				c.Spam++
			}
			f.cache[w] = c
			f.changed[w] = c
		}
	}
	return
}

// TrainDirs trains and saves a filter with mail messages from different types
+func (f *Filter) TrainDirs(hamDir, sentDir, spamDir string, hamFiles, sentFiles, spamFiles []string) error { + if f.closed { + return errClosed + } + + var err error + + var start time.Time + var hamMalformed, sentMalformed, spamMalformed uint32 + + start = time.Now() + f.hams, hamMalformed, err = f.TrainDir(hamDir, hamFiles, true) + if err != nil { + return err + } + tham := time.Since(start) + + var sent uint32 + start = time.Now() + if sentDir != "" { + sent, sentMalformed, err = f.TrainDir(sentDir, sentFiles, true) + if err != nil { + return err + } + } + tsent := time.Since(start) + + start = time.Now() + f.spams, spamMalformed, err = f.TrainDir(spamDir, spamFiles, false) + if err != nil { + return err + } + tspam := time.Since(start) + + hams := f.hams + f.hams += sent + if err := f.Save(); err != nil { + return fmt.Errorf("saving filter: %s", err) + } + + dbSize := f.fileSize(f.dbPath) + bloomSize := f.fileSize(f.bloomPath) + + fields := []mlog.Pair{ + mlog.Field("hams", hams), + mlog.Field("hamTime", tham), + mlog.Field("hamMalformed", hamMalformed), + mlog.Field("sent", sent), + mlog.Field("sentTime", tsent), + mlog.Field("sentMalformed", sentMalformed), + mlog.Field("spams", f.spams), + mlog.Field("spamTime", tspam), + mlog.Field("spamMalformed", spamMalformed), + mlog.Field("dbsize", fmt.Sprintf("%.1fmb", float64(dbSize)/(1024*1024))), + mlog.Field("bloomsize", fmt.Sprintf("%.1fmb", float64(bloomSize)/(1024*1024))), + mlog.Field("bloom1ratio", fmt.Sprintf("%.4f", float64(f.bloom.Ones())/float64(len(f.bloom.Bytes())*8))), + } + xlog.Print("training done", fields...) 
+ + return nil +} + +func (f *Filter) fileSize(p string) int { + fi, err := os.Stat(p) + if err != nil { + f.log.Infox("stat", err, mlog.Field("path", p)) + return 0 + } + return int(fi.Size()) +} diff --git a/junk/filter_test.go b/junk/filter_test.go new file mode 100644 index 0000000..bbef2a7 --- /dev/null +++ b/junk/filter_test.go @@ -0,0 +1,201 @@ +package junk + +import ( + "fmt" + "math" + "os" + "path/filepath" + "testing" + + "github.com/mjl-/mox/mlog" +) + +func tcheck(t *testing.T, err error, msg string) { + t.Helper() + if err != nil { + t.Fatalf("%s: %s", msg, err) + } +} + +func tlistdir(t *testing.T, name string) []string { + t.Helper() + l, err := os.ReadDir(name) + tcheck(t, err, "readdir") + names := make([]string, len(l)) + for i, e := range l { + names[i] = e.Name() + } + return names +} + +func TestFilter(t *testing.T) { + log := mlog.New("junk") + params := Params{ + Onegrams: true, + Twograms: true, + Threegrams: false, + MaxPower: 0.1, + TopWords: 10, + IgnoreWords: 0.1, + RareWords: 1, + } + dbPath := "../testdata/junk/filter.db" + bloomPath := "../testdata/junk/filter.bloom" + os.Remove(dbPath) + os.Remove(bloomPath) + f, err := NewFilter(log, params, dbPath, bloomPath) + tcheck(t, err, "new filter") + err = f.Close() + tcheck(t, err, "close filter") + + f, err = OpenFilter(log, params, dbPath, bloomPath, true) + tcheck(t, err, "open filter") + + // Ensure these dirs exist. Developers should bring their own ham/spam example + // emails. 
+ os.MkdirAll("../testdata/train/ham", 0770) + os.MkdirAll("../testdata/train/spam", 0770) + + hamdir := "../testdata/train/ham" + spamdir := "../testdata/train/spam" + hamfiles := tlistdir(t, hamdir) + if len(hamfiles) > 100 { + hamfiles = hamfiles[:100] + } + spamfiles := tlistdir(t, spamdir) + if len(spamfiles) > 100 { + spamfiles = spamfiles[:100] + } + + err = f.TrainDirs(hamdir, "", spamdir, hamfiles, nil, spamfiles) + tcheck(t, err, "train dirs") + + if len(hamfiles) == 0 || len(spamfiles) == 0 { + fmt.Println("not training, no ham and/or spam messages, add them to testdata/train/ham and testdata/train/spam") + return + } + + prob, _, _, _, err := f.ClassifyMessagePath(filepath.Join(hamdir, hamfiles[0])) + tcheck(t, err, "classify ham message") + if prob > 0.1 { + t.Fatalf("trained ham file has prob %v, expected <= 0.1", prob) + } + + prob, _, _, _, err = f.ClassifyMessagePath(filepath.Join(spamdir, spamfiles[0])) + tcheck(t, err, "classify spam message") + if prob < 0.9 { + t.Fatalf("trained spam file has prob %v, expected > 0.9", prob) + } + + err = f.Close() + tcheck(t, err, "close filter") + + // Start again with empty filter. We'll train a few messages and check they are + // classified as ham/spam. Then we untrain to see they are no longer classified. + os.Remove(dbPath) + os.Remove(bloomPath) + f, err = NewFilter(log, params, dbPath, bloomPath) + tcheck(t, err, "open filter") + + hamf, err := os.Open(filepath.Join(hamdir, hamfiles[0])) + tcheck(t, err, "open hamfile") + defer hamf.Close() + hamstat, err := hamf.Stat() + tcheck(t, err, "stat hamfile") + hamsize := hamstat.Size() + + spamf, err := os.Open(filepath.Join(spamdir, spamfiles[0])) + tcheck(t, err, "open spamfile") + defer spamf.Close() + spamstat, err := spamf.Stat() + tcheck(t, err, "stat spamfile") + spamsize := spamstat.Size() + + // Train each message twice, to prevent single occurrences from being ignored. 
+ err = f.TrainMessage(hamf, hamsize, true) + tcheck(t, err, "train ham message") + _, err = hamf.Seek(0, 0) + tcheck(t, err, "seek ham message") + err = f.TrainMessage(hamf, hamsize, true) + tcheck(t, err, "train ham message") + + err = f.TrainMessage(spamf, spamsize, false) + tcheck(t, err, "train spam message") + _, err = spamf.Seek(0, 0) + tcheck(t, err, "seek spam message") + err = f.TrainMessage(spamf, spamsize, true) + tcheck(t, err, "train spam message") + + if !f.modified { + t.Fatalf("filter not modified after training") + } + if !f.bloom.Modified() { + t.Fatalf("bloom filter not modified after training") + } + + err = f.Save() + tcheck(t, err, "save filter") + if f.modified || f.bloom.Modified() { + t.Fatalf("filter or bloom filter still modified after save") + } + + // Classify and verify. + _, err = hamf.Seek(0, 0) + tcheck(t, err, "seek ham message") + prob, _, _, _, err = f.ClassifyMessageReader(hamf, hamsize) + tcheck(t, err, "classify ham") + if prob > 0.1 { + t.Fatalf("got prob %v, expected <= 0.1", prob) + } + + _, err = spamf.Seek(0, 0) + tcheck(t, err, "seek spam message") + prob, _, _, _, err = f.ClassifyMessageReader(spamf, spamsize) + tcheck(t, err, "classify spam") + if prob < 0.9 { + t.Fatalf("got prob %v, expected >= 0.9", prob) + } + + // Untrain ham & spam. 
+ _, err = hamf.Seek(0, 0) + tcheck(t, err, "seek ham message") + err = f.UntrainMessage(hamf, hamsize, true) + tcheck(t, err, "untrain ham message") + _, err = hamf.Seek(0, 0) + tcheck(t, err, "seek ham message") + err = f.UntrainMessage(hamf, spamsize, true) + tcheck(t, err, "untrain ham message") + + _, err = spamf.Seek(0, 0) + tcheck(t, err, "seek spam message") + err = f.UntrainMessage(spamf, spamsize, true) + tcheck(t, err, "untrain spam message") + _, err = spamf.Seek(0, 0) + tcheck(t, err, "seek spam message") + err = f.UntrainMessage(spamf, spamsize, true) + tcheck(t, err, "untrain spam message") + + if !f.modified { + t.Fatalf("filter not modified after untraining") + } + + // Classify again, should be unknown. + _, err = hamf.Seek(0, 0) + tcheck(t, err, "seek ham message") + prob, _, _, _, err = f.ClassifyMessageReader(hamf, hamsize) + tcheck(t, err, "classify ham") + if math.Abs(prob-0.5) > 0.1 { + t.Fatalf("got prob %v, expected 0.5 +-0.1", prob) + } + + _, err = spamf.Seek(0, 0) + tcheck(t, err, "seek spam message") + prob, _, _, _, err = f.ClassifyMessageReader(spamf, spamsize) + tcheck(t, err, "classify spam") + if math.Abs(prob-0.5) > 0.1 { + t.Fatalf("got prob %v, expected 0.5 +-0.1", prob) + } + + err = f.Close() + tcheck(t, err, "close filter") +} diff --git a/junk/parse.go b/junk/parse.go new file mode 100644 index 0000000..521508a --- /dev/null +++ b/junk/parse.go @@ -0,0 +1,323 @@ +package junk + +// see https://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering +// - todo: better html parsing? +// - todo: try reading text in pdf? +// - todo: try to detect language, have words per language? can be in the same dictionary. currently my dictionary is biased towards treating english as spam. 
+ +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + "unicode" + + "golang.org/x/net/html" + + "github.com/mjl-/mox/message" +) + +func (f *Filter) tokenizeMail(path string) (bool, map[string]struct{}, error) { + mf, err := os.Open(path) + if err != nil { + return false, nil, err + } + defer mf.Close() + fi, err := mf.Stat() + if err != nil { + return false, nil, err + } + p, _ := message.EnsurePart(mf, fi.Size()) + words, err := f.ParseMessage(p) + return true, words, err +} + +// ParseMessage reads a mail and returns a map with words. +func (f *Filter) ParseMessage(p message.Part) (map[string]struct{}, error) { + metaWords := map[string]struct{}{} + textWords := map[string]struct{}{} + htmlWords := map[string]struct{}{} + + hdrs, err := p.Header() + if err != nil { + return nil, fmt.Errorf("parsing headers: %v", err) + } + + // Add words from the header, annotated with +":". + // todo: add whether header is dkim-verified? + for k, l := range hdrs { + for _, h := range l { + switch k { + case "From", "To", "Cc", "Bcc", "Reply-To", "Subject", "Sender", "Return-Path": + // case "Subject", "To": + default: + continue + } + words := map[string]struct{}{} + f.tokenizeText(strings.NewReader(h), words) + for w := range words { + if len(w) <= 3 { + continue + } + metaWords[k+":"+w] = struct{}{} + } + } + } + + if err := f.mailParse(p, metaWords, textWords, htmlWords); err != nil { + return nil, fmt.Errorf("parsing message: %w", err) + } + + for w := range metaWords { + textWords[w] = struct{}{} + } + for w := range htmlWords { + textWords[w] = struct{}{} + } + + return textWords, nil +} + +// mailParse looks through the mail for the first text and html parts, and tokenizes their words. 
+func (f *Filter) mailParse(p message.Part, metaWords, textWords, htmlWords map[string]struct{}) error { + ct := p.MediaType + "/" + p.MediaSubType + + if ct == "TEXT/HTML" { + err := f.tokenizeHTML(p.Reader(), metaWords, htmlWords) + // log.Printf("html parsed, words %v", htmlWords) + return err + } + if ct == "" || strings.HasPrefix(ct, "TEXT/") { + err := f.tokenizeText(p.Reader(), textWords) + // log.Printf("text parsed, words %v", textWords) + return err + } + if p.Message != nil { + // Nested message, happens for forwarding. + if err := p.SetMessageReaderAt(); err != nil { + return fmt.Errorf("setting reader on nested message: %w", err) + } + return f.mailParse(*p.Message, metaWords, textWords, htmlWords) + } + for _, sp := range p.Parts { + if err := f.mailParse(sp, metaWords, textWords, htmlWords); err != nil { + return err + } + } + return nil +} + +func looksRandom(s string) bool { + // Random strings, eg 2fvu9stm9yxhnlu. ASCII only and a many consonants in a stretch. + stretch := 0 + const consonants = "bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ23456789" // 0 and 1 may be used as o and l/i + stretches := 0 + for _, c := range s { + if c >= 0x80 { + return false + } + if strings.ContainsRune(consonants, c) { + stretch++ + continue + } + if stretch >= 6 { + stretches++ + } + stretch = 0 + } + if stretch >= 6 { + stretches++ + } + return stretches > 0 +} + +func looksNumeric(s string) bool { + s = strings.TrimPrefix(s, "0x") // Hexadecimal. 
+ var digits, hex, other, digitstretch, maxdigitstretch int + for _, c := range s { + if c >= '0' && c <= '9' { + digits++ + digitstretch++ + continue + } else if c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F' { + hex++ + } else { + other++ + } + if digitstretch > maxdigitstretch { + maxdigitstretch = digitstretch + } + } + if digitstretch > maxdigitstretch { + maxdigitstretch = digitstretch + } + return maxdigitstretch >= 4 || other == 0 && maxdigitstretch >= 3 +} + +func (f *Filter) tokenizeText(r io.Reader, words map[string]struct{}) error { + b := &strings.Builder{} + var prev string + var prev2 string + + add := func() { + defer b.Reset() + if b.Len() <= 2 { + return + } + + s := b.String() + s = strings.Trim(s, "'") + var nondigit bool + for _, c := range s { + if !unicode.IsDigit(c) { + nondigit = true + break + } + } + + if !(nondigit && len(s) > 2) { + return + } + + if looksRandom(s) { + return + } + if looksNumeric(s) { + return + } + + // todo: do something for URLs, parse them? keep their domain only? + + if f.Threegrams && prev2 != "" && prev != "" { + words[prev2+" "+prev+" "+s] = struct{}{} + } + if f.Twograms && prev != "" { + words[prev+" "+s] = struct{}{} + } + if f.Onegrams { + words[s] = struct{}{} + } + prev2 = prev + prev = s + } + + br := bufio.NewReader(r) + + peekLetter := func() bool { + c, _, err := br.ReadRune() + br.UnreadRune() + return err == nil && unicode.IsLetter(c) + } + + for { + c, _, err := br.ReadRune() + if err == io.EOF { + break + } + if err != nil { + return err + } + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && (c != '\'' || b.Len() > 0 && peekLetter()) { + add() + } else { + b.WriteRune(unicode.ToLower(c)) + } + } + add() + return nil +} + +// tokenizeHTML parses html, and tokenizes its text into words. 
func (f *Filter) tokenizeHTML(r io.Reader, meta, words map[string]struct{}) error {
	// NOTE(review): the meta parameter is not wired through — htmlTextReader
	// gets a fresh map and the code below never fills it. Confirm whether meta
	// words are meant to be collected here.
	htmlReader := &htmlTextReader{
		t:    html.NewTokenizer(r),
		meta: map[string]struct{}{},
	}
	return f.tokenizeText(htmlReader, words)
}

// htmlTextReader reads an HTML document and yields only its readable text
// (plus img alt attributes), skipping markup and text inside
// script/style/svg, so it can be fed to the plain-text tokenizer.
type htmlTextReader struct {
	t        *html.Tokenizer
	meta     map[string]struct{}
	tagStack []string // Currently open elements; used to suppress text inside script/style/svg.
	buf      []byte   // Text already produced but not yet returned by Read.
	err      error    // Sticky error, returned once buf is drained.
}

// Read implements io.Reader, returning the next chunk of readable text.
func (r *htmlTextReader) Read(buf []byte) (n int, err error) {
	// todo: deal with invalid html better. the tokenizer is just tokenizing, we need to fix up the nesting etc. eg, rules say some elements close certain open elements.
	// todo: deal with inline elements? they shouldn't cause a word break.

	// give copies as much of nbuf as fits into buf and stashes the remainder
	// in r.buf for the next Read call.
	give := func(nbuf []byte) (int, error) {
		n := len(buf)
		if n > len(nbuf) {
			n = len(nbuf)
		}
		copy(buf, nbuf[:n])
		nbuf = nbuf[n:]
		if len(nbuf) < cap(r.buf) {
			r.buf = r.buf[:len(nbuf)]
		} else {
			r.buf = make([]byte, len(nbuf), 3*len(nbuf)/2)
		}
		copy(r.buf, nbuf)
		return n, nil
	}

	if len(r.buf) > 0 {
		return give(r.buf)
	}
	if r.err != nil {
		return 0, r.err
	}

	for {
		switch r.t.Next() {
		case html.ErrorToken:
			r.err = r.t.Err()
			return 0, r.err
		case html.TextToken:
			if len(r.tagStack) > 0 {
				switch r.tagStack[len(r.tagStack)-1] {
				case "script", "style", "svg":
					continue
				}
			}
			buf := r.t.Text()
			if len(buf) > 0 {
				return give(buf)
			}
		case html.StartTagToken:
			tagBuf, moreAttr := r.t.TagName()
			tag := string(tagBuf)
			//log.Printf("tag %q %v", tag, r.tagStack)

			// Image alt text is readable content too.
			if tag == "img" && moreAttr {
				var key, val []byte
				for moreAttr {
					key, val, moreAttr = r.t.TagAttr()
					if string(key) == "alt" && len(val) > 0 {
						return give(val)
					}
				}
			}

			// Empty elements, https://developer.mozilla.org/en-US/docs/Glossary/Empty_element
			switch tag {
			case "area", "base", "br", "col", "embed", "hr", "img", "input", "link", "meta", "param", "source", "track", "wbr":
				continue
			}

			r.tagStack = append(r.tagStack, tag)
		case html.EndTagToken:
			// log.Printf("tag pop %v", r.tagStack)
			if len(r.tagStack) > 0 {
				r.tagStack = r.tagStack[:len(r.tagStack)-1]
			}
		case html.SelfClosingTagToken:
		case html.CommentToken:
		case html.DoctypeToken:
		}
	}
}
diff --git a/junk/parse_test.go b/junk/parse_test.go
new file mode 100644
index 0000000..1acab60
--- /dev/null
+++ b/junk/parse_test.go
@@ -0,0 +1,33 @@
package junk

import (
	"os"
	"testing"
)

// FuzzParseMessage fuzzes the junk-filter tokenizer with arbitrary message
// input, seeded with a few real test messages.
func FuzzParseMessage(f *testing.F) {
	f.Add("")
	add := func(p string) {
		buf, err := os.ReadFile(p)
		if err != nil {
			f.Fatalf("reading file %q: %v", p, err)
		}
		f.Add(string(buf))
	}
	add("../testdata/junk/parse.eml")
	add("../testdata/junk/parse2.eml")
	add("../testdata/junk/parse3.eml")

	dbPath := "../testdata/junk/parse.db"
	bloomPath := "../testdata/junk/parse.bloom"
	os.Remove(dbPath)
	os.Remove(bloomPath)
	params := Params{Twograms: true}
	jf, err := NewFilter(xlog, params, dbPath, bloomPath)
	if err != nil {
		f.Fatalf("new filter: %v", err)
	}
	f.Fuzz(func(t *testing.T, s string) {
		jf.tokenizeMail(s)
	})
}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..0df4b73
--- /dev/null
+++ b/main.go
@@ -0,0 +1,1908 @@
package main

import (
	"bufio"
	"bytes"
	"context"
	"crypto/ed25519"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"net/mail"
	"os"
	"path/filepath"
	"strings"
	"time"

	"golang.org/x/crypto/bcrypt"

	"github.com/mjl-/bstore"
	"github.com/mjl-/sconf"
	"github.com/mjl-/sherpa"

	"github.com/mjl-/mox/config"
	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dmarcdb"
	"github.com/mjl-/mox/dmarcrpt"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/dnsbl"
	"github.com/mjl-/mox/http"
	"github.com/mjl-/mox/message"
	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/mox-"
	"github.com/mjl-/mox/moxvar"
	"github.com/mjl-/mox/mtasts"
	"github.com/mjl-/mox/smtp"
	"github.com/mjl-/mox/smtpclient"
	"github.com/mjl-/mox/spf"
	"github.com/mjl-/mox/store"
	"github.com/mjl-/mox/tlsrpt"
	"github.com/mjl-/mox/tlsrptdb"
	"github.com/mjl-/mox/updates"
)

// Location and public key for fetching signed changelogs of new releases.
var (
	changelogDomain = "xmox.nl"
	changelogURL    = "https://updates.xmox.nl/changelog"
	changelogPubKey = base64Decode("sPNiTDQzvb4FrytNEiebJhgyQzn57RwEjNbGWMM/bDY=")
)

// base64Decode decodes a standard-base64 string, panicking on invalid input.
// Intended only for the compile-time constants above.
func base64Decode(s string) []byte {
	buf, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return buf
}

// envString returns the value of environment variable k, or def when unset or empty.
func envString(k, def string) string {
	s := os.Getenv(k)
	if s == "" {
		return def
	}
	return s
}

// commands maps the space-separated command words to their implementations.
var commands = []struct {
	cmd string
	fn  func(c *cmd)
}{
	{"serve", cmdServe},
	{"quickstart", cmdQuickstart},
	{"restart", cmdRestart},
	{"stop", cmdStop},
	{"setaccountpassword", cmdSetaccountpassword},
	{"setadminpassword", cmdSetadminpassword},
	{"loglevels", cmdLoglevels},
	{"queue list", cmdQueueList},
	{"queue kick", cmdQueueKick},
	{"queue drop", cmdQueueDrop},
	{"queue dump", cmdQueueDump},
	{"import maildir", cmdImportMaildir},
	{"import mbox", cmdImportMbox},
	{"export maildir", cmdExportMaildir},
	{"export mbox", cmdExportMbox},
	{"help", cmdHelp},

	{"config test", cmdConfigTest},
	{"config dnscheck", cmdConfigDNSCheck},
	{"config dnsrecords", cmdConfigDNSRecords},
	{"config describe-domains", cmdConfigDescribeDomains},
	{"config describe-static", cmdConfigDescribeStatic},
	{"config account add", cmdConfigAccountAdd},
	{"config account rm", cmdConfigAccountRemove},
	{"config address add", cmdConfigAddressAdd},
	{"config address rm", cmdConfigAddressRemove},
	{"config domain add", cmdConfigDomainAdd},
	{"config domain rm", cmdConfigDomainRemove},

	{"checkupdate", cmdCheckupdate},
	{"cid", cmdCid},
	{"clientconfig", cmdClientConfig},
	{"deliver", cmdDeliver},
	{"dkim gened25519", cmdDKIMGened25519},
	{"dkim genrsa", cmdDKIMGenrsa},
	{"dkim lookup", cmdDKIMLookup},
	{"dkim txt", cmdDKIMTXT},
	{"dkim verify", cmdDKIMVerify},
	{"dmarc lookup", cmdDMARCLookup},
	{"dmarc parsereportmsg", cmdDMARCParsereportmsg},
	{"dmarc verify", cmdDMARCVerify},
	{"dnsbl check", cmdDNSBLCheck},
	{"dnsbl checkhealth", cmdDNSBLCheckhealth},
	{"mtasts lookup", cmdMTASTSLookup},
	{"sendmail", cmdSendmail},
	{"spf check", cmdSPFCheck},
	{"spf lookup", cmdSPFLookup},
	{"spf parse", cmdSPFParse},
	{"tlsrpt lookup", cmdTLSRPTLookup},
	{"tlsrpt parsereportmsg", cmdTLSRPTParsereportmsg},
	{"version", cmdVersion},

	// Not listed.
	{"helpall", cmdHelpall},
	{"junk analyze", cmdJunkAnalyze},
	{"junk check", cmdJunkCheck},
	{"junk play", cmdJunkPlay},
	{"junk test", cmdJunkTest},
	{"junk train", cmdJunkTrain},
	{"bumpuidvalidity", cmdBumpUIDValidity},
	{"dmarcdb addreport", cmdDMARCDBAddReport},
	{"ensureparsed", cmdEnsureParsed},
	{"tlsrptdb addreport", cmdTLSRPTDBAddReport},
	{"updates addsigned", cmdUpdatesAddSigned},
	{"updates genkey", cmdUpdatesGenkey},
	{"updates pubkey", cmdUpdatesPubkey},
	{"updates verify", cmdUpdatesVerify},
}

// cmds is the runtime form of commands, built at startup.
var cmds []cmd

func init() {
	for _, xc := range commands {
		c := cmd{words: strings.Split(xc.cmd, " "), fn: xc.fn}
		cmds = append(cmds, c)
	}
}

// cmd is one (sub)command with its flags, parameters and help text.
type cmd struct {
	words []string
	fn    func(c *cmd)

	// Set before calling command.
	flag     *flag.FlagSet
	flagArgs []string
	_gather  bool // Set when using Parse to gather usage for a command.

	// Set by invoked command or Parse.
	unlisted bool   // If set, command is not listed until at least some words are matched from command.
	params   string // Arguments to command. Multiple lines possible.
	help     string // Additional explanation. First line is synopsis, the rest is only printed for an explicit help/usage for that command.
	args     []string
}

// Parse parses the command's flags and returns the remaining arguments.
func (c *cmd) Parse() []string {
	// To gather params and usage information, we just run the command but cause this
	// panic after the command has registered its flags and set its params and help
	// information. This is then caught and that info printed.
	if c._gather {
		panic("gather")
	}

	c.flag.Usage = c.Usage
	c.flag.Parse(c.flagArgs)
	c.args = c.flag.Args()
	return c.args
}

// gather runs the command only to collect its params/help/flags, catching the
// "gather" panic that Parse raises in gather mode.
func (c *cmd) gather() {
	c.flag = flag.NewFlagSet("mox "+strings.Join(c.words, " "), flag.ExitOnError)
	c._gather = true
	defer func() {
		x := recover()
		// panic generated by Parse.
		if x != "gather" {
			panic(x)
		}
	}()
	c.fn(c)
}

// makeUsage renders the usage lines and flag defaults for this command.
func (c *cmd) makeUsage() string {
	var r strings.Builder
	cs := "mox " + strings.Join(c.words, " ")
	for i, line := range strings.Split(strings.TrimSpace(c.params), "\n") {
		s := ""
		if i == 0 {
			s = "usage:"
		}
		if line != "" {
			line = " " + line
		}
		fmt.Fprintf(&r, "%6s %s%s\n", s, cs, line)
	}
	c.flag.SetOutput(&r)
	c.flag.PrintDefaults()
	return r.String()
}

// printUsage writes usage and, when set, the full help text to stderr.
func (c *cmd) printUsage() {
	fmt.Fprint(os.Stderr, c.makeUsage())
	if c.help != "" {
		fmt.Fprint(os.Stderr, "\n"+c.help+"\n")
	}
}

// Usage prints usage and exits with status 2, matching flag.FlagSet behavior.
func (c *cmd) Usage() {
	c.printUsage()
	os.Exit(2)
}

func cmdHelp(c *cmd) {
	c.params = "[command ...]"
	c.help = `Prints help about matching commands.

If multiple commands match, they are listed along with the first line of their help text.
If a single command matches, its usage and full help text is printed.
`
	args := c.Parse()
	if len(args) == 0 {
		c.Usage()
	}

	// equal reports whether a and b contain the same words.
	equal := func(a, b []string) bool {
		if len(a) != len(b) {
			return false
		}
		for i := range a {
			if a[i] != b[i] {
				return false
			}
		}
		return true
	}

	// prefix reports whether pre is a word-prefix of l.
	prefix := func(l, pre []string) bool {
		if len(pre) > len(l) {
			return false
		}
		return equal(pre, l[:len(pre)])
	}

	var partial []cmd
	for _, c := range cmds {
		if equal(c.words, args) {
			// Exact match: print full usage and help.
			c.gather()
			fmt.Print(c.makeUsage())
			if c.help != "" {
				fmt.Print("\n" + c.help + "\n")
			}
			return
		} else if prefix(c.words, args) {
			partial = append(partial, c)
		}
	}
	if len(partial) == 0 {
		fmt.Fprintf(os.Stderr, "%s: unknown command\n", strings.Join(args, " "))
		os.Exit(2)
	}
	// Multiple matches: list each with the first line of its help text.
	for _, c := range partial {
		c.gather()
		line := "mox " + strings.Join(c.words, " ")
		fmt.Printf("%s\n", line)
		if c.help != "" {
			fmt.Printf("\t%s\n", strings.Split(c.help, "\n")[0])
		}
	}
}

func cmdHelpall(c *cmd) {
	c.unlisted = true
	c.help = `Print all detailed usage and help information for all listed commands.

Used to generate documentation.
`
	args := c.Parse()
	if len(args) != 0 {
		c.Usage()
	}

	n := 0
	for _, c := range cmds {
		c.gather()
		if c.unlisted {
			continue
		}
		if n > 0 {
			fmt.Fprintf(os.Stderr, "\n")
		}
		n++

		fmt.Fprintf(os.Stderr, "# mox %s\n\n", strings.Join(c.words, " "))
		if c.help != "" {
			fmt.Println(c.help + "\n")
		}
		s := c.makeUsage()
		s = "\t" + strings.ReplaceAll(s, "\n", "\n\t")
		fmt.Println(s)
	}
}

// usage prints usage lines for the given commands to stderr and exits with
// status 2. With unlisted set, normally hidden commands are included.
func usage(l []cmd, unlisted bool) {
	var lines []string
	if !unlisted {
		lines = append(lines, "mox [-config mox.conf] ...")
	}
	for _, c := range l {
		c.gather()
		if c.unlisted && !unlisted {
			continue
		}
		for _, line := range strings.Split(c.params, "\n") {
			x := append([]string{"mox"}, c.words...)
			if line != "" {
				x = append(x, line)
			}
			lines = append(lines, strings.Join(x, " "))
		}
	}
	for i, line := range lines {
		pre := "       "
		if i == 0 {
			pre = "usage: "
		}
		fmt.Fprintln(os.Stderr, pre+line)
	}
	os.Exit(2)
}

func main() {
	log.SetFlags(0)

	// If invoked as sendmail, e.g. /usr/sbin/sendmail, we do enough so cron can get a
	// message sent using smtp submission to a configured server.
	if len(os.Args) > 0 && filepath.Base(os.Args[0]) == "sendmail" {
		cmdSendmail(&cmd{flagArgs: os.Args[1:]})
		return
	}

	var loglevel string
	flag.StringVar(&mox.ConfigStaticPath, "config", envString("MOXCONF", "mox.conf"), "configuration file, other config files are looked up in the same directory, defaults to $MOXCONF with a fallback to mox.conf")
	flag.StringVar(&loglevel, "loglevel", "", "if non-empty, this debug level is set early in startup")

	flag.Usage = func() { usage(cmds, false) }
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		usage(cmds, false)
	}

	mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
	if level, ok := mlog.Levels[loglevel]; ok && loglevel != "" {
		mox.Conf.Log[""] = level
		mlog.SetConfig(mox.Conf.Log)
	}

	// Find the command whose words are a full prefix of the arguments.
	// Commands matched only partially are collected for a targeted usage message.
	var partial []cmd
next:
	for _, c := range cmds {
		for i, w := range c.words {
			if i >= len(args) || w != args[i] {
				if i > 0 {
					partial = append(partial, c)
				}
				continue next
			}
		}
		c.flag = flag.NewFlagSet("mox "+strings.Join(c.words, " "), flag.ExitOnError)
		c.flagArgs = args[len(c.words):]
		c.fn(&c)
		return
	}
	if len(partial) > 0 {
		usage(partial, true)
	}
	usage(cmds, false)
}

// xcheckf logs the formatted message with the error and exits when err is non-nil.
func xcheckf(err error, format string, args ...any) {
	if err == nil {
		return
	}
	msg := fmt.Sprintf(format, args...)
	log.Fatalf("%s: %s", msg, err)
}

// xparseIP parses s as an IP address, exiting with an error mentioning what on failure.
func xparseIP(s, what string) net.IP {
	ip := net.ParseIP(s)
	if ip == nil {
		log.Fatalf("invalid %s: %q", what, s)
	}
	return ip
}

// xparseDomain parses s as a domain name, exiting with an error mentioning what on failure.
func xparseDomain(s, what string) dns.Domain {
	d, err := dns.ParseDomain(s)
	xcheckf(err, "parsing %s %q", what, s)
	return d
}

func cmdClientConfig(c *cmd) {
	c.params = "domain"
	c.help = `Print the configuration for email clients for a domain.

Sending email is typically not done on the SMTP port 25, but on submission
ports 465 (with TLS) and 587 (without initial TLS, but usually added to the
connection with STARTTLS). For IMAP, the port with TLS is 993 and without is
143.

Without TLS/STARTTLS, passwords are sent in clear text, which should only be
configured over otherwise secured connections, like a VPN.
`
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}
	d := xparseDomain(args[0], "domain")
	mox.MustLoadConfig()
	printClientConfig(d)
}

// printClientConfig prints a table with the client connection settings for domain d.
func printClientConfig(d dns.Domain) {
	cc, err := mox.ClientConfigDomain(d)
	xcheckf(err, "getting client config")
	fmt.Printf("%-20s %-30s %5s %-15s %s\n", "Protocol", "Host", "Port", "Listener", "Note")
	for _, e := range cc.Entries {
		fmt.Printf("%-20s %-30s %5d %-15s %s\n", e.Protocol, e.Host, e.Port, e.Listener, e.Note)
	}
}

func cmdConfigTest(c *cmd) {
	c.help = `Parses and validates the configuration files.

If valid, the command exits with status 0. If not valid, all errors encountered
are printed.
+` + args := c.Parse() + if len(args) != 0 { + c.Usage() + } + + _, errs := mox.ParseConfig(context.Background(), mox.ConfigStaticPath, true) + if len(errs) > 1 { + log.Printf("multiple errors:") + for _, err := range errs { + log.Printf("%s", err) + } + os.Exit(1) + } else if len(errs) == 1 { + log.Fatalf("%s", errs[0]) + os.Exit(1) + } + fmt.Println("config OK") +} + +func cmdConfigDescribeStatic(c *cmd) { + c.params = ">mox.conf" + c.help = `Prints an annotated empty configuration for use as mox.conf. + +The static configuration file cannot be reloaded while mox is running. Mox has +to be restarted for changes to the static configuration file to take effect. + +This configuration file needs modifications to make it valid. For example, it +may contain unfinished list items. +` + if len(c.Parse()) != 0 { + c.Usage() + } + + var sc config.Static + err := sconf.Describe(os.Stdout, &sc) + xcheckf(err, "describing config") +} + +func cmdConfigDescribeDomains(c *cmd) { + c.params = ">domains.conf" + c.help = `Prints an annotated empty configuration for use as domains.conf. + +The domains configuration file contains the domains and their configuration, +and accounts and their configuration. This includes the configured email +addresses. The mox admin web interface, and the mox command line interface, can +make changes to this file. Mox automatically reloads this file when it changes. + +Like the static configuration, the example domains.conf printed by this command +needs modifications to make it valid. +` + if len(c.Parse()) != 0 { + c.Usage() + } + + var dc config.Dynamic + err := sconf.Describe(os.Stdout, &dc) + xcheckf(err, "describing config") +} + +func cmdConfigDomainAdd(c *cmd) { + c.params = "domain account [localpart]" + c.help = `Adds a new domain to the configuration and reloads the configuration. + +The account is used for the postmaster mailboxes the domain, including as DMARC and +TLS reporting. Localpart is the "username" at the domain for this account. 
If +must be set if and only if account does not yet exist. +` + args := c.Parse() + if len(args) != 2 && len(args) != 3 { + c.Usage() + } + + d := xparseDomain(args[0], "domain") + mox.MustLoadConfig() + + if len(args) == 2 { + args = append(args, "") + } + ctl := xctl() + ctl.xwrite("domainadd") + for _, s := range args { + ctl.xwrite(s) + } + ctl.xreadok() + fmt.Printf("domain added, remember to add dns records, see:\n\nmox config dnsrecords %s\nmox config dnscheck %s\n", d.Name(), d.Name()) +} + +func cmdConfigDomainRemove(c *cmd) { + c.params = "domain" + c.help = `Remove a domain from the configuration and reload the configuration. + +This is a dangerous operation. Incoming email delivery for this domain will be +rejected. +` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + d := xparseDomain(args[0], "domain") + mox.MustLoadConfig() + ctl := xctl() + ctl.xwrite("domainrm") + ctl.xwrite(args[0]) + ctl.xreadok() + fmt.Printf("domain removed, remember to remove dns records for %s\n", d) +} + +func cmdConfigAccountAdd(c *cmd) { + c.params = "account address" + c.help = `Add an account with an email address and reload the configuration. + +Email can be delivered to this address/account. A password has to be configured +explicitly, see the setaccountpassword command. +` + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + + mox.MustLoadConfig() + ctl := xctl() + ctl.xwrite("accountadd") + for _, s := range args { + ctl.xwrite(s) + } + ctl.xreadok() + fmt.Printf("account added, set a password with \"mox setaccountpassword %s\"\n", args[1]) +} + +func cmdConfigAccountRemove(c *cmd) { + c.params = "account" + c.help = `Remove an account and reload the configuration. + +Email addresses for this account will also be removed, and incoming email for +these addresses will be rejected. 
`
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	mox.MustLoadConfig()
	ctl := xctl()
	ctl.xwrite("accountrm")
	ctl.xwrite(args[0])
	ctl.xreadok()
	fmt.Println("account removed")
}

func cmdConfigAddressAdd(c *cmd) {
	c.params = "address account"
	c.help = "Adds an address to an account and reloads the configuration."
	args := c.Parse()
	if len(args) != 2 {
		c.Usage()
	}

	mox.MustLoadConfig()
	ctl := xctl()
	ctl.xwrite("addressadd")
	for _, s := range args {
		ctl.xwrite(s)
	}
	ctl.xreadok()
	fmt.Println("address added")
}

func cmdConfigAddressRemove(c *cmd) {
	c.params = "address"
	c.help = `Remove an address and reload the configuration.

Incoming email for this address will be rejected.
`
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	mox.MustLoadConfig()
	ctl := xctl()
	ctl.xwrite("addressrm")
	ctl.xwrite(args[0])
	ctl.xreadok()
	fmt.Println("address removed")
}

func cmdConfigDNSRecords(c *cmd) {
	c.params = "domain"
	c.help = `Prints annotated DNS records as zone file that should be created for the domain.

The zone file can be imported into existing DNS software. You should review the
DNS records, especially if your domain previously/currently has email
configured.
`
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	d := xparseDomain(args[0], "domain")
	mox.MustLoadConfig()
	domConf, ok := mox.Conf.Domain(d)
	if !ok {
		log.Fatalf("unknown domain")
	}
	records, err := mox.DomainRecords(domConf, d)
	xcheckf(err, "records")
	fmt.Print(strings.Join(records, "\n") + "\n")
}

func cmdConfigDNSCheck(c *cmd) {
	c.params = "domain"
	c.help = "Check the DNS records with the configuration for the domain, and print any errors/warnings."
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	d := xparseDomain(args[0], "domain")
	mox.MustLoadConfig()
	_, ok := mox.Conf.Domain(d)
	if !ok {
		log.Fatalf("unknown domain")
	}

	// todo future: move http.Admin.CheckDomain to mox- and make it return a regular error.
	// CheckDomain reports failures by panicking with a *sherpa.Error; turn that
	// into a fatal log message and re-panic anything else.
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		err, ok := x.(*sherpa.Error)
		if !ok {
			panic(x)
		}
		log.Fatalf("%s", err)
	}()

	// printResult prints only sections that have errors or warnings.
	printResult := func(name string, r http.Result) {
		if len(r.Errors) == 0 && len(r.Warnings) == 0 {
			return
		}
		fmt.Printf("# %s\n", name)
		for _, s := range r.Errors {
			fmt.Printf("error: %s\n", s)
		}
		for _, s := range r.Warnings {
			fmt.Printf("warning: %s\n", s)
		}
	}

	result := http.Admin{}.CheckDomain(context.Background(), args[0])
	printResult("MX", result.MX.Result)
	printResult("TLS", result.TLS.Result)
	printResult("SPF", result.SPF.Result)
	printResult("DKIM", result.DKIM.Result)
	printResult("DMARC", result.DMARC.Result)
	printResult("TLSRPT", result.TLSRPT.Result)
	printResult("MTASTS", result.MTASTS.Result)
	printResult("SRVConf", result.SRVConf.Result)
	printResult("Autoconf", result.Autoconf.Result)
	printResult("Autodiscover", result.Autodiscover.Result)
}

func cmdLoglevels(c *cmd) {
	c.params = "[level [pkg]]"
	c.help = `Print the log levels, or set a new default log level, or a level for the given package.

By default, a single log level applies to all logging in mox. But for each
"pkg", an overriding log level can be configured. Examples of packages:
smtpserver, smtpclient, queue, imapserver, spf, dkim, dmarc, junk, message,
etc.

Valid labels: error, info, debug, trace.
+` + args := c.Parse() + if len(args) > 2 { + c.Usage() + } + mox.MustLoadConfig() + + if len(args) == 0 { + ctl := xctl() + ctl.xwrite("loglevels") + ctl.xreadok() + ctl.xstreamto(os.Stdout) + return + } + + ctl := xctl() + ctl.xwrite("setloglevels") + if len(args) == 2 { + ctl.xwrite(args[1]) + } else { + ctl.xwrite("") + } + ctl.xwrite(args[0]) + ctl.xreadok() +} + +func cmdStop(c *cmd) { + c.help = `Shut mox down, giving connections maximum 3 seconds to stop before closing them. + +While shutting down, new IMAP and SMTP connections will get a status response +indicating temporary unavailability. Existing connections will get a 3 second +period to finish their transaction and shut down. Under normal circumstances, +only IMAP has long-living connections, with the IDLE command to get notified of +new mail deliveries. +` + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("stop") + // Read will hang until remote has shut down. + buf := make([]byte, 128) + n, err := ctl.conn.Read(buf) + if err == nil { + log.Fatalf("expected eof after graceful shutdown, got data %q", buf[:n]) + } else if err != io.EOF { + log.Fatalf("expected eof after graceful shutdown, got error %v", err) + } + fmt.Println("mox stopped") +} + +func cmdRestart(c *cmd) { + c.help = `Restart mox after validating the configuration file. + +Restart execs the mox binary, which have been updated. Restart returns after +the restart has finished. If you update the mox binary, keep in mind that the +validation of the configuration file is done by the old process with the old +binary. The new binary may report a syntax error. If you update the binary, you +should use the "config test" command with the new binary to validate the +configuration file. + +Like stop, existing connections get a 3 second period for graceful shutdown. 
+` + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("restart") + line := ctl.xread() + if line != "ok" { + log.Fatalf("restart failed: %s", line) + } + // Server is now restarting. It will write ok when it is back online again. If it fails, our connection will be closed. + buf := make([]byte, 128) + n, err := ctl.conn.Read(buf) + if err != nil { + log.Fatalf("restart failed: %s", err) + } + s := strings.TrimSuffix(string(buf[:n]), "\n") + if s != "ok" { + log.Fatalf("restart failed: %s", s) + } + fmt.Println("mox restarted") +} + +func cmdSetadminpassword(c *cmd) { + c.help = `Set a new admin password, for the web interface. + +The password is read from stdin. Its bcrypt hash is stored in a file named +"adminpasswd" in the configuration directory. +` + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + path := mox.ConfigDirPath(mox.Conf.Static.AdminPasswordFile) + if path == "" { + log.Fatal("no admin password file configured") + } + + pw := xreadpassword() + hash, err := bcrypt.GenerateFromPassword([]byte(pw), bcrypt.DefaultCost) + xcheckf(err, "generating hash for password") + err = os.WriteFile(path, hash, 0660) + xcheckf(err, "writing hash to admin password file") +} + +func xreadpassword() string { + fmt.Println("Type new password. Password WILL echo.") + fmt.Printf("password: ") + buf := make([]byte, 64) + n, err := os.Stdin.Read(buf) + xcheckf(err, "reading stdin") + pw := string(buf[:n]) + pw = strings.TrimSuffix(strings.TrimSuffix(pw, "\r\n"), "\n") + if len(pw) < 8 { + log.Fatal("password must be at least 8 characters") + } + return pw +} + +func cmdSetaccountpassword(c *cmd) { + c.params = "address" + c.help = `Set new password an account. + +The password is read from stdin. Its bcrypt hash and SCRAM-SHA-256 derivations +are stored in the accounts database. + +Any email address configured for the account can be used. 
+` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + mox.MustLoadConfig() + + acc, _, err := store.OpenEmail(args[0]) + xcheckf(err, "open account") + + pw := xreadpassword() + + err = acc.SetPassword(pw) + xcheckf(err, "setting password") + err = acc.Close() + xcheckf(err, "closing account") +} + +func cmdDeliver(c *cmd) { + c.unlisted = true + c.params = "address < message" + c.help = "Deliver message to address." + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("deliver") + ctl.xwrite(args[0]) + ctl.xreadok() + ctl.xstreamfrom(os.Stdin) + line := ctl.xread() + if line == "ok" { + fmt.Println("message delivered") + } else { + log.Fatalf("deliver: %s", line) + } +} + +func cmdQueueList(c *cmd) { + c.help = `List messages in the delivery queue. + +This prints the message with its ID, last and next delivery attempts, last +error. +` + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("queue") + ctl.xreadok() + if _, err := io.Copy(os.Stdout, ctl.reader()); err != nil { + log.Fatalf("%s", err) + } +} + +func cmdQueueKick(c *cmd) { + c.params = "[-id id] [-todomain domain] [-recipient address]" + c.help = `Schedule matching messages in the queue for immediate delivery. + +Messages deliveries are normally attempted with exponential backoff. The first +retry after 7.5 minutes, and doubling each time. Kicking messages sets their +next scheduled attempt to now, it can cause delivery to fail earlier than +without rescheduling. 
+` + var id int64 + var todomain, recipient string + c.flag.Int64Var(&id, "id", 0, "id of message in queue") + c.flag.StringVar(&todomain, "todomain", "", "destination domain of messages") + c.flag.StringVar(&recipient, "recipient", "", "recipient email address") + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("queuekick") + ctl.xwrite(fmt.Sprintf("%d", id)) + ctl.xwrite(todomain) + ctl.xwrite(recipient) + count := ctl.xread() + line := ctl.xread() + if line == "ok" { + fmt.Printf("%s messages scheduled\n", count) + } else { + log.Fatalf("scheduling messages for immediate delivery: %s", line) + } +} + +func cmdQueueDrop(c *cmd) { + c.params = "[-id id] [-todomain domain] [-recipient address]" + c.help = `Remove matching messages from the queue. + +Dangerous operation, this completely removes the message. If you want to store +the message, use "queue dump" before removing. +` + var id int64 + var todomain, recipient string + c.flag.Int64Var(&id, "id", 0, "id of message in queue") + c.flag.StringVar(&todomain, "todomain", "", "destination domain of messages") + c.flag.StringVar(&recipient, "recipient", "", "recipient email address") + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("queuedrop") + ctl.xwrite(fmt.Sprintf("%d", id)) + ctl.xwrite(todomain) + ctl.xwrite(recipient) + count := ctl.xread() + line := ctl.xread() + if line == "ok" { + fmt.Printf("%s messages dropped\n", count) + } else { + log.Fatalf("scheduling messages for immediate delivery: %s", line) + } +} + +func cmdQueueDump(c *cmd) { + c.params = "id" + c.help = `Dump a message from the queue. + +The message is printed to stdout and is in standard internet mail format. 
+` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + mox.MustLoadConfig() + + ctl := xctl() + ctl.xwrite("queuedump") + ctl.xwrite(args[0]) + ctl.xreadok() + if _, err := io.Copy(os.Stdout, ctl.reader()); err != nil { + log.Fatalf("%s", err) + } +} + +func cmdDKIMGenrsa(c *cmd) { + c.params = ">$selector._domainkey.$domain.rsakey.pkcs8.pem" + c.help = `Generate a new 2048 bit RSA private key for use with DKIM. + +The generated file is in PEM format, and has a comment it is generated for use +with DKIM, by mox. +` + if len(c.Parse()) != 0 { + c.Usage() + } + + buf, err := mox.MakeDKIMRSAKey(dns.Domain{}, dns.Domain{}) + xcheckf(err, "making rsa private key") + _, err = os.Stdout.Write(buf) + xcheckf(err, "writing rsa private key") +} + +func cmdDKIMGened25519(c *cmd) { + c.params = ">$selector._domainkey.$domain.ed25519key.pkcs8.pem" + c.help = `Generate a new ed25519 key for use with DKIM. + +Ed25519 keys are much smaller than RSA keys of comparable cryptographic +strength. This is convenient because of maximum DNS message sizes. At the time +of writing, not many mail servers appear to support ed25519 DKIM keys though, +so it is recommended to sign messages with both RSA and ed25519 keys. +` + if len(c.Parse()) != 0 { + c.Usage() + } + + buf, err := mox.MakeDKIMEd25519Key(dns.Domain{}, dns.Domain{}) + xcheckf(err, "making dkim ed25519 key") + _, err = os.Stdout.Write(buf) + xcheckf(err, "writing dkim ed25519 key") +} + +func cmdDKIMTXT(c *cmd) { + c.params = "<$selector._domainkey.$domain.key.pkcs8.pem" + c.help = `Print a DKIM DNS TXT record with the public key derived from the private key read from stdin. + +The DNS should be configured as a TXT record at $selector._domainkey.$domain. 
+` + if len(c.Parse()) != 0 { + c.Usage() + } + + privKey, err := parseDKIMKey(os.Stdin) + xcheckf(err, "reading dkim private key from stdin") + + r := dkim.Record{ + Version: "DKIM1", + Hashes: []string{"sha256"}, + Flags: []string{"s"}, + } + + switch key := privKey.(type) { + case *rsa.PrivateKey: + r.PublicKey = key.Public() + case ed25519.PrivateKey: + r.PublicKey = key.Public() + r.Key = "ed25519" + default: + log.Fatalf("unsupported private key type %T, must be rsa or ed25519", privKey) + } + + record, err := r.Record() + xcheckf(err, "making record") + fmt.Print("._domainkey. IN TXT ") + for record != "" { + s := record + if len(s) > 255 { + s, record = record[:255], record[255:] + } else { + record = "" + } + fmt.Printf(`"%s" `, s) + } + fmt.Println("") +} + +func parseDKIMKey(r io.Reader) (any, error) { + buf, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("reading pem from stdin: %v", err) + } + b, _ := pem.Decode(buf) + if b == nil { + return nil, fmt.Errorf("decoding pem: %v", err) + } + privKey, err := x509.ParsePKCS8PrivateKey(b.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing private key: %v", err) + } + return privKey, nil +} + +func cmdDKIMVerify(c *cmd) { + c.params = "message" + c.help = `Verify the DKIM signatures in a message and print the results. + +The message is parsed, and the DKIM-Signature headers are validated. Validation +of older messages may fail because the DNS records have been removed or changed +by now, or because the signature header may have specified an expiration time +that was passed. 
`
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	msgf, err := os.Open(args[0])
	xcheckf(err, "open message")

	results, err := dkim.Verify(context.Background(), dns.StrictResolver{}, false, dkim.DefaultPolicy, msgf, true)
	xcheckf(err, "dkim verify")

	for _, result := range results {
		record, err := result.Record.Record()
		if err != nil {
			log.Printf("warning: record: %s", err)
		}
		sigh, err := result.Sig.Header()
		xcheckf(err, "packing dkim-signature header")
		fmt.Printf("status %q, err %v\nrecord %s\nheader %s\n", result.Status, result.Err, record, sigh)
	}
}

func cmdDKIMLookup(c *cmd) {
	c.params = "selector domain"
	c.help = "Lookup and print the DKIM record for the selector at the domain."
	args := c.Parse()
	if len(args) != 2 {
		c.Usage()
	}

	selector := xparseDomain(args[0], "selector")
	domain := xparseDomain(args[1], "domain")

	status, record, txt, err := dkim.Lookup(context.Background(), dns.StrictResolver{}, selector, domain)
	if err != nil {
		fmt.Printf("error: %s\n", err)
	}
	if status != dkim.StatusNeutral {
		fmt.Printf("status: %s\n", status)
	}
	if txt != "" {
		fmt.Printf("TXT record: %s\n", txt)
	}
	if record != nil {
		fmt.Printf("Record:\n")
		pairs := []any{
			"version", record.Version,
			"hashes", record.Hashes,
			"key", record.Key,
			"notes", record.Notes,
			"services", record.Services,
			"flags", record.Flags,
		}
		for i := 0; i < len(pairs); i += 2 {
			fmt.Printf("\t%s: %v\n", pairs[i], pairs[i+1])
		}
	}
}

func cmdDMARCLookup(c *cmd) {
	c.params = "domain"
	c.help = "Lookup dmarc policy for domain, a DNS TXT record at _dmarc., validate and print it."
	args := c.Parse()
	if len(args) != 1 {
		c.Usage()
	}

	fromdomain := xparseDomain(args[0], "domain")
	_, domain, _, txt, err := dmarc.Lookup(context.Background(), dns.StrictResolver{}, fromdomain)
	xcheckf(err, "dmarc lookup domain %s", fromdomain)
	fmt.Printf("dmarc record at domain %s: %s\n", domain, txt)
}

func cmdDMARCVerify(c *cmd) {
	c.params = "remoteip mailfromaddress helodomain < message"
	c.help = `Parse an email message and evaluate it against the DMARC policy of the domain in the From-header.

mailfromaddress and helodomain are used for SPF validation. If both are empty,
SPF validation is skipped.

mailfromaddress should be the address used as MAIL FROM in the SMTP session.
For DSN messages, that address may be empty. The helo domain was specified at
the beginning of the SMTP transaction that delivered the message. These values
can be found in message headers.
`
	args := c.Parse()
	if len(args) != 3 {
		c.Usage()
	}

	var heloDomain *dns.Domain

	remoteIP := xparseIP(args[0], "remoteip")

	var mailfrom *smtp.Address
	if args[1] != "" {
		a, err := smtp.ParseAddress(args[1])
		xcheckf(err, "parsing mailfrom address")
		mailfrom = &a
	}
	if args[2] != "" {
		d := xparseDomain(args[2], "helo domain")
		heloDomain = &d
	}
	var received *spf.Received
	spfStatus := spf.StatusNone
	var spfIdentity *dns.Domain
	// SPF validation is only attempted when a mailfrom address or helo domain
	// was given.
	if mailfrom != nil || heloDomain != nil {
		spfArgs := spf.Args{
			RemoteIP:      remoteIP,
			LocalIP:       net.ParseIP("127.0.0.1"),
			LocalHostname: dns.Domain{ASCII: "localhost"},
		}
		if mailfrom != nil {
			spfArgs.MailFromLocalpart = mailfrom.Localpart
			spfArgs.MailFromDomain = mailfrom.Domain
		}
		if heloDomain != nil {
			spfArgs.HelloDomain = dns.IPDomain{Domain: *heloDomain}
		}
		rspf, spfDomain, expl, err := spf.Verify(context.Background(), dns.StrictResolver{}, spfArgs)
		if err != nil {
			log.Printf("spf verify: %v (explanation: %q)", err, expl)
		} else {
			received = &rspf
			spfStatus = received.Result
// todo: should probably potentially do two separate spf validations + if mailfrom != nil { + spfIdentity = &mailfrom.Domain + } else { + spfIdentity = heloDomain + } + fmt.Printf("spf result: %q: %q\n", spfDomain, spfStatus) + } + } + + data, err := io.ReadAll(os.Stdin) + xcheckf(err, "read message") + dmarcFrom, _, err := message.From(bytes.NewReader(data)) + xcheckf(err, "extract dmarc from message") + + const ignoreTestMode = false + dkimResults, err := dkim.Verify(context.Background(), dns.StrictResolver{}, true, func(*dkim.Sig) error { return nil }, bytes.NewReader(data), ignoreTestMode) + xcheckf(err, "dkim verify") + for _, r := range dkimResults { + fmt.Printf("dkim result: %q (err %v)\n", r.Status, r.Err) + } + + _, result := dmarc.Verify(context.Background(), dns.StrictResolver{}, dmarcFrom.Domain, dkimResults, spfStatus, spfIdentity, false) + xcheckf(result.Err, "dmarc verify") + fmt.Printf("dmarc from: %s\ndmarc status: %q\ndmarc reject: %v\ncmarc record: %s\n", dmarcFrom, result.Status, result.Reject, result.Record) +} + +func cmdDMARCParsereportmsg(c *cmd) { + c.params = "message ..." + c.help = `Parse a DMARC report from an email message, and print its extracted details. + +DMARC reports are periodically mailed, if requested in the DMARC DNS record of +a domain. Reports are sent by mail servers that received messages with our +domain in a From header. This may or may not be legatimate email. DMARC reports +contain summaries of evaluations of DMARC and DKIM/SPF, which can help +understand email deliverability problems. 
+` + args := c.Parse() + if len(args) == 0 { + c.Usage() + } + + for _, arg := range args { + f, err := os.Open(arg) + xcheckf(err, "open %q", arg) + feedback, err := dmarcrpt.ParseMessageReport(f) + xcheckf(err, "parse report in %q", arg) + meta := feedback.ReportMetadata + fmt.Printf("Report: period %s-%s, organisation %q, reportID %q, %s\n", time.Unix(meta.DateRange.Begin, 0).UTC().String(), time.Unix(meta.DateRange.End, 0).UTC().String(), meta.OrgName, meta.ReportID, meta.Email) + if len(meta.Errors) > 0 { + fmt.Printf("Errors:\n") + for _, s := range meta.Errors { + fmt.Printf("\t- %s\n", s) + } + } + pol := feedback.PolicyPublished + fmt.Printf("Policy: domain %q, policy %q, subdomainpolicy %q, dkim %q, spf %q, percentage %d, options %q\n", pol.Domain, pol.Policy, pol.SubdomainPolicy, pol.ADKIM, pol.ASPF, pol.Percentage, pol.ReportingOptions) + for _, record := range feedback.Records { + idents := record.Identifiers + fmt.Printf("\theaderfrom %q, envelopes from %q, to %q\n", idents.HeaderFrom, idents.EnvelopeFrom, idents.EnvelopeTo) + eval := record.Row.PolicyEvaluated + var reasons string + for _, reason := range eval.Reasons { + reasons += "; " + string(reason.Type) + if reason.Comment != "" { + reasons += fmt.Sprintf(": %q", reason.Comment) + } + } + fmt.Printf("\tresult %s: dkim %s, spf %s; sourceIP %s, count %d%s\n", eval.Disposition, eval.DKIM, eval.SPF, record.Row.SourceIP, record.Row.Count, reasons) + for _, dkim := range record.AuthResults.DKIM { + var result string + if dkim.HumanResult != "" { + result = fmt.Sprintf(": %q", dkim.HumanResult) + } + fmt.Printf("\t\tdkim %s; domain %q selector %q%s\n", dkim.Result, dkim.Domain, dkim.Selector, result) + } + for _, spf := range record.AuthResults.SPF { + fmt.Printf("\t\tspf %s; domain %q scope %q\n", spf.Result, spf.Domain, spf.Scope) + } + } + } +} + +func cmdDMARCDBAddReport(c *cmd) { + c.unlisted = true + c.params = "fromdomain < message" + c.help = "Add a DMARC report to the database." 
+ args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + mox.MustLoadConfig() + + fromdomain := xparseDomain(args[0], "domain") + fmt.Fprintln(os.Stderr, "reading report message from stdin") + report, err := dmarcrpt.ParseMessageReport(os.Stdin) + xcheckf(err, "parse message") + err = dmarcdb.AddReport(context.Background(), report, fromdomain) + xcheckf(err, "add dmarc report") +} + +func cmdTLSRPTLookup(c *cmd) { + c.params = "domain" + c.help = `Lookup the TLSRPT record for the domain. + +A TLSRPT record typically contains an email address where reports about TLS +connectivity should be sent. Mail servers attempting delivery to our domain +should attempt to use TLS. TLSRPT lets them report how many connection +successfully used TLS, and how what kind of errors occurred otherwise. +` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + d := xparseDomain(args[0], "domain") + _, txt, err := tlsrpt.Lookup(context.Background(), dns.StrictResolver{}, d) + xcheckf(err, "tlsrpt lookup for %s", d) + fmt.Println(txt) +} + +func cmdTLSRPTParsereportmsg(c *cmd) { + c.params = "message ..." + c.help = `Parse and print the TLSRPT in the message. + +The report is printed in formatted JSON. +` + args := c.Parse() + if len(args) == 0 { + c.Usage() + } + + for _, arg := range args { + f, err := os.Open(arg) + xcheckf(err, "open %q", arg) + report, err := tlsrpt.ParseMessage(f) + xcheckf(err, "parse report in %q", arg) + // todo future: only print the highlights? + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", "\t") + err = enc.Encode(report) + xcheckf(err, "write report") + } +} + +func cmdSPFCheck(c *cmd) { + c.params = "domain ip" + c.help = `Check the status of IP for the policy published in DNS for the domain. + +IPs may be allowed to send for a domain, or disallowed, and several shades in +between. If not allowed, an explanation may be provided by the policy. If so, +the explanation is printed. The SPF mechanism that matched (if any) is also +printed. 
+` + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + + domain := xparseDomain(args[0], "domain") + + ip := xparseIP(args[1], "ip") + + spfargs := spf.Args{ + RemoteIP: ip, + MailFromLocalpart: "user", + MailFromDomain: domain, + HelloDomain: dns.IPDomain{Domain: domain}, + LocalIP: net.ParseIP("127.0.0.1"), + LocalHostname: dns.Domain{ASCII: "localhost"}, + } + r, _, explanation, err := spf.Verify(context.Background(), dns.StrictResolver{}, spfargs) + if err != nil { + fmt.Printf("error: %s\n", err) + } + if explanation != "" { + fmt.Printf("explanation: %s\n", explanation) + } + fmt.Printf("status: %s\n", r.Result) + if r.Mechanism != "" { + fmt.Printf("mechanism: %s\n", r.Mechanism) + } +} + +func cmdSPFParse(c *cmd) { + c.params = "txtrecord" + c.help = "Parse the record as SPF record. If valid, nothing is printed." + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + _, _, err := spf.ParseRecord(args[0]) + xcheckf(err, "parsing record") +} + +func cmdSPFLookup(c *cmd) { + c.params = "domain" + c.help = "Lookup the SPF record for the domain and print it." + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + domain := xparseDomain(args[0], "domain") + _, txt, _, err := spf.Lookup(context.Background(), dns.StrictResolver{}, domain) + xcheckf(err, "spf lookup for %s", domain) + fmt.Println(txt) +} + +func cmdMTASTSLookup(c *cmd) { + c.params = "domain" + c.help = `Lookup the MTASTS record and policy for the domain. + +MTA-STS is a mechanism for a domain to specify if it requires TLS connections +for delivering email. If a domain has a valid MTA-STS DNS TXT record at +_mta-sts. it signals it implements MTA-STS. A policy can then be +fetched at https://mta-sts./.well-known/mta-sts.txt. The policy +specifies the mode (enforce, testing, none), which MX servers support TLS and +should be used, and how long the policy can be cached. 
+` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + domain := xparseDomain(args[0], "domain") + + record, policy, err := mtasts.Get(context.Background(), dns.StrictResolver{}, domain) + if err != nil { + fmt.Printf("error: %s\n", err) + } + if record != nil { + fmt.Printf("DNS TXT record _mta-sts.%s: %s\n", domain.ASCII, record.String()) + } + if policy != nil { + fmt.Println("") + fmt.Printf("policy at https://mta-sts.%s/.well-known/mta-sts.txt:\n", domain.ASCII) + fmt.Printf("%s", policy.String()) + } +} + +func cmdTLSRPTDBAddReport(c *cmd) { + c.unlisted = true + c.params = "< message" + c.help = "Parse a TLS report from the message and add it to the database." + args := c.Parse() + if len(args) != 0 { + c.Usage() + } + + mox.MustLoadConfig() + + // First read message, to get the From-header. Then parse it as TLSRPT. + fmt.Fprintln(os.Stderr, "reading report message from stdin") + buf, err := io.ReadAll(os.Stdin) + xcheckf(err, "reading message") + part, err := message.Parse(bytes.NewReader(buf)) + xcheckf(err, "parsing message") + if part.Envelope == nil || len(part.Envelope.From) != 1 { + log.Fatalf("message must have one From-header") + } + from := part.Envelope.From[0] + domain := xparseDomain(from.Host, "domain") + + report, err := tlsrpt.ParseMessage(bytes.NewReader(buf)) + xcheckf(err, "parsing tls report in message") + + mailfrom := from.User + "@" + from.Host // todo future: should escape and such + err = tlsrptdb.AddReport(context.Background(), domain, mailfrom, report) + xcheckf(err, "add tls report to database") +} + +func cmdDNSBLCheck(c *cmd) { + c.params = "zone ip" + c.help = `Test if IP is in the DNS blocklist of the zone, e.g. bl.spamcop.net. + +If the IP is in the blocklist, an explanation is printed. This is typically a +URL with more information. 
+` + args := c.Parse() + if len(args) != 2 { + c.Usage() + } + + zone := xparseDomain(args[0], "zone") + ip := xparseIP(args[1], "ip") + + status, explanation, err := dnsbl.Lookup(context.Background(), dns.StrictResolver{}, zone, ip) + fmt.Printf("status: %s\n", status) + if status == dnsbl.StatusFail { + fmt.Printf("explanation: %q\n", explanation) + } + if err != nil { + fmt.Printf("error: %s\n", err) + } +} + +func cmdDNSBLCheckhealth(c *cmd) { + c.params = "zone" + c.help = `Check the health of the DNS blocklist represented by zone, e.g. bl.spamcop.net. + +The health of a DNS blocklist can be checked by querying for 127.0.0.1 and +127.0.0.2. The second must and the first must not be present. +` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + zone := xparseDomain(args[0], "zone") + err := dnsbl.CheckHealth(context.Background(), dns.StrictResolver{}, zone) + xcheckf(err, "unhealthy") + fmt.Println("healthy") +} + +func cmdCheckupdate(c *cmd) { + c.help = `Check if a newer version of mox is available. + +A single DNS TXT lookup to _updates.xmox.nl tells if a new version is +available. If so, a changelog is fetched from https://updates.xmox.nl, and the +individual entries validated with a builtin public key. The changelog is +printed. 
+` + if len(c.Parse()) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + current, lastknown, _, err := mox.LastKnown() + if err != nil { + log.Printf("getting last known version: %s", err) + } else { + fmt.Printf("last known version: %s\n", lastknown) + fmt.Printf("current version: %s\n", current) + } + latest, _, err := updates.Lookup(context.Background(), dns.StrictResolver{}, dns.Domain{ASCII: changelogDomain}) + xcheckf(err, "lookup of latest version") + fmt.Printf("latest version: %s\n", latest) + + if latest.After(current) { + changelog, err := updates.FetchChangelog(context.Background(), changelogURL, current, changelogPubKey) + xcheckf(err, "fetching changelog") + fmt.Printf("Changelog\n\n") + fmt.Println(changelog) + } +} + +func cmdCid(c *cmd) { + c.params = "cid" + c.help = `Turn an ID from a Received header into a cid, for looking up in logs. + +A cid is essentially a connection counter initialized when mox starts. Each log +line contains a cid. Received headers added by mox contain a unique ID that can +be decrypted to a cid by admin of a mox instance only. +` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + mox.MustLoadConfig() + recvidpath := mox.DataDirPath("receivedid.key") + recvidbuf, err := os.ReadFile(recvidpath) + xcheckf(err, "reading %s", recvidpath) + if len(recvidbuf) != 16+8 { + log.Fatalf("bad data in %s: got %d bytes, expect 16+8=24", recvidpath, len(recvidbuf)) + } + err = mox.ReceivedIDInit(recvidbuf[:16], recvidbuf[16:]) + xcheckf(err, "init receivedid") + + cid, err := mox.ReceivedToCid(args[0]) + xcheckf(err, "received id to cid") + fmt.Printf("%x\n", cid) +} + +func cmdVersion(c *cmd) { + c.help = "Prints this mox version." + if len(c.Parse()) != 0 { + c.Usage() + } + fmt.Println(moxvar.Version) +} + +func cmdEnsureParsed(c *cmd) { + c.unlisted = true + c.params = "account" + c.help = "Ensure messages in the database have a ParsedBuf." 
+ var all bool + c.flag.BoolVar(&all, "all", false, "store new parsed message for all messages") + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + mox.MustLoadConfig() + a, err := store.OpenAccount(args[0]) + xcheckf(err, "open account") + defer a.Close() + + n := 0 + err = a.DB.Write(func(tx *bstore.Tx) error { + q := bstore.QueryTx[store.Message](tx) + q.FilterFn(func(m store.Message) bool { + return all || m.ParsedBuf == nil + }) + l, err := q.List() + if err != nil { + return fmt.Errorf("list messages: %v", err) + } + for _, m := range l { + mr := a.MessageReader(m) + p, err := message.EnsurePart(mr, m.Size) + if err != nil { + log.Printf("parsing message %d: %v (continuing)", m.ID, err) + } + m.ParsedBuf, err = json.Marshal(p) + if err != nil { + return fmt.Errorf("marshal parsed message: %v", err) + } + if err := tx.Update(&m); err != nil { + return fmt.Errorf("update message: %v", err) + } + n++ + } + return nil + }) + xcheckf(err, "update messages with parsed mime structure") + fmt.Printf("%d messages updated\n", n) +} + +func cmdBumpUIDValidity(c *cmd) { + c.unlisted = true + c.params = "account mailbox" + c.help = "Change the IMAP UID validity of the mailbox, causing IMAP clients to refetch messages." 
+ args := c.Parse() + if len(args) != 2 { + c.Usage() + } + + mox.MustLoadConfig() + a, err := store.OpenAccount(args[0]) + xcheckf(err, "open account") + defer a.Close() + + var uidvalidity uint32 + err = a.DB.Write(func(tx *bstore.Tx) error { + mb, err := bstore.QueryTx[store.Mailbox](tx).FilterEqual("Name", args[1]).Get() + if err != nil { + return fmt.Errorf("looking up mailbox: %v", err) + } + mb.UIDValidity++ + uidvalidity = mb.UIDValidity + err = tx.Update(&mb) + if err != nil { + return fmt.Errorf("updating uid validity for mailbox: %v", err) + } + return nil + }) + xcheckf(err, "updating uidvalidity for mailbox: %v", err) + fmt.Printf("uid validity for %q is now %d\n", args[1], uidvalidity) +} + +func cmdSendmail(c *cmd) { + c.params = "[-Fname] [ignoredflags] [-t] [\r\n", submitconf.From) + var haveTo bool + for { + line, err := r.ReadString('\n') + if err != nil && err != io.EOF { + xcheckf(err, "reading message") + } + if line != "" { + if !strings.HasSuffix(line, "\n") { + line += "\n" + } + if !strings.HasSuffix(line, "\r\n") { + line = line[:len(line)-1] + "\r\n" + } + if header && line == "\r\n" { + // Bare \r\n marks end of header. + if !haveTo { + line = fmt.Sprintf("To: <%s>\r\n", recipient) + line + } + header = false + } else if header { + t := strings.SplitN(line, ":", 2) + if len(t) != 2 { + log.Fatalf("invalid message, missing colon in header") + } + k := strings.ToLower(t[0]) + if k == "from" { + // We already added a From header. 
+ if err == io.EOF { + break + } + continue + } else if tflag && k == "to" { + if recipient != "" { + log.Fatalf("only single To header allowed") + } + addrs, err := mail.ParseAddressList(strings.TrimSpace(t[1])) + xcheckf(err, "parsing To address list") + if len(addrs) != 1 { + log.Fatalf("only single address allowed in To header") + } + recipient = addrs[0].Address + } + if k == "to" { + haveTo = true + } + } + sb.WriteString(line) + } + if err == io.EOF { + break + } + } + msg := sb.String() + + if recipient == "" { + log.Fatalf("no recipient") + } + + // Message seems acceptable. We'll try to deliver it from here. If that fails, we + // store the message in the users home directory. + + xcheckf := func(err error, format string, args ...any) { + if err == nil { + return + } + log.Printf("submit failed: %s: %s", fmt.Sprintf(format, args...), err) + homedir, err := os.UserHomeDir() + xcheckf(err, "finding homedir for storing message after failed delivery") + maildir := filepath.Join(homedir, "moxsubmit.failures") + os.Mkdir(maildir, 0700) + f, err := os.CreateTemp(maildir, "newmsg.") + xcheckf(err, "creating temp file for storing message after failed delivery") + defer func() { + if f != nil { + os.Remove(f.Name()) + } + }() + _, err = f.Write([]byte(msg)) + xcheckf(err, "writing message to temp file after failed delivery") + name := f.Name() + err = f.Close() + xcheckf(err, "closing message in temp file after failed delivery") + f = nil + log.Printf("saved message in %s", name) + os.Exit(1) + } + + var conn net.Conn + addr := net.JoinHostPort(submitconf.Host, fmt.Sprintf("%d", submitconf.Port)) + d := net.Dialer{Timeout: 30 * time.Second} + if submitconf.TLS { + conn, err = tls.DialWithDialer(&d, "tcp", addr, nil) + } else { + conn, err = d.Dial("tcp", addr) + } + xcheckf(err, "dial submit server") + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + tlsMode := smtpclient.TLSOpportunistic + authLine := fmt.Sprintf("AUTH 
PLAIN %s", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("\u0000%s\u0000%s", submitconf.Username, submitconf.Password)))) + mox.Conf.Static.HostnameDomain.ASCII = submitconf.LocalHostname + client, err := smtpclient.New(ctx, mlog.New("sendmail"), conn, tlsMode, submitconf.Host, authLine) + xcheckf(err, "open smtp session") + + err = client.Deliver(ctx, submitconf.From, recipient, int64(len(msg)), strings.NewReader(msg), true, false) + xcheckf(err, "submit message") + + if err := client.Close(); err != nil { + log.Printf("closing smtp session after message was sent: %v", err) + } +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..1685533 --- /dev/null +++ b/main_test.go @@ -0,0 +1,24 @@ +package main + +import ( + "strings" + "testing" + + "github.com/mjl-/mox/mlog" +) + +func TestParseDovecotKeywords(t *testing.T) { + const data = `0 Old +1 Junk +2 NonJunk +3 $Forwarded +4 $Junk +` + keywords := tryParseDovecotKeywords(strings.NewReader(data), mlog.New("dovecotkeywords")) + got := strings.Join(keywords, ",") + want := "Old,Junk,NonJunk,$Forwarded,$Junk" + if got != want { + t.Fatalf("parsing dovecot keywords, got %q, want %q", got, want) + + } +} diff --git a/message/doc.go b/message/doc.go new file mode 100644 index 0000000..06a784b --- /dev/null +++ b/message/doc.go @@ -0,0 +1,3 @@ +// Package message provides functions for reading and writing email messages, +// ensuring they are correctly formatted. +package message diff --git a/message/from.go b/message/from.go new file mode 100644 index 0000000..9550479 --- /dev/null +++ b/message/from.go @@ -0,0 +1,43 @@ +package message + +import ( + "fmt" + "io" + "net/textproto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +// From extracts the address in the From-header. +// +// An RFC5322 message must have a From header. +// In theory, multiple addresses may be present. In practice zero or multiple +// From headers may be present. 
From returns an error if there is not exactly +// one address. This address can be used for evaluating a DMARC policy against +// SPF and DKIM results. +func From(r io.ReaderAt) (raddr smtp.Address, header textproto.MIMEHeader, rerr error) { + // ../rfc/7489:1243 + + // todo: only allow utf8 if enabled in session/message? + + p, err := Parse(r) + if err != nil { + // todo: should we continue with p, perhaps headers can be parsed? + return raddr, nil, fmt.Errorf("parsing message: %v", err) + } + header, err = p.Header() + if err != nil { + return raddr, nil, fmt.Errorf("parsing message header: %v", err) + } + from := p.Envelope.From + if len(from) != 1 { + return raddr, nil, fmt.Errorf("from header has %d addresses, need exactly 1 address", len(from)) + } + d, err := dns.ParseDomain(from[0].Host) + if err != nil { + return raddr, nil, fmt.Errorf("bad domain in from address: %v", err) + } + addr := smtp.Address{Localpart: smtp.Localpart(from[0].User), Domain: d} + return addr, textproto.MIMEHeader(header), nil +} diff --git a/message/headerwriter.go b/message/headerwriter.go new file mode 100644 index 0000000..8ba6f18 --- /dev/null +++ b/message/headerwriter.go @@ -0,0 +1,65 @@ +package message + +import ( + "fmt" + "strings" +) + +// HeaderWriter helps create headers, folding to the next line when it would +// become too large. Useful for creating Received and DKIM-Signature headers. +type HeaderWriter struct { + b *strings.Builder + lineLen int + nonfirst bool +} + +// Addf formats the string and calls Add. +func (w *HeaderWriter) Addf(separator string, format string, args ...any) { + w.Add(separator, fmt.Sprintf(format, args...)) +} + +// Add adds texts, each separated by separator. Individual elements in text are +// not wrapped. 
+func (w *HeaderWriter) Add(separator string, texts ...string) { + if w.b == nil { + w.b = &strings.Builder{} + } + for _, text := range texts { + n := len(text) + if w.nonfirst && w.lineLen > 1 && w.lineLen+len(separator)+n > 78 { + w.b.WriteString("\r\n\t") + w.lineLen = 1 + } else if w.nonfirst && separator != "" { + w.b.WriteString(separator) + w.lineLen += len(separator) + } + w.b.WriteString(text) + w.lineLen += len(text) + w.nonfirst = true + } +} + +// AddWrap adds data, folding anywhere in the buffer. E.g. for base64 data. +func (w *HeaderWriter) AddWrap(buf []byte) { + for len(buf) > 0 { + line := buf + n := 78 - w.lineLen + if len(buf) > n { + line, buf = buf[:n], buf[n:] + } else { + buf = nil + n = len(buf) + } + w.b.Write(line) + w.lineLen += n + if len(buf) > 0 { + w.b.WriteString("\r\n\t") + w.lineLen = 1 + } + } +} + +// String returns the header in string form, ending with \r\n. +func (w *HeaderWriter) String() string { + return w.b.String() + "\r\n" +} diff --git a/message/part.go b/message/part.go new file mode 100644 index 0000000..0633f62 --- /dev/null +++ b/message/part.go @@ -0,0 +1,777 @@ +package message + +// todo: we should be more forgiving when parsing, at least as an option for imported messages, possibly incoming as well, but not for submitted/outgoing messages. +// todo: allow more invalid content-type values, we now stop parsing on: empty media type (eg "content-type: ; name=..."), empty value for property (eg "charset=", missing quotes for characters that should be quoted (eg boundary containing "=" but without quotes), duplicate properties (two charsets), empty pairs (eg "text/html;;"). +// todo: what should our max line length be? rfc says 1000. messages exceed that. we should enforce 1000 for outgoing messages. +// todo: should we be forgiving when closing boundary in multipart message is missing? seems like spam messages do this... +// todo: allow bare \r (without \n)? this does happen in messages. 
+// todo: should we allow base64 messages where a line starts with a space? and possibly more whitespace. is happening in messages. coreutils base64 accepts it, encoding/base64 does not. +// todo: handle comments in headers? +// todo: should we just always store messages with \n instead of \r\n? \r\n seems easier for use with imap. +// todo: is a header always \r\n\r\n-separated? or is \r\n enough at the beginning of a file? because what would this mean: "\r\ndata"? data isn't a header. +// todo: can use a cleanup + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "mime" + "mime/quotedprintable" + "net/mail" + "net/textproto" + "strings" + "time" + + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/smtp" +) + +var xlog = mlog.New("message") + +var ( + ErrBadContentType = errors.New("bad content-type") +) + +var ( + errNotMultipart = errors.New("not a multipart message") + errFirstBoundCloses = errors.New("first boundary cannot be finishing boundary") + errLineTooLong = errors.New("line too long") + errMissingBoundaryParam = errors.New("missing/empty boundary content-type parameter") + errMissingClosingBoundary = errors.New("eof without closing boundary") + errHalfLineSep = errors.New("invalid CR or LF without the other") + errUnexpectedEOF = errors.New("unexpected eof") +) + +// If set, during tests, attempts to reparse a part will cause an error, because sequentially reading parts should not lead to reparsing. +var enforceSequential bool + +// Part represents a whole mail message, or a part of a multipart message. It +// is designed to handle IMAP requirements efficiently. +type Part struct { + BoundaryOffset int64 // Offset in message where bound starts. -1 for top-level message. + HeaderOffset int64 // Offset in message file where header starts. + BodyOffset int64 // Offset in message file where body starts. + EndOffset int64 // Where body of part ends. Set when part is fully read. 
+ RawLineCount int64 // Number of lines in raw, undecoded, body of part. Set when part is fully read. + DecodedSize int64 // Number of octets when decoded. If this is a text mediatype, lines ending only in LF are changed end in CRLF and DecodedSize reflects that. + + MediaType string // From Content-Type, upper case. E.g. "TEXT". Can be empty because content-type may be absent. In this case, the part may be treated as TEXT/PLAIN. + MediaSubType string // From Content-Type, upper case. E.g. "PLAIN". + ContentTypeParams map[string]string // E.g. holds "boundary" for multipart messages. Has lower-case keys, and original case values. + ContentID string + ContentDescription string + ContentTransferEncoding string // In upper case. + Envelope *Envelope // Email message headers. Not for non-message parts. + + Parts []Part // Parts if this is a multipart. + + // Only for message/rfc822 and message/global. This part may have a buffer as + // backing io.ReaderAt, because a message/global can have a non-identity + // content-transfer-encoding. This part has a nil parent. + Message *Part + + r io.ReaderAt + header textproto.MIMEHeader // Parsed header. + nextBoundOffset int64 // If >= 0, the offset where the next part header starts. We can set this when a user fully reads each part. + lastBoundOffset int64 // Start of header of last/previous part. Used to skip a part if ParseNextPart is called and nextBoundOffset is -1. + parent *Part // Parent part, for getting bound from, and setting nextBoundOffset when a part has finished reading. Only for subparts, not top-level parts. + bound []byte // Only set if valid multipart with boundary, includes leading --, excludes \r\n. +} + +// Envelope holds the basic/common message headers as used in IMAP4. 
+type Envelope struct { + Date time.Time + Subject string + From []Address + Sender []Address + ReplyTo []Address + To []Address + CC []Address + BCC []Address + InReplyTo string + MessageID string +} + +// Address as used in From and To headers. +type Address struct { + Name string // Free-form name for display in mail applications. + User string // Localpart. + Host string // Domain in ASCII. +} + +// Parse reads the headers of the mail message and returns a part. +// A part provides access to decoded and raw contents of a message and its multiple parts. +func Parse(r io.ReaderAt) (Part, error) { + return newPart(r, 0, nil) +} + +// EnsurePart parses a part as with Parse, but ensures a usable part is always +// returned, even if error is non-nil. If a parse error occurs, the message is +// returned as application/octet-stream, and headers can still be read if they +// were valid. +func EnsurePart(r io.ReaderAt, size int64) (Part, error) { + p, err := Parse(r) + if err == nil { + err = p.Walk() + } + if err != nil { + np := Part{ + HeaderOffset: p.HeaderOffset, + BodyOffset: p.BodyOffset, + EndOffset: size, + MediaType: "APPLICATION", + MediaSubType: "OCTET-STREAM", + ContentTypeParams: p.ContentTypeParams, + ContentID: p.ContentID, + ContentDescription: p.ContentDescription, + ContentTransferEncoding: p.ContentTransferEncoding, + Envelope: p.Envelope, + // We don't keep: + // - BoundaryOffset: irrelevant for top-level message. + // - RawLineCount and DecodedSize: set below. + // - Parts: we are not treating this as a multipart message. + } + p = np + p.SetReaderAt(r) + // By reading body, the number of lines and decoded size will be set. + _, err2 := io.Copy(io.Discard, p.Reader()) + if err2 != nil { + err = err2 + } + } + return p, err +} + +// SetReaderAt sets r as reader for this part and all its sub parts, recursively. +// No reader is set for any Message subpart, see SetMessageReaderAt. 
+func (p *Part) SetReaderAt(r io.ReaderAt) { + if r == nil { + panic("nil reader") + } + p.r = r + for i := range p.Parts { + pp := &p.Parts[i] + pp.SetReaderAt(r) + } +} + +// SetMessageReaderAt sets a reader on p.Message, which must be non-nil. +func (p *Part) SetMessageReaderAt() error { + // todo: if p.Message does not contain any non-identity content-transfer-encoding, we should set an offsetReader of p.Message, recursively. + buf, err := io.ReadAll(p.Reader()) + if err != nil { + return err + } + p.Message.SetReaderAt(bytes.NewReader(buf)) + return nil +} + +// Walk through message, decoding along the way, and collecting mime part offsets and sizes, and line counts. +func (p *Part) Walk() error { + if len(p.bound) == 0 { + if p.MediaType == "MESSAGE" && (p.MediaSubType == "RFC822" || p.MediaSubType == "GLOBAL") { + // todo: don't read whole submessage in memory... + buf, err := io.ReadAll(p.Reader()) + if err != nil { + return err + } + mp, err := Parse(bytes.NewReader(buf)) + if err != nil { + return fmt.Errorf("parsing embedded message: %w", err) + } + // todo: if this is a DSN, we should have a lax parser that doesn't fail on unexpected end of file. this is quite common because MTA's can just truncate the original message. + if err := mp.Walk(); err != nil { + return fmt.Errorf("parsing parts of embedded message: %w", err) + } + // todo: if mp does not contain any non-identity content-transfer-encoding, we should set an offsetReader of p.r on mp, recursively. + p.Message = &mp + return nil + } + _, err := io.Copy(io.Discard, p.Reader()) + return err + } + + for { + pp, err := p.ParseNextPart() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if err := pp.Walk(); err != nil { + return err + } + } +} + +// String returns a debugging representation of the part. 
+func (p *Part) String() string { + return fmt.Sprintf("&Part{%s/%s offsets %d/%d/%d/%d lines %d decodedsize %d next %d last %d bound %q parts %v}", p.MediaType, p.MediaSubType, p.BoundaryOffset, p.HeaderOffset, p.BodyOffset, p.EndOffset, p.RawLineCount, p.DecodedSize, p.nextBoundOffset, p.lastBoundOffset, p.bound, p.Parts) +} + +// newPart parses a new part, which can be the top-level message. +// offset is the bound offset for parts, and the start of message for top-level messages. parent indicates if this is a top-level message or sub-part. +// If an error occurs, p's exported values can still be relevant. EnsurePart uses these values. +func newPart(r io.ReaderAt, offset int64, parent *Part) (p Part, rerr error) { + if r == nil { + panic("nil reader") + } + p = Part{ + BoundaryOffset: -1, + EndOffset: -1, + r: r, + parent: parent, + } + + b := &bufAt{r: r, offset: offset} + + if parent != nil { + p.BoundaryOffset = offset + if line, _, err := b.ReadLine(true); err != nil { + return p, err + } else if match, finish := checkBound(line, parent.bound); !match { + return p, fmt.Errorf("missing bound") + } else if finish { + return p, fmt.Errorf("new part for closing boundary") + } + } + + // Collect header. 
+ p.HeaderOffset = b.offset + p.BodyOffset = b.offset + hb := &bytes.Buffer{} + for { + line, _, err := b.ReadLine(true) + if err != nil { + return p, err + } + hb.Write(line) + if len(line) == 2 { + break // crlf + } + } + p.BodyOffset = b.offset + + h, err := parseHeader(hb) + if err != nil { + return p, fmt.Errorf("parsing header: %w", err) + } + p.header = h + + ct := h.Get("Content-Type") + mt, params, err := mime.ParseMediaType(ct) + if err != nil && ct != "" { + return p, fmt.Errorf("%w: %s: %q", ErrBadContentType, err, ct) + } + if mt != "" { + t := strings.SplitN(strings.ToUpper(mt), "/", 2) + if len(t) != 2 { + return p, fmt.Errorf("bad content-type: %q (content-type %q)", mt, ct) + } + p.MediaType = t[0] + p.MediaSubType = t[1] + p.ContentTypeParams = params + } + + p.ContentID = h.Get("Content-Id") + p.ContentDescription = h.Get("Content-Description") + p.ContentTransferEncoding = strings.ToUpper(h.Get("Content-Transfer-Encoding")) + + if parent == nil { + p.Envelope, err = parseEnvelope(mail.Header(h)) + if err != nil { + return p, err + } + } + + if p.MediaType == "MULTIPART" { + s := params["boundary"] + if s == "" { + return p, errMissingBoundaryParam + } + p.bound = append([]byte("--"), s...) + + // Discard preamble, before first boundary. + for { + line, _, err := b.PeekLine(true) + if err != nil { + return p, fmt.Errorf("parsing line for part preamble: %w", err) + } + // Line only needs boundary prefix, not exact match. ../rfc/2046:1103 + // Well, for compatibility, we require whitespace after the boundary. Because some + // software use the same boundary but with text appended for sub parts. + if match, finish := checkBound(line, p.bound); match { + if finish { + return p, errFirstBoundCloses + } + break + } + b.ReadLine(true) + } + p.nextBoundOffset = b.offset + p.lastBoundOffset = b.offset + } + + return p, nil +} + +// Header returns the parsed header of this part. 
func (p *Part) Header() (textproto.MIMEHeader, error) {
	// Cached after the first successful parse.
	if p.header != nil {
		return p.header, nil
	}
	h, err := parseHeader(p.HeaderReader())
	p.header = h
	return h, err
}

// HeaderReader returns a reader for the header section of this part, including ending bare CRLF.
func (p *Part) HeaderReader() io.Reader {
	return io.NewSectionReader(p.r, p.HeaderOffset, p.BodyOffset-p.HeaderOffset)
}

// parseHeader parses a MIME header from r.
func parseHeader(r io.Reader) (textproto.MIMEHeader, error) {
	return textproto.NewReader(bufio.NewReader(r)).ReadMIMEHeader()
}

// parseEnvelope builds an Envelope (date, subject, address lists, in-reply-to,
// message-id) from the header of a top-level message.
func parseEnvelope(h mail.Header) (*Envelope, error) {
	// Date parse errors are ignored; the zero time is stored.
	date, _ := h.Date()
	env := &Envelope{
		date,
		h.Get("Subject"),
		parseAddressList(h, "from"),
		parseAddressList(h, "sender"),
		parseAddressList(h, "reply-to"),
		parseAddressList(h, "to"),
		parseAddressList(h, "cc"),
		parseAddressList(h, "bcc"),
		h.Get("In-Reply-To"),
		h.Get("Message-Id"),
	}
	return env, nil
}

// parseAddressList returns the addresses under header key k, or nil when the
// header is absent or cannot be parsed as an address list. Addresses that fail
// the stricter smtp.ParseAddress are still included, with empty user/host.
func parseAddressList(h mail.Header, k string) []Address {
	l, err := h.AddressList(k)
	if err != nil {
		return nil
	}
	var r []Address
	for _, a := range l {
		// todo: parse more fully according to ../rfc/5322:959
		var user, host string
		addr, err := smtp.ParseAddress(a.Address)
		if err != nil {
			// todo: pass a ctx to this function so we can log with cid.
			xlog.Infox("parsing address", err, mlog.Field("address", a.Address))
		} else {
			user = addr.Localpart.String()
			host = addr.Domain.ASCII
		}
		r = append(r, Address{a.Name, user, host})
	}
	return r
}

// ParseNextPart parses the next (sub)part of this multipart message.
// ParseNextPart returns io.EOF and a nil part when there are no more parts.
// Only use for initial parsing of message. Once parsed, use p.Parts.
func (p *Part) ParseNextPart() (*Part, error) {
	if len(p.bound) == 0 {
		return nil, errNotMultipart
	}
	// nextBoundOffset is -1 when the previous sub-part has not been fully
	// read yet, so the next boundary's position is not yet known.
	if p.nextBoundOffset == -1 {
		if enforceSequential {
			panic("access not sequential")
		}
		// Set nextBoundOffset by fully reading the last part.
		last, err := newPart(p.r, p.lastBoundOffset, p)
		if err != nil {
			return nil, err
		}
		if _, err := io.Copy(io.Discard, last.RawReader()); err != nil {
			return nil, err
		}
		if p.nextBoundOffset == -1 {
			return nil, fmt.Errorf("internal error: reading part did not set nextBoundOffset")
		}
	}
	b := &bufAt{r: p.r, offset: p.nextBoundOffset}
	// todo: should we require a crlf on final closing bound? we don't require it because some message/rfc822 don't have a crlf after their closing boundary, so those messages don't end in crlf.
	line, crlf, err := b.ReadLine(false)
	if err != nil {
		return nil, err
	}
	if match, finish := checkBound(line, p.bound); !match {
		return nil, fmt.Errorf("expected bound, got %q", line)
	} else if finish {
		// Closing boundary ("--bound--"): skip the epilogue up to the
		// parent's next boundary so the parent's offsets stay consistent.
		// Read any trailing data.
		if p.parent != nil {
			for {
				line, _, err := b.PeekLine(false)
				if err != nil {
					break
				}
				if match, _ := checkBound(line, p.parent.bound); match {
					break
				}
				b.ReadLine(false)
			}
			if p.parent.lastBoundOffset == p.BoundaryOffset {
				p.parent.nextBoundOffset = b.offset
			}
		}
		p.EndOffset = b.offset
		return nil, io.EOF
	} else if !crlf {
		return nil, fmt.Errorf("non-finishing bound without crlf: %w", errUnexpectedEOF)
	}
	// Opening boundary of a new sub-part: parse it starting at the boundary.
	// nextBoundOffset becomes unknown until the new part has been read.
	boundOffset := p.nextBoundOffset
	p.lastBoundOffset = boundOffset
	p.nextBoundOffset = -1
	np, err := newPart(p.r, boundOffset, p)
	if err != nil {
		return nil, err
	}
	p.Parts = append(p.Parts, np)
	return &p.Parts[len(p.Parts)-1], nil
}

// Reader returns a reader for the decoded body content.
func (p *Part) Reader() io.Reader {
	return p.bodyReader(p.RawReader())
}

// bodyReader wraps r with a decoder for the part's content-transfer-encoding,
// and with a reader that records DecodedSize (for text, also normalizing line
// endings, see textReader).
func (p *Part) bodyReader(r io.Reader) io.Reader {
	r = newDecoder(p.ContentTransferEncoding, r)
	if p.MediaType == "TEXT" {
		return &textReader{p, bufio.NewReader(r), 0, false}
	}
	return &countReader{p, r, 0}
}

// countReader is an io.Reader that passes Reads to the underlying reader.
// When EOF is read, it sets p.DecodedSize to the number of bytes returned.
+type countReader struct { + p *Part + r io.Reader + count int64 +} + +func (cr *countReader) Read(buf []byte) (int, error) { + n, err := cr.r.Read(buf) + if n >= 0 { + cr.count += int64(n) + } + if err == io.EOF { + cr.p.DecodedSize = cr.count + } + return n, err +} + +// textReader is an io.Reader that ensures all lines return end in CRLF. +// when eof is read from the underlying reader, it sets p.DecodedSize. +type textReader struct { + p *Part + r *bufio.Reader + count int64 + prevcr bool // If previous byte returned was a CR. +} + +func (tr *textReader) Read(buf []byte) (int, error) { + o := 0 + for o < len(buf) { + c, err := tr.r.ReadByte() + if err != nil { + tr.count += int64(o) + tr.p.DecodedSize = tr.count + return o, err + } + if c == '\n' && !tr.prevcr { + buf[o] = '\r' + o++ + tr.prevcr = true + tr.r.UnreadByte() + continue + } + buf[o] = c + tr.prevcr = c == '\r' + o++ + } + tr.count += int64(o) + return o, nil +} + +func newDecoder(cte string, r io.Reader) io.Reader { + // ../rfc/2045:775 + switch cte { + case "BASE64": + return base64.NewDecoder(base64.StdEncoding, r) + case "QUOTED-PRINTABLE": + return quotedprintable.NewReader(r) + } + return r +} + +// RawReader returns a reader for the raw, undecoded body content. E.g. with +// quoted-printable or base64 content intact. +// Fully reading a part helps its parent part find its next part efficiently. +func (p *Part) RawReader() io.Reader { + if p.r == nil { + panic("missing reader") + } + if p.EndOffset >= 0 { + return io.NewSectionReader(p.r, p.BodyOffset, p.EndOffset-p.BodyOffset) + } + p.RawLineCount = 0 + if p.parent == nil { + return &offsetReader{p, p.BodyOffset, true} + } + return &boundReader{p: p, b: &bufAt{r: p.r, offset: p.BodyOffset}, lastnewline: true} +} + +// bufAt is a buffered reader on an underlying ReaderAt. +type bufAt struct { + offset int64 // Offset in r currently consumed, i.e. ignoring any buffered data. + + r io.ReaderAt + buf []byte // Buffered data. 
+ nbuf int // Valid bytes in buf. + scratch []byte +} + +// todo: lower max line length? at least have a mode where we refuse anything beyong 1000 bytes. ../rfc/5321:3512 +const maxLineLength = 8 * 1024 + +// ensure makes sure b.nbuf is up to maxLineLength, unless eof is encountered. +func (b *bufAt) ensure() error { + for _, c := range b.buf[:b.nbuf] { + if c == '\n' { + return nil + } + } + if b.scratch == nil { + b.scratch = make([]byte, maxLineLength) + } + if b.buf == nil { + b.buf = make([]byte, maxLineLength) + } + for b.nbuf < maxLineLength { + n, err := b.r.ReadAt(b.buf[b.nbuf:], b.offset+int64(b.nbuf)) + if n > 0 { + b.nbuf += n + } + if err != nil && err != io.EOF || err == io.EOF && b.nbuf+n == 0 { + return err + } + if n == 0 || err == io.EOF { + break + } + } + return nil +} + +// ReadLine reads a line until \r\n is found, returning the line including \r\n. +// If not found, or a single \r or \n is encountered, ReadLine returns an error, e.g. io.EOF. +func (b *bufAt) ReadLine(requirecrlf bool) (buf []byte, crlf bool, err error) { + return b.line(true, requirecrlf) +} + +func (b *bufAt) PeekLine(requirecrlf bool) (buf []byte, crlf bool, err error) { + return b.line(false, requirecrlf) +} + +func (b *bufAt) line(consume, requirecrlf bool) (buf []byte, crlf bool, err error) { + if err := b.ensure(); err != nil { + return nil, false, err + } + for i, c := range b.buf[:b.nbuf] { + if c == '\n' { + return nil, false, errHalfLineSep + } + if c != '\r' { + continue + } + i++ + if i >= b.nbuf || b.buf[i] != '\n' { + return nil, false, errHalfLineSep + } + b.scratch = b.scratch[:i+1] + copy(b.scratch, b.buf[:i+1]) + if consume { + copy(b.buf, b.buf[i+1:]) + b.offset += int64(i + 1) + b.nbuf -= i + 1 + } + return b.scratch, true, nil + } + if b.nbuf >= maxLineLength { + return nil, false, errLineTooLong + } + if requirecrlf { + return nil, false, errUnexpectedEOF + } + b.scratch = b.scratch[:b.nbuf] + copy(b.scratch, b.buf[:b.nbuf]) + if consume { + b.offset += 
int64(b.nbuf) + b.nbuf = 0 + } + return b.scratch, false, nil +} + +// PeekByte returns the next unread byte, or an error. +func (b *bufAt) PeekByte() (byte, error) { + if err := b.ensure(); err != nil { + return 0, err + } + if b.nbuf == 0 { + return 0, io.EOF + } + return b.buf[0], nil +} + +type offsetReader struct { + p *Part + offset int64 + lastnewline bool +} + +func (r *offsetReader) Read(buf []byte) (int, error) { + n, err := r.p.r.ReadAt(buf, r.offset) + if n > 0 { + r.offset += int64(n) + + for _, c := range buf[:n] { + if r.lastnewline { + r.p.RawLineCount++ + } + r.lastnewline = c == '\n' + } + } + if err == io.EOF { + r.p.EndOffset = r.offset + } + return n, err +} + +var crlf = []byte("\r\n") + +// boundReader is a reader that stops at a closing multipart boundary. +type boundReader struct { + p *Part + b *bufAt + buf []byte // Data from previous line, to be served first. + nbuf int // Number of valid bytes in buf. + crlf []byte // Possible crlf, to be returned if we do not yet encounter a boundary. + lastnewline bool // If last char return was a newline. For counting lines. +} + +func (b *boundReader) Read(buf []byte) (count int, rerr error) { + origBuf := buf + defer func() { + if count > 0 { + for _, c := range origBuf[:count] { + if b.lastnewline { + b.p.RawLineCount++ + } + b.lastnewline = c == '\n' + } + } + }() + + for { + // Read data from earlier line. + if b.nbuf > 0 { + n := b.nbuf + if n > len(buf) { + n = len(buf) + } + copy(buf, b.buf[:n]) + copy(b.buf, b.buf[n:]) + buf = buf[n:] + b.nbuf -= n + count += n + if b.nbuf > 0 { + break + } + } + + // Look at next line. If it is a boundary, we are done and won't serve the crlf from the last line. 
+ line, _, err := b.b.PeekLine(false) + if match, _ := checkBound(line, b.p.parent.bound); match { + b.p.EndOffset = b.b.offset - int64(len(b.crlf)) + if b.p.parent.lastBoundOffset == b.p.BoundaryOffset { + b.p.parent.nextBoundOffset = b.b.offset + } else if enforceSequential { + panic("access not sequential") + } + return count, io.EOF + } + if err == io.EOF { + err = errMissingClosingBoundary + } + if err != nil && err != io.EOF { + return count, err + } + if len(b.crlf) > 0 { + n := len(b.crlf) + if n > len(buf) { + n = len(buf) + } + copy(buf, b.crlf[:n]) + count += n + buf = buf[n:] + b.crlf = b.crlf[n:] + } + if len(buf) == 0 { + break + } + line, _, err = b.b.ReadLine(true) + if err != nil { + // Could be an unexpected end of the part. + return 0, err + } + b.crlf = crlf // crlf will be read next time, but not if a boundary follows. + n := len(line) - 2 + line = line[:n] + if n > len(buf) { + n = len(buf) + } + copy(buf, line[:n]) + count += n + buf = buf[n:] + line = line[n:] + if len(line) > 0 { + if b.buf == nil { + b.buf = make([]byte, maxLineLength) + } + copy(b.buf, line) + b.nbuf = len(line) + } + } + return count, nil +} + +func checkBound(line, bound []byte) (bool, bool) { + if !bytes.HasPrefix(line, bound) { + return false, false + } + line = line[len(bound):] + if bytes.HasPrefix(line, []byte("--")) { + return true, true + } + if len(line) == 0 { + return true, false + } + c := line[0] + switch c { + case ' ', '\t', '\r', '\n': + return true, false + } + return false, false +} diff --git a/message/part_test.go b/message/part_test.go new file mode 100644 index 0000000..e6b29e7 --- /dev/null +++ b/message/part_test.go @@ -0,0 +1,501 @@ +package message + +import ( + "bytes" + "errors" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strings" + "testing" +) + +func tcheck(t *testing.T, err error, msg string) { + t.Helper() + if err != nil { + t.Fatalf("%s: %s", msg, err) + } +} + +func tcompare(t *testing.T, got, exp any) { + t.Helper() + if 
!reflect.DeepEqual(got, exp) { + t.Fatalf("got %q, expected %q", got, exp) + } +} + +func tfail(t *testing.T, err, expErr error) { + t.Helper() + if (err == nil) != (expErr == nil) || expErr != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected %v", err, expErr) + } +} + +func TestEmptyHeader(t *testing.T) { + s := "\r\nx" + p, err := EnsurePart(strings.NewReader(s), int64(len(s))) + tcheck(t, err, "parse empty headers") + buf, err := io.ReadAll(p.Reader()) + tcheck(t, err, "read") + expBody := "x" + tcompare(t, string(buf), expBody) + tcompare(t, p.MediaType, "") + tcompare(t, p.MediaSubType, "") +} + +func TestBadContentType(t *testing.T) { + s := "content-type: text/html;;\r\n\r\ntest" + p, err := EnsurePart(strings.NewReader(s), int64(len(s))) + tfail(t, err, ErrBadContentType) + buf, err := io.ReadAll(p.Reader()) + tcheck(t, err, "read") + expBody := "test" + tcompare(t, string(buf), expBody) + tcompare(t, p.MediaType, "APPLICATION") + tcompare(t, p.MediaSubType, "OCTET-STREAM") +} + +var basicMsg = strings.ReplaceAll(`From: +Content-Type: text/plain +Content-Transfer-Encoding: base64 + +aGkK +`, "\n", "\r\n") + +func TestBasic(t *testing.T) { + r := strings.NewReader(basicMsg) + p, err := Parse(r) + tcheck(t, err, "new reader") + + buf, err := io.ReadAll(p.RawReader()) + tcheck(t, err, "read raw") + expBody := "aGkK\r\n" + tcompare(t, string(buf), expBody) + + buf, err = io.ReadAll(p.Reader()) + tcheck(t, err, "read decoded") + tcompare(t, string(buf), "hi\r\n") + + if p.RawLineCount != 1 { + t.Fatalf("basic message, got %d lines, expected 1", p.RawLineCount) + } + if size := p.EndOffset - p.BodyOffset; size != int64(len(expBody)) { + t.Fatalf("basic message, got size %d, expected %d", size, len(expBody)) + } +} + +// From ../rfc/3501:2589 +var basicMsg2 = strings.ReplaceAll(`Date: Mon, 7 Feb 1994 21:52:25 -0800 (PST) +From: Fred Foobar +Subject: afternoon meeting +To: mooch@owatagu.siam.edu.example +Message-Id: +MIME-Version: 1.0 
+Content-Type: TEXT/PLAIN; CHARSET=US-ASCII + +Hello Joe, do you think we can meet at 3:30 tomorrow? + +`, "\n", "\r\n") + +func TestBasic2(t *testing.T) { + r := strings.NewReader(basicMsg2) + p, err := Parse(r) + tcheck(t, err, "new reader") + + buf, err := io.ReadAll(p.RawReader()) + tcheck(t, err, "read raw") + expBody := "Hello Joe, do you think we can meet at 3:30 tomorrow?\r\n\r\n" + tcompare(t, string(buf), expBody) + + buf, err = io.ReadAll(p.Reader()) + tcheck(t, err, "read decoded") + tcompare(t, string(buf), expBody) + + if p.RawLineCount != 2 { + t.Fatalf("basic message, got %d lines, expected 2", p.RawLineCount) + } + if size := p.EndOffset - p.BodyOffset; size != int64(len(expBody)) { + t.Fatalf("basic message, got size %d, expected %d", size, len(expBody)) + } + + r = strings.NewReader(basicMsg2) + p, err = Parse(r) + tcheck(t, err, "new reader") + err = p.Walk() + tcheck(t, err, "walk") + if p.RawLineCount != 2 { + t.Fatalf("basic message, got %d lines, expected 2", p.RawLineCount) + } + if size := p.EndOffset - p.BodyOffset; size != int64(len(expBody)) { + t.Fatalf("basic message, got size %d, expected %d", size, len(expBody)) + } +} + +var mimeMsg = strings.ReplaceAll(`From: Nathaniel Borenstein +To: Ned Freed +Date: Sun, 21 Mar 1993 23:56:48 -0800 (PST) +Subject: Sample message +MIME-Version: 1.0 +Content-type: multipart/mixed; boundary="simple boundary" + +This is the preamble. It is to be ignored, though it +is a handy place for composition agents to include an +explanatory note to non-MIME conformant readers. + +--simple boundary + +This is implicitly typed plain US-ASCII text. +It does NOT end with a linebreak. +--simple boundary +Content-type: text/plain; charset=us-ascii + +This is explicitly typed plain US-ASCII text. +It DOES end with a linebreak. + +--simple boundary-- + +This is the epilogue. It is also to be ignored. 
+`, "\n", "\r\n") + +func TestMime(t *testing.T) { + // from ../rfc/2046:1148 + r := strings.NewReader(mimeMsg) + p, err := Parse(r) + tcheck(t, err, "new reader") + if len(p.bound) == 0 { + t.Fatalf("got no bound, expected bound for mime message") + } + + pp, err := p.ParseNextPart() + tcheck(t, err, "next part") + buf, err := io.ReadAll(pp.Reader()) + tcheck(t, err, "read all") + tcompare(t, string(buf), "This is implicitly typed plain US-ASCII text.\r\nIt does NOT end with a linebreak.") + + pp, err = p.ParseNextPart() + tcheck(t, err, "next part") + buf, err = io.ReadAll(pp.Reader()) + tcheck(t, err, "read all") + tcompare(t, string(buf), "This is explicitly typed plain US-ASCII text.\r\nIt DOES end with a linebreak.\r\n") + + _, err = p.ParseNextPart() + tcompare(t, err, io.EOF) + + if len(p.Parts) != 2 { + t.Fatalf("got %d parts, expected 2", len(p.Parts)) + } + if p.Parts[0].RawLineCount != 2 { + t.Fatalf("got %d lines for first part, expected 2", p.Parts[0].RawLineCount) + } + if p.Parts[1].RawLineCount != 2 { + t.Fatalf("got %d lines for second part, expected 2", p.Parts[1].RawLineCount) + } +} + +func TestLongLine(t *testing.T) { + line := make([]byte, maxLineLength+1) + for i := range line { + line[i] = 'a' + } + _, err := Parse(bytes.NewReader(line)) + tfail(t, err, errLineTooLong) +} + +func TestHalfCrLf(t *testing.T) { + _, err := Parse(strings.NewReader("test\rtest")) + tfail(t, err, errHalfLineSep) + + _, err = Parse(strings.NewReader("test\ntest")) + tfail(t, err, errHalfLineSep) +} + +func TestMissingClosingBoundary(t *testing.T) { + message := strings.ReplaceAll(`Content-Type: multipart/mixed; boundary=x + +--x + +test +`, "\n", "\r\n") + msg, err := Parse(strings.NewReader(message)) + tcheck(t, err, "new reader") + err = walkmsg(&msg) + tfail(t, err, errMissingClosingBoundary) + + msg, _ = Parse(strings.NewReader(message)) + err = msg.Walk() + tfail(t, err, errMissingClosingBoundary) +} + +func TestHeaderEOF(t *testing.T) { + message := "header: 
test" + _, err := Parse(strings.NewReader(message)) + tfail(t, err, errUnexpectedEOF) +} + +func TestBodyEOF(t *testing.T) { + message := "header: test\r\n\r\ntest" + msg, err := Parse(strings.NewReader(message)) + tcheck(t, err, "new reader") + buf, err := io.ReadAll(msg.Reader()) + tcheck(t, err, "read body") + tcompare(t, string(buf), "test") +} + +func TestWalk(t *testing.T) { + var message = strings.ReplaceAll(`Content-Type: multipart/related; boundary="----=_NextPart_afb3ad6f146b12b709deac3e387a3ad7" + +------=_NextPart_afb3ad6f146b12b709deac3e387a3ad7 +Content-Type: multipart/alternative; boundary="----=_NextPart_afb3ad6f146b12b709deac3e387a3ad7_alt" + +------=_NextPart_afb3ad6f146b12b709deac3e387a3ad7_alt +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit + +test + + +------=_NextPart_afb3ad6f146b12b709deac3e387a3ad7_alt +Content-Type: text/html; charset="utf-8" +Content-Transfer-Encoding: 8bit + +test + +------=_NextPart_afb3ad6f146b12b709deac3e387a3ad7_alt-- +------=_NextPart_afb3ad6f146b12b709deac3e387a3ad7-- + +`, "\n", "\r\n") + + msg, err := Parse(strings.NewReader(message)) + tcheck(t, err, "new reader") + enforceSequential = true + defer func() { + enforceSequential = false + }() + err = walkmsg(&msg) + tcheck(t, err, "walkmsg") + + msg, _ = Parse(strings.NewReader(message)) + err = msg.Walk() + tcheck(t, err, "msg.Walk") +} + +func TestNested(t *testing.T) { + // From ../rfc/2049:801 + nestedMessage := strings.ReplaceAll(`MIME-Version: 1.0 +From: Nathaniel Borenstein +To: Ned Freed +Date: Fri, 07 Oct 1994 16:15:05 -0700 (PDT) +Subject: A multipart example +Content-Type: multipart/mixed; + boundary=unique-boundary-1 + +This is the preamble area of a multipart message. +Mail readers that understand multipart format +should ignore this preamble. + +If you are reading this text, you might want to +consider changing to a mail reader that understands +how to properly display multipart messages. + +--unique-boundary-1 + + ... 
Some text appears here ... + +[Note that the blank between the boundary and the start + of the text in this part means no header fields were + given and this is text in the US-ASCII character set. + It could have been done with explicit typing as in the + next part.] + +--unique-boundary-1 +Content-type: text/plain; charset=US-ASCII + +This could have been part of the previous part, but +illustrates explicit versus implicit typing of body +parts. + +--unique-boundary-1 +Content-Type: multipart/parallel; boundary=unique-boundary-2 + +--unique-boundary-2 +Content-Type: audio/basic +Content-Transfer-Encoding: base64 + + +--unique-boundary-2 +Content-Type: image/jpeg +Content-Transfer-Encoding: base64 + + +--unique-boundary-2-- + +--unique-boundary-1 +Content-type: text/enriched + +This is enriched. +as defined in RFC 1896 + +Isn't it +cool? + +--unique-boundary-1 +Content-Type: message/rfc822 + +From: (mailbox in US-ASCII) +To: (address in US-ASCII) +Subject: (subject in US-ASCII) +Content-Type: Text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: Quoted-printable + + ... Additional text in ISO-8859-1 goes here ... + +--unique-boundary-1-- +`, "\n", "\r\n") + + msg, err := Parse(strings.NewReader(nestedMessage)) + tcheck(t, err, "new reader") + enforceSequential = true + defer func() { + enforceSequential = false + }() + err = walkmsg(&msg) + tcheck(t, err, "walkmsg") + + if len(msg.Parts) != 5 { + t.Fatalf("got %d parts, expected 5", len(msg.Parts)) + } + sub := msg.Parts[4].Message + if sub == nil { + t.Fatalf("missing part.Message") + } + buf, err := io.ReadAll(sub.Reader()) + if err != nil { + t.Fatalf("read message body: %v", err) + } + exp := " ... Additional text in ISO-8859-1 goes here ...\r\n" + if string(buf) != exp { + t.Fatalf("got %q, expected %q", buf, exp) + } + + msg, _ = Parse(strings.NewReader(nestedMessage)) + err = msg.Walk() + tcheck(t, err, "msg.Walk") + +} + +func TestWalkdir(t *testing.T) { + // Ensure these dirs exist. 
Developers should bring their own ham/spam example + // emails. + os.MkdirAll("../testdata/train/ham", 0770) + os.MkdirAll("../testdata/train/spam", 0770) + + var n, nfail int + twalkdir(t, "../testdata/train/ham", &n, &nfail) + twalkdir(t, "../testdata/train/spam", &n, &nfail) + log.Printf("parsing messages: %d/%d failed", nfail, n) +} + +func twalkdir(t *testing.T, dir string, n, nfail *int) { + names, err := os.ReadDir(dir) + tcheck(t, err, "readdir") + if len(names) > 1000 { + names = names[:1000] + } + for _, name := range names { + p := filepath.Join(dir, name.Name()) + *n++ + err := walk(p) + if err != nil { + *nfail++ + log.Printf("%s: %v", p, err) + } + } +} + +func walk(path string) error { + r, err := os.Open(path) + if err != nil { + return err + } + defer r.Close() + msg, err := Parse(r) + if err != nil { + return err + } + return walkmsg(&msg) +} + +func walkmsg(msg *Part) error { + enforceSequential = true + defer func() { + enforceSequential = false + }() + + if len(msg.bound) == 0 { + buf, err := io.ReadAll(msg.Reader()) + if err != nil { + return err + } + + if msg.MediaType == "MESSAGE" && (msg.MediaSubType == "RFC822" || msg.MediaSubType == "GLOBAL") { + mp, err := Parse(bytes.NewReader(buf)) + if err != nil { + return err + } + msg.Message = &mp + walkmsg(msg.Message) + } + + size := msg.EndOffset - msg.BodyOffset + if size < 0 { + log.Printf("msg %v", msg) + panic("inconsistent body/end offset") + } + sr := io.NewSectionReader(msg.r, msg.BodyOffset, size) + decsr := msg.bodyReader(sr) + buf2, err := io.ReadAll(decsr) + if err != nil { + return err + } + + if !bytes.Equal(buf, buf2) { + panic("data mismatch reading sequentially vs via offsets") + } + + return nil + } + + for { + pp, err := msg.ParseNextPart() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if err := walkmsg(pp); err != nil { + return err + } + enforceSequential = true + } +} + +func TestEmbedded(t *testing.T) { + f, err := 
os.Open("../testdata/message/message-rfc822-multipart.eml") + tcheck(t, err, "open") + fi, err := f.Stat() + tcheck(t, err, "stat") + _, err = EnsurePart(f, fi.Size()) + tcheck(t, err, "parse") +} + +func TestEmbedded2(t *testing.T) { + buf, err := os.ReadFile("../testdata/message/message-rfc822-multipart2.eml") + tcheck(t, err, "readfile") + buf = bytes.ReplaceAll(buf, []byte("\n"), []byte("\r\n")) + + _, err = EnsurePart(bytes.NewReader(buf), int64(len(buf))) + tfail(t, err, errUnexpectedEOF) // todo: be able to parse this without an error? truncate message/rfc822 in dsn. +} diff --git a/message/readheaders.go b/message/readheaders.go new file mode 100644 index 0000000..dd7492e --- /dev/null +++ b/message/readheaders.go @@ -0,0 +1,31 @@ +package message + +import ( + "bufio" + "bytes" + "errors" + "io" +) + +var crlf2x = []byte("\r\n\r\n") + +var ErrHeaderSeparator = errors.New("no header separator found") + +// ReadHeaders returns the headers of a message, ending with a single crlf. +// Returns ErrHeaderSeparator if no header separator is found. +func ReadHeaders(msg *bufio.Reader) ([]byte, error) { + buf := []byte{} + for { + line, err := msg.ReadBytes('\n') + if err != io.EOF && err != nil { + return nil, err + } + buf = append(buf, line...) + if bytes.HasSuffix(buf, crlf2x) { + return buf[:len(buf)-2], nil + } + if err == io.EOF { + return nil, ErrHeaderSeparator + } + } +} diff --git a/message/time.go b/message/time.go new file mode 100644 index 0000000..76c5fad --- /dev/null +++ b/message/time.go @@ -0,0 +1,4 @@ +package message + +// Timestamp as used in internet mail messages. +const RFC5322Z = "02 Jan 2006 15:04 -0700" diff --git a/message/todo.go b/message/todo.go new file mode 100644 index 0000000..5ae5c61 --- /dev/null +++ b/message/todo.go @@ -0,0 +1,12 @@ +package message + +// todo: we should parse headers ourselves + +// Link rfc updates about UTF-8 characters in messages. 
+// These productions list valid characters in contexts: +// VCHAR, visible printing: ../rfc/5234:774 ../rfc/6532:236 +// ctext, in comment: ../rfc/5322:602 ../rfc/6532:238 +// atext, in atom: ../rfc/5322:679 ../rfc/6532:240 +// qtext, in quoted string: ../rfc/5322:735 ../rfc/6532:242 +// text, in message body: ../rfc/5322:1001 ../rfc/6532:244 +// dtext, in domain: ../rfc/5322:967 ../rfc/6532:247 diff --git a/message/writer.go b/message/writer.go new file mode 100644 index 0000000..bb535f5 --- /dev/null +++ b/message/writer.go @@ -0,0 +1,55 @@ +package message + +import ( + "io" +) + +// Writer is a write-through helper, collecting properties about the written +// message. +type Writer struct { + Writer io.Writer + HaveHeaders bool + Has8bit bool // Whether a byte with the high/8bit has been read. So whether this is 8BITMIME instead of 7BIT. + Size int64 + tail [3]byte // For detecting crlfcrlf. + // todo: should be parsing headers here, as we go +} + +// Write implements io.Writer. +func (w *Writer) Write(buf []byte) (int, error) { + if !w.HaveHeaders && len(buf) > 0 { + get := func(i int) byte { + if i < 0 { + return w.tail[3+i] + } + return buf[i] + } + + for i, b := range buf { + if b == '\n' && get(i-3) == '\r' && get(i-2) == '\n' && get(i-1) == '\r' { + w.HaveHeaders = true + break + } + } + + n := len(buf) + if n > 3 { + n = 3 + } + copy(w.tail[:], w.tail[n:]) + copy(w.tail[3-n:], buf[len(buf)-n:]) + } + if !w.Has8bit { + for _, b := range buf { + if b&0x80 != 0 { + w.Has8bit = true + break + } + } + } + n, err := w.Writer.Write(buf) + if n > 0 { + w.Size += int64(n) + } + return n, err +} diff --git a/message/writer_test.go b/message/writer_test.go new file mode 100644 index 0000000..2155729 --- /dev/null +++ b/message/writer_test.go @@ -0,0 +1,41 @@ +package message + +import ( + "strings" + "testing" +) + +func TestMsgWriter(t *testing.T) { + check := func(data string, want bool) { + t.Helper() + + b := &strings.Builder{} + mw := &Writer{Writer: b} + if 
_, err := mw.Write([]byte(data)); err != nil { + t.Fatalf("write for message %q: %s", data, err) + } + if mw.HaveHeaders != want { + t.Fatalf("got %v, expected %v, for message %q", mw.HaveHeaders, want, data) + } + + b = &strings.Builder{} + mw = &Writer{Writer: b} + for i := range data { + if _, err := mw.Write([]byte(data[i : i+1])); err != nil { + t.Fatalf("write for message %q: %s", data, err) + } + } + if mw.HaveHeaders != want { + t.Fatalf("got %v, expected %v, for message %q", mw.HaveHeaders, want, data) + } + } + + check("no header", false) + check("no header\r\n", false) + check("key: value\r\n\r\n", true) + check("key: value\r\n\r\nbody", true) + check("key: value\n\nbody", false) + check("key: value\r\rbody", false) + check("\r\n\r\n", true) + check("\r\n\r\nbody", true) +} diff --git a/metrics/auth.go b/metrics/auth.go new file mode 100644 index 0000000..baf3d53 --- /dev/null +++ b/metrics/auth.go @@ -0,0 +1,25 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + metricAuthentication = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_authentication_total", + Help: "Authentication attempts and results.", + }, + []string{ + "kind", // submission, imap, httpaccount, httpadmin + "variant", // login, plain, scram-sha-256, httpbasic + // todo: we currently only use badcreds, but known baduser can be helpful + "result", // ok, baduser, badpassword, badcreds, error, aborted + }, + ) +) + +func AuthenticationInc(kind, variant, result string) { + metricAuthentication.WithLabelValues(kind, variant, result).Inc() +} diff --git a/metrics/http.go b/metrics/http.go new file mode 100644 index 0000000..3ecc1ce --- /dev/null +++ b/metrics/http.go @@ -0,0 +1,61 @@ +// Package metrics has prometheus metric variables/functions. 
+package metrics + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("metrics") + +var ( + metricHTTPClient = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_httpclient_request_duration_seconds", + Help: "HTTP requests lookups.", + Buckets: []float64{0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{ + "pkg", + "method", + "code", + "result", + }, + ) +) + +// HTTPClientObserve tracks the result of an HTTP transaction in a metric, and +// logs the result. +func HTTPClientObserve(ctx context.Context, pkg, method string, statusCode int, err error, start time.Time) { + log := xlog.WithContext(ctx) + var result string + switch { + case err == nil: + switch statusCode / 100 { + case 2: + result = "ok" + case 4: + result = "usererror" + case 5: + result = "servererror" + default: + result = "other" + } + case errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, context.DeadlineExceeded): + result = "timeout" + case errors.Is(err, context.Canceled): + result = "canceled" + default: + result = "error" + } + metricHTTPClient.WithLabelValues(pkg, method, result, fmt.Sprintf("%d", statusCode)).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("httpclient result", err, mlog.Field("pkg", pkg), mlog.Field("method", method), mlog.Field("code", statusCode), mlog.Field("duration", time.Since(start))) +} diff --git a/metrics/panic.go b/metrics/panic.go new file mode 100644 index 0000000..45717de --- /dev/null +++ b/metrics/panic.go @@ -0,0 +1,20 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var metricPanic = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_panic_total", + Help: "Number of unhandled panics, by package.", + }, + []string{ + 
"pkg", + }, +) + +func PanicInc(pkg string) { + metricPanic.WithLabelValues(pkg).Inc() +} diff --git a/mlog/log.go b/mlog/log.go new file mode 100644 index 0000000..10f2b2b --- /dev/null +++ b/mlog/log.go @@ -0,0 +1,376 @@ +// Package mlog provides logging with log levels and fields. +// +// Each log level has a function to log with and without error. +// Each such function takes a varargs list of fields (key value pairs) to log. +// Variable data should be in fields. Logging strings themselves should be +// constant, for easier log processing (e.g. building metrics based on log +// messages). +// +// The log levels can be configured per originating package, e.g. smtpclient, +// imapserver. The configuration is application-global, so each Log instance +// uses the same log levels. +// +// Print* should be used for lines that always should be printed, regardless of +// configured log levels. Useful for startup logging and subcommands. +// +// Fatal* stops the program. Its log text is always printed. +package mlog + +// todo: log with source=path:linenumber? and/or stacktrace (perhaps optional) +// todo: should we turn errors logged with an context.Canceled from a level error into level info? +// todo: rethink format. perhaps simply using %#v is more useful for many types? + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "os" + "reflect" + "strconv" + "strings" + "sync/atomic" +) + +var Logfmt bool + +type Level int + +var LevelStrings = map[Level]string{ + LevelPrint: "print", + LevelFatal: "fatal", + LevelError: "error", + LevelInfo: "info", + LevelDebug: "debug", + LevelTrace: "trace", +} + +var Levels = map[string]Level{ + "print": LevelPrint, + "fatal": LevelFatal, + "error": LevelError, + "info": LevelInfo, + "debug": LevelDebug, + "trace": LevelTrace, +} + +const ( + LevelPrint Level = 0 // Printed regardless of configured log level. + LevelFatal Level = 1 // Printed regardless of configured log level. 
+ LevelError Level = 2 + LevelInfo Level = 3 + LevelDebug Level = 4 + LevelTrace Level = 5 +) + +// Holds a map[string]Level, mapping a package (field pkg in logs) to a log level. +// The empty string is the default/fallback log level. +var config atomic.Value + +func init() { + config.Store(map[string]Level{"": LevelError}) +} + +// SetConfig atomically sets the new log levels used by all Log instances. +func SetConfig(c map[string]Level) { + config.Store(c) +} + +// Pair is a field/value pair, for use in logged lines. +type Pair struct { + key string + value any +} + +// Field is a shorthand for making a Pair. +func Field(k string, v any) Pair { + return Pair{k, v} +} + +// Log is an instance potentially with its own field/value pair added to any +// logging output. +type Log struct { + fields []Pair + moreFields func() []Pair +} + +// New returns a new Log instance. Each log invocation adds field "pkg". +func New(pkg string) *Log { + return &Log{ + fields: []Pair{{"pkg", pkg}}, + } +} + +type key string + +// CidKey can be used with context.WithValue to store a "cid" in a context, for logging. +var CidKey key = "cid" + +// WithCid adds a field "cid". +// Also see WithContext. +func (l *Log) WithCid(cid int64) *Log { + return l.Fields(Pair{"cid", cid}) +} + +// WithContext adds cid from context, if present. Context are often passed to +// functions, especially between packages, to pass a "cid" for an operation. At the +// start of a function (especially if exported) a variable "log" is often +// instantiated from a package-level variable "xlog", with WithContext for its cid. +// A *Log could be passed instead, but contexts are more pervasive. For the same +// reason WithContext is more common than WithCid. +func (l *Log) WithContext(ctx context.Context) *Log { + cidv := ctx.Value(CidKey) + if cidv == nil { + return l + } + cid := cidv.(int64) + return l.WithCid(cid) +} + +// Field adds fields to the logger. Each logged line adds these fields. 
+func (l *Log) Fields(fields ...Pair) *Log { + nl := *l + nl.fields = append(fields, nl.fields...) + return &nl +} + +// MoreFields sets a function on the logger that is called just before logging, +// to retrieve additional fields to log. +func (l *Log) MoreFields(fn func() []Pair) *Log { + nl := *l + nl.moreFields = fn + return &nl +} + +func (l *Log) Trace(text string) bool { + return l.logx(LevelTrace, nil, text) +} + +func (l *Log) Fatal(text string, fields ...Pair) { l.Fatalx(text, nil, fields...) } +func (l *Log) Fatalx(text string, err error, fields ...Pair) { + l.plog(LevelFatal, err, text, fields...) + os.Exit(1) +} + +func (l *Log) Print(text string, fields ...Pair) bool { + return l.logx(LevelPrint, nil, text, fields...) +} +func (l *Log) Printx(text string, err error, fields ...Pair) bool { + return l.logx(LevelPrint, err, text, fields...) +} + +func (l *Log) Debug(text string, fields ...Pair) bool { + return l.logx(LevelDebug, nil, text, fields...) +} +func (l *Log) Debugx(text string, err error, fields ...Pair) bool { + return l.logx(LevelDebug, err, text, fields...) +} + +func (l *Log) Info(text string, fields ...Pair) bool { return l.logx(LevelInfo, nil, text, fields...) } +func (l *Log) Infox(text string, err error, fields ...Pair) bool { + return l.logx(LevelInfo, err, text, fields...) +} + +func (l *Log) Error(text string, fields ...Pair) bool { + return l.logx(LevelError, nil, text, fields...) +} +func (l *Log) Errorx(text string, err error, fields ...Pair) bool { + return l.logx(LevelError, err, text, fields...) +} + +func (l *Log) logx(level Level, err error, text string, fields ...Pair) bool { + if !l.match(level) { + return false + } + l.plog(level, err, text, fields...) + return true +} + +// escape logfmt string if required, otherwise return original string. 
// logfmtValue returns s quoted (Go %q escaping) when it contains characters
// that would break logfmt parsing — quotes, backslashes, spaces/control
// characters, '=' or non-ASCII — and returns s unchanged otherwise.
func logfmtValue(s string) string {
	needsQuoting := func(c rune) bool {
		return c == '"' || c == '\\' || c <= ' ' || c == '=' || c >= 0x7f
	}
	if strings.IndexFunc(s, needsQuoting) < 0 {
		return s
	}
	return fmt.Sprintf("%q", s)
}
+ continue + } + vs := stringValue(false, true, fv.Interface()) + if vs == "" { + continue + } + if !first { + b.WriteByte(' ') + } + first = false + k := strings.ToLower(t.Field(i).Name) + b.WriteString(k + "=" + logfmtValue(vs)) + } + return b.String() +} + +func (l *Log) plog(level Level, err error, text string, fields ...Pair) { + fields = append(l.fields, fields...) + if l.moreFields != nil { + fields = append(fields, l.moreFields()...) + } + // We build up a buffer so we can do a single atomic write of the data. Otherwise partial log lines may interleaf. + b := &bytes.Buffer{} + if Logfmt { + fmt.Fprintf(b, "l=%s m=%s", LevelStrings[level], logfmtValue(text)) + if err != nil { + fmt.Fprintf(b, " err=%s", logfmtValue(err.Error())) + } + for i := 0; i < len(fields); i++ { + kv := fields[i] + fmt.Fprintf(b, " %s=%s", kv.key, logfmtValue(stringValue(kv.key == "cid", false, kv.value))) + } + b.WriteString("\n") + } else { + fmt.Fprintf(b, "%s: %s", LevelStrings[level], logfmtValue(text)) + if err != nil { + fmt.Fprintf(b, ": %s", logfmtValue(err.Error())) + } + if len(fields) > 0 { + fmt.Fprint(b, " (") + for i := 0; i < len(fields); i++ { + if i > 0 { + fmt.Fprint(b, "; ") + } + kv := fields[i] + fmt.Fprintf(b, "%s: %s", kv.key, logfmtValue(stringValue(kv.key == "cid", false, kv.value))) + } + fmt.Fprint(b, ")") + } + b.WriteString("\n") + } + os.Stderr.Write(b.Bytes()) +} + +func (l *Log) match(level Level) bool { + if level == LevelPrint || level == LevelFatal { + return true + } + + cl := config.Load().(map[string]Level) + + seen := false + for _, kv := range l.fields { + if kv.key != "pkg" { + continue + } + pkg, ok := kv.value.(string) + if !ok { + continue + } + v, ok := cl[pkg] + if ok && v >= level { + return true + } + seen = seen || ok + } + if seen { + return false + } + v, ok := cl[""] + return ok && v >= level +} + +type errWriter struct { + log *Log + level Level + msg string +} + +func (w *errWriter) Write(buf []byte) (int, error) { + err := 
// TXTStrings returns a TXT record value as one or more quoted strings, taking
// the max length of 255 characters per string into account. An empty input
// yields an empty result.
func TXTStrings(s string) string {
	out := ""
	for len(s) > 0 {
		chunk := s
		if len(chunk) > 255 {
			chunk = chunk[:255]
		}
		s = s[len(chunk):]
		if out != "" {
			out += " "
		}
		out += `"` + chunk + `"`
	}
	return out
}
+func MakeDKIMEd25519Key(selector, domain dns.Domain) ([]byte, error) { + _, privKey, err := ed25519.GenerateKey(cryptorand.Reader) + if err != nil { + return nil, fmt.Errorf("generating key: %w", err) + } + + pkcs8, err := x509.MarshalPKCS8PrivateKey(privKey) + if err != nil { + return nil, fmt.Errorf("marshal key: %w", err) + } + + block := &pem.Block{ + Type: "PRIVATE KEY", + Headers: map[string]string{ + "Note": dkimKeyNote("ed25519", selector, domain), + }, + Bytes: pkcs8, + } + b := &bytes.Buffer{} + if err := pem.Encode(b, block); err != nil { + return nil, fmt.Errorf("encoding pem: %w", err) + } + return b.Bytes(), nil +} + +func dkimKeyNote(kind string, selector, domain dns.Domain) string { + s := kind + " dkim private key" + var zero dns.Domain + if selector != zero && domain != zero { + s += fmt.Sprintf(" for %s._domainkey.%s", selector.ASCII, domain.ASCII) + } + s += fmt.Sprintf(", generated by mox on %s", time.Now().Format(time.RFC3339)) + return s +} + +// MakeDKIMEd25519Key returns a PEM buffer containing an rsa key for use with +// DKIM. +// selector and domain can be empty. If not, they are used in the note. +func MakeDKIMRSAKey(selector, domain dns.Domain) ([]byte, error) { + // 2048 bits seems reasonable in 2022, 1024 is on the low side, larger + // keys may not fit in UDP DNS response. + privKey, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, fmt.Errorf("generating key: %w", err) + } + + pkcs8, err := x509.MarshalPKCS8PrivateKey(privKey) + if err != nil { + return nil, fmt.Errorf("marshal key: %w", err) + } + + block := &pem.Block{ + Type: "PRIVATE KEY", + Headers: map[string]string{ + "Note": dkimKeyNote("rsa", selector, domain), + }, + Bytes: pkcs8, + } + b := &bytes.Buffer{} + if err := pem.Encode(b, block); err != nil { + return nil, fmt.Errorf("encoding pem: %w", err) + } + return b.Bytes(), nil +} + +// MakeAccountConfig returns a new account configuration for an email address. 
+func MakeAccountConfig(addr smtp.Address) config.Account { + account := config.Account{ + Domain: addr.Domain.Name(), + Destinations: map[string]config.Destination{ + addr.Localpart.String(): {}, + }, + RejectsMailbox: "Rejects", + JunkFilter: &config.JunkFilter{ + Threshold: 0.95, + Params: junk.Params{ + Onegrams: true, + MaxPower: .01, + TopWords: 10, + IgnoreWords: .1, + RareWords: 2, + }, + }, + } + account.SubjectPass.Period = 12 * time.Hour + return account +} + +// MakeDomainConfig makes a new config for a domain, creating DKIM keys, using +// accountName for DMARC and TLS reports. +func MakeDomainConfig(ctx context.Context, domain, hostname dns.Domain, accountName string) (config.Domain, []string, error) { + log := xlog.WithContext(ctx) + + now := time.Now() + year := now.Format("2006") + timestamp := now.Format("20060102T150405") + + var paths []string + defer func() { + for _, p := range paths { + if err := os.Remove(p); err != nil { + log.Errorx("removing path for domain config", err, mlog.Field("path", p)) + } + } + }() + + writeFile := func(path string, data []byte) error { + os.MkdirAll(filepath.Dir(path), 0770) + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) + if err != nil { + return fmt.Errorf("creating file %s: %s", path, err) + } + defer func() { + if f != nil { + os.Remove(path) + f.Close() + } + }() + if _, err := f.Write(data); err != nil { + return fmt.Errorf("writing file %s: %s", path, err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("close file: %v", err) + } + f = nil + return nil + } + + confDKIM := config.DKIM{ + Selectors: map[string]config.Selector{}, + } + + addSelector := func(kind, name string, privKey []byte) error { + record := fmt.Sprintf("%s._domainkey.%s", name, domain.ASCII) + keyPath := filepath.Join("dkim", fmt.Sprintf("%s.%s.%skey.pkcs8.pem", record, timestamp, kind)) + p := ConfigDirPath(keyPath) + if err := writeFile(p, privKey); err != nil { + return err + } + paths = 
append(paths, p) + confDKIM.Selectors[name] = config.Selector{ + // Example from RFC has 5 day between signing and expiration. ../rfc/6376:1393 + // Expiration is not intended as antireplay defense, but it may help. ../rfc/6376:1340 + // Messages in the wild have been observed with 2 hours and 1 year expiration. + Expiration: "72h", + PrivateKeyFile: keyPath, + } + return nil + } + + addEd25519 := func(name string) error { + key, err := MakeDKIMEd25519Key(dns.Domain{ASCII: name}, domain) + if err != nil { + return fmt.Errorf("making dkim ed25519 private key: %s", err) + } + return addSelector("ed25519", name, key) + } + + addRSA := func(name string) error { + key, err := MakeDKIMRSAKey(dns.Domain{ASCII: name}, domain) + if err != nil { + return fmt.Errorf("making dkim rsa private key: %s", err) + } + return addSelector("rsa", name, key) + } + + if err := addEd25519(year + "a"); err != nil { + return config.Domain{}, nil, err + } + if err := addRSA(year + "b"); err != nil { + return config.Domain{}, nil, err + } + if err := addEd25519(year + "c"); err != nil { + return config.Domain{}, nil, err + } + if err := addRSA(year + "d"); err != nil { + return config.Domain{}, nil, err + } + + // We sign with the first two. In case they are misused, the switch to the other + // keys is easy, just change the config. Operators should make the public key field + // of the misused keys empty in the DNS records to disable the misused keys. + confDKIM.Sign = []string{year + "a", year + "b"} + + confDomain := config.Domain{ + LocalpartCatchallSeparator: "+", + DKIM: confDKIM, + DMARC: &config.DMARC{ + Account: accountName, + Localpart: "dmarc-reports", + Mailbox: "DMARC", + }, + MTASTS: &config.MTASTS{ + PolicyID: time.Now().UTC().Format("20060102T150405"), + Mode: mtasts.ModeEnforce, + // We start out with 24 hour, and warn in the admin interface that users should + // increase it to weeks. Once the setup works. 
+ MaxAge: 24 * time.Hour, + MX: []string{hostname.ASCII}, + }, + TLSRPT: &config.TLSRPT{ + Account: accountName, + Localpart: "tls-reports", + Mailbox: "TLSRPT", + }, + } + + rpaths := paths + paths = nil + + return confDomain, rpaths, nil +} + +// DomainAdd adds the domain to the domains config, rewriting domains.conf and +// marking it loaded. +// +// accountName is used for DMARC/TLS report. +// If the account does not exist, it is created with localpart. Localpart must be +// set only if the account does not yet exist. +func DomainAdd(ctx context.Context, domain dns.Domain, accountName string, localpart smtp.Localpart) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("adding domain", rerr, mlog.Field("domain", domain), mlog.Field("account", accountName), mlog.Field("localpart", localpart)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + if _, ok := c.Domains[domain.Name()]; ok { + return fmt.Errorf("domain already present") + } + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. 
+ nc := c + nc.Domains = map[string]config.Domain{} + for name, d := range c.Domains { + nc.Domains[name] = d + } + + confDomain, cleanupFiles, err := MakeDomainConfig(ctx, domain, Conf.Static.HostnameDomain, accountName) + if err != nil { + return fmt.Errorf("preparing domain config: %v", err) + } + defer func() { + for _, f := range cleanupFiles { + if err := os.Remove(f); err != nil { + log.Errorx("cleaning up file after error", err, mlog.Field("path", f)) + } + } + }() + + if _, ok := c.Accounts[accountName]; ok && localpart != "" { + return fmt.Errorf("account already exists (leave localpart empty when using an existing account)") + } else if !ok && localpart == "" { + return fmt.Errorf("account does not yet exist (specify a localpart)") + } else if accountName == "" { + return fmt.Errorf("account name is empty") + } else if !ok { + nc.Accounts[accountName] = MakeAccountConfig(smtp.Address{Localpart: localpart, Domain: domain}) + } + + nc.Domains[domain.Name()] = confDomain + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + log.Info("domain added", mlog.Field("domain", domain)) + cleanupFiles = nil // All good, don't cleanup. + return nil +} + +// DomainRemove removes domain from the config, rewriting domains.conf. +// +// No accounts are removed, also not when they still reference this domain. +func DomainRemove(ctx context.Context, domain dns.Domain) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("removing domain", rerr, mlog.Field("domain", domain)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + domConf, ok := c.Domains[domain.Name()] + if !ok { + return fmt.Errorf("domain does not exist") + } + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. 
+ nc := c + nc.Domains = map[string]config.Domain{} + s := domain.Name() + for name, d := range c.Domains { + if name != s { + nc.Domains[name] = d + } + } + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + + // Move away any DKIM private keys to a subdirectory "old". But only if + // they are not in use by other domains. + usedKeyPaths := map[string]bool{} + for _, dc := range nc.Domains { + for _, sel := range dc.DKIM.Selectors { + usedKeyPaths[filepath.Clean(sel.PrivateKeyFile)] = true + } + } + for _, sel := range domConf.DKIM.Selectors { + if sel.PrivateKeyFile == "" || usedKeyPaths[filepath.Clean(sel.PrivateKeyFile)] { + continue + } + src := ConfigDirPath(sel.PrivateKeyFile) + dst := ConfigDirPath(filepath.Join(filepath.Dir(sel.PrivateKeyFile), "old", filepath.Base(sel.PrivateKeyFile))) + _, err := os.Stat(dst) + if err == nil { + err = fmt.Errorf("destination already exists") + } else if os.IsNotExist(err) { + os.MkdirAll(filepath.Dir(dst), 0770) + err = os.Rename(src, dst) + } + if err != nil { + log.Errorx("renaming dkim private key file for removed domain", err, mlog.Field("src", src), mlog.Field("dst", dst)) + } + } + + log.Info("domain removed", mlog.Field("domain", domain)) + return nil +} + +// todo: find a way to automatically create the dns records as it would greatly simplify setting up email for a domain. we could also dynamically make changes, e.g. providing grace periods after disabling a dkim key, only automatically removing the dkim dns key after a few days. but this requires some kind of api and authentication to the dns server. there doesn't appear to be a single commonly used api for dns management. each of the numerous cloud providers have their own APIs and rather large SKDs to use them. we don't want to link all of them in. + +// DomainRecords returns text lines describing DNS records required for configuring +// a domain. 
+func DomainRecords(domConf config.Domain, domain dns.Domain) ([]string, error) { + d := domain.ASCII + h := Conf.Static.HostnameDomain.ASCII + + records := []string{ + "; Time To Live, may be recognized if importing as a zone file.", + "$TTL 300", + "", + + "; Deliver email to this host.", + fmt.Sprintf("%s. MX 10 %s.", d, h), + "", + + "; Outgoing messages will be signed with the first two DKIM keys. The other two", + "; configured for backup, switching to them is just a config change.", + } + var selectors []string + for name := range domConf.DKIM.Selectors { + selectors = append(selectors, name) + } + sort.Slice(selectors, func(i, j int) bool { + return selectors[i] < selectors[j] + }) + for _, name := range selectors { + sel := domConf.DKIM.Selectors[name] + dkimr := dkim.Record{ + Version: "DKIM1", + Hashes: []string{"sha256"}, + PublicKey: sel.Key.Public(), + } + if _, ok := sel.Key.(ed25519.PrivateKey); ok { + dkimr.Key = "ed25519" + } else if _, ok := sel.Key.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("unrecognized private key for DKIM selector %q: %T", name, sel.Key) + } + txt, err := dkimr.Record() + if err != nil { + return nil, fmt.Errorf("making DKIM DNS TXT record: %v", err) + } + + if len(txt) > 255 { + records = append(records, + "; NOTE: Ensure the next record is added in DNS as a single record, it consists", + "; of multiple strings (max size of each is 255 bytes).", + ) + } + s := fmt.Sprintf("%s._domainkey.%s. IN TXT %s", name, d, TXTStrings(txt)) + records = append(records, s) + + } + records = append(records, + "", + + "; Specify the MX host is allowed to send for our domain and for itself (for DSNs).", + "; ~all means softfail for anything else, which is done instead of -all to prevent older", + "; mail servers from rejecting the message because they never get to looking for a dkim/dmarc pass.", + fmt.Sprintf(`%s. 
IN TXT "v=spf1 mx ~all"`, d), + "; The next record may already exist if you have more domains configured.", + fmt.Sprintf(`%-*s IN TXT "v=spf1 a -all"`, 20+len(d), h+"."), // ../rfc/7208:2263 ../rfc/7208:2287 + "", + + "; Emails that fail the DMARC check (without DKIM and without SPF) should be rejected, and request reports.", + "; If you email through mailing lists that strip DKIM-Signature headers and don't", + "; rewrite the From header, you may want to set the policy to p=none.", + fmt.Sprintf(`_dmarc.%s. IN TXT "v=DMARC1; p=reject; rua=mailto:dmarc-reports@%s!10m"`, d, d), + "", + ) + + if sts := domConf.MTASTS; sts != nil { + records = append(records, + "; TLS must be used when delivering to us.", + fmt.Sprintf(`mta-sts.%s. IN CNAME %s.`, d, h), + fmt.Sprintf(`_mta-sts.%s. IN TXT "v=STSv1; id=%s"`, d, sts.PolicyID), + "", + ) + } + + records = append(records, + "; Request reporting about TLS failures.", + fmt.Sprintf(`_smtp._tls.%s. IN TXT "v=TLSRPTv1; rua=mailto:tls-reports@%s"`, d, d), + "", + + "; Autoconfig is used by Thunderbird. Autodiscover is (in theory) used by Microsoft.", + fmt.Sprintf(`autoconfig.%s. IN CNAME %s.`, d, h), + fmt.Sprintf(`_autodiscover._tcp.%s. IN SRV 0 1 443 autoconfig.%s.`, d, d), + "", + + // ../rfc/6186:133 ../rfc/8314:692 + "; For secure IMAP and submission autoconfig, point to mail host.", + fmt.Sprintf(`_imaps._tcp.%s. IN SRV 0 1 993 %s.`, d, h), + fmt.Sprintf(`_submissions._tcp.%s. IN SRV 0 1 465 %s.`, d, h), + "", + // ../rfc/6186:242 + "; Next records specify POP3 and plain text ports are not to be used.", + fmt.Sprintf(`_imap._tcp.%s. IN SRV 0 1 143 .`, d), + fmt.Sprintf(`_submission._tcp.%s. IN SRV 0 1 587 .`, d), + fmt.Sprintf(`_pop3._tcp.%s. IN SRV 0 1 110 .`, d), + fmt.Sprintf(`_pop3s._tcp.%s. IN SRV 0 1 995 .`, d), + "", + + "; Optional:", + "; You could mark Let's Encrypt as the only Certificate Authority allowed to", + "; sign TLS certificates for your domain.", + fmt.Sprintf("%s. 
IN CAA 0 issue \"letsencrypt.org\"", d), + ) + return records, nil +} + +// AccountAdd adds an account and an initial address and reloads the +// configuration. +// +// The new account does not have a password, so cannot log in. Email can be +// delivered. +func AccountAdd(ctx context.Context, account, address string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("adding account", rerr, mlog.Field("account", account), mlog.Field("address", address)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + if _, ok := c.Accounts[account]; ok { + return fmt.Errorf("account already present") + } + + addr, err := smtp.ParseAddress(address) + if err != nil { + return fmt.Errorf("parsing email address: %v", err) + } + if _, ok := Conf.accountDestinations[addr.String()]; ok { + return fmt.Errorf("address already exists") + } + + dname := addr.Domain.Name() + if _, ok := c.Domains[dname]; !ok { + return fmt.Errorf("domain does not exist") + } + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. + nc := c + nc.Accounts = map[string]config.Account{} + for name, a := range c.Accounts { + nc.Accounts[name] = a + } + nc.Accounts[account] = MakeAccountConfig(addr) + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + log.Info("account added", mlog.Field("account", account), mlog.Field("address", addr)) + return nil +} + +// AccountRemove removes an account and reloads the configuration. 
+func AccountRemove(ctx context.Context, account string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("adding account", rerr, mlog.Field("account", account)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + if _, ok := c.Accounts[account]; !ok { + return fmt.Errorf("account does not exist") + } + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. + nc := c + nc.Accounts = map[string]config.Account{} + for name, a := range c.Accounts { + if name != account { + nc.Accounts[name] = a + } + } + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + log.Info("account removed", mlog.Field("account", account)) + return nil +} + +// AddressAdd adds an email address to an account and reloads the +// configuration. +func AddressAdd(ctx context.Context, address, account string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("adding address", rerr, mlog.Field("address", address), mlog.Field("account", account)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + a, ok := c.Accounts[account] + if !ok { + return fmt.Errorf("account does not exist") + } + + addr, err := smtp.ParseAddress(address) + if err != nil { + return fmt.Errorf("parsing email address: %v", err) + } + if _, ok := Conf.accountDestinations[addr.String()]; ok { + return fmt.Errorf("address already exists") + } + + dname := addr.Domain.Name() + if _, ok := c.Domains[dname]; !ok { + return fmt.Errorf("domain does not exist") + } + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. 
+ nc := c + nc.Accounts = map[string]config.Account{} + for name, a := range c.Accounts { + nc.Accounts[name] = a + } + nd := map[string]config.Destination{} + for name, d := range a.Destinations { + nd[name] = d + } + var k string + if addr.Domain == a.DNSDomain { + k = addr.Localpart.String() + } else { + k = addr.String() + } + nd[k] = config.Destination{} + a.Destinations = nd + nc.Accounts[account] = a + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + log.Info("address added", mlog.Field("address", addr), mlog.Field("account", account)) + return nil +} + +// AddressRemove removes an email address and reloads the configuration. +func AddressRemove(ctx context.Context, address string) (rerr error) { + log := xlog.WithContext(ctx) + defer func() { + if rerr != nil { + log.Errorx("removing address", rerr, mlog.Field("address", address)) + } + }() + + Conf.dynamicMutex.Lock() + defer Conf.dynamicMutex.Unlock() + + c := Conf.Dynamic + + addr, err := smtp.ParseAddress(address) + if err != nil { + return fmt.Errorf("parsing email address: %v", err) + } + ad, ok := Conf.accountDestinations[addr.String()] + if !ok { + return fmt.Errorf("address does not exists") + } + addrStr := addr.String() + + // Compose new config without modifying existing data structures. If we fail, we + // leave no trace. 
+ a, ok := c.Accounts[ad.Account] + if !ok { + return fmt.Errorf("internal error: cannot find account") + } + na := a + na.Destinations = map[string]config.Destination{} + var dropped bool + for name, d := range a.Destinations { + if !(name == addr.Localpart.String() && a.DNSDomain == addr.Domain || name == addrStr) { + na.Destinations[name] = d + } else { + dropped = true + } + } + if !dropped { + return fmt.Errorf("address not removed, likely a postmaster/reporting address") + } + nc := c + nc.Accounts = map[string]config.Account{} + for name, a := range c.Accounts { + nc.Accounts[name] = a + } + nc.Accounts[ad.Account] = na + + if err := writeDynamic(ctx, nc); err != nil { + return fmt.Errorf("writing domains.conf: %v", err) + } + log.Info("address removed", mlog.Field("address", addr), mlog.Field("account", ad.Account)) + return nil +} + +// ClientConfig holds the client configuration for IMAP/Submission for a +// domain. +type ClientConfig struct { + Entries []ClientConfigEntry +} + +type ClientConfigEntry struct { + Protocol string + Host dns.Domain + Port int + Listener string + Note string +} + +// ClientConfigDomain returns the client config for IMAP/Submission for a +// domain. 
+func ClientConfigDomain(d dns.Domain) (ClientConfig, error) { + _, ok := Conf.Domain(d) + if !ok { + return ClientConfig{}, fmt.Errorf("unknown domain") + } + + c := ClientConfig{} + c.Entries = []ClientConfigEntry{} + var listeners []string + + for name := range Conf.Static.Listeners { + listeners = append(listeners, name) + } + sort.Slice(listeners, func(i, j int) bool { + return listeners[i] < listeners[j] + }) + + note := func(tls bool, requiretls bool) string { + if !tls { + return "plain text, no STARTTLS configured" + } + if requiretls { + return "STARTTLS required" + } + return "STARTTLS optional" + } + + for _, name := range listeners { + l := Conf.Static.Listeners[name] + host := Conf.Static.HostnameDomain + if l.Hostname != "" { + host = l.HostnameDomain + } + if l.Submissions.Enabled { + c.Entries = append(c.Entries, ClientConfigEntry{"Submission (SMTP)", host, config.Port(l.Submissions.Port, 465), name, "with TLS"}) + } + if l.IMAPS.Enabled { + c.Entries = append(c.Entries, ClientConfigEntry{"IMAP", host, config.Port(l.IMAPS.Port, 993), name, "with TLS"}) + } + if l.Submission.Enabled { + c.Entries = append(c.Entries, ClientConfigEntry{"Submission (SMTP)", host, config.Port(l.Submission.Port, 587), name, note(l.TLS != nil, !l.Submission.NoRequireSTARTTLS)}) + } + if l.IMAP.Enabled { + c.Entries = append(c.Entries, ClientConfigEntry{"IMAP", host, config.Port(l.IMAPS.Port, 143), name, note(l.TLS != nil, !l.IMAP.NoRequireSTARTTLS)}) + } + } + + return c, nil +} + +// return IPs we may be listening on or connecting from to the outside. +func IPs(ctx context.Context) ([]net.IP, error) { + log := xlog.WithContext(ctx) + + // Try to gather all IPs we are listening on by going through the config. + // If we encounter 0.0.0.0 or ::, we'll gather all local IPs afterwards. 
+ var ips []net.IP + var ipv4all, ipv6all bool + for _, l := range Conf.Static.Listeners { + for _, s := range l.IPs { + ip := net.ParseIP(s) + if ip.IsUnspecified() { + if ip.To4() != nil { + ipv4all = true + } else { + ipv6all = true + } + continue + } + ips = append(ips, ip) + } + } + + // We'll list the IPs on the interfaces. How useful is this? There is a good chance + // we're listening on all addresses because of a load balancing/firewall. + if ipv4all || ipv6all { + ifaces, err := net.Interfaces() + if err != nil { + return nil, fmt.Errorf("listing network interfaces: %v", err) + } + for _, iface := range ifaces { + if iface.Flags&net.FlagUp == 0 { + continue + } + addrs, err := iface.Addrs() + if err != nil { + return nil, fmt.Errorf("listing addresses for network interface: %v", err) + } + if len(addrs) == 0 { + continue + } + + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + log.Errorx("bad interface addr", err, mlog.Field("address", addr)) + continue + } + v4 := ip.To4() != nil + if ipv4all && v4 || ipv6all && !v4 { + ips = append(ips, ip) + } + } + } + } + return ips, nil +} diff --git a/mox-/cid.go b/mox-/cid.go new file mode 100644 index 0000000..370bcfb --- /dev/null +++ b/mox-/cid.go @@ -0,0 +1,17 @@ +package mox + +import ( + "sync/atomic" + "time" +) + +var cid atomic.Int64 + +func init() { + cid.Store(time.Now().UnixMilli()) +} + +// Cid returns a new unique id to be used for connections/sessions/requests. 
+func Cid() int64 { + return cid.Add(1) +} diff --git a/mox-/config.go b/mox-/config.go new file mode 100644 index 0000000..ed013d1 --- /dev/null +++ b/mox-/config.go @@ -0,0 +1,888 @@ +package mox + +import ( + "bytes" + "context" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/text/unicode/norm" + + "github.com/mjl-/sconf" + + "github.com/mjl-/mox/autotls" + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/mtasts" + "github.com/mjl-/mox/smtp" +) + +var xlog = mlog.New("mox") + +// Config paths are set early in program startup. They will point to files in +// the same directory. +var ( + ConfigStaticPath string + ConfigDynamicPath string + Conf = Config{Log: map[string]mlog.Level{"": mlog.LevelError}} +) + +// Config as used in the code, a processed version of what is in the config file. +// +// Use methods to lookup a domain/account/address in the dynamic configuration. +type Config struct { + Static config.Static // Does not change during the lifetime of a running instance. + + logMutex sync.Mutex // For accessing the log levels. + Log map[string]mlog.Level + + dynamicMutex sync.Mutex + Dynamic config.Dynamic // Can only be accessed directly by tests. Use methods on Config for locked access. + dynamicMtime time.Time + DynamicLastCheck time.Time // For use by quickstart only to skip checks. + // From correctly-cased full address (localpart@domain) to account and + // address. Domains are IDNA names in utf8. + accountDestinations map[string]AccountDestination +} + +type AccountDestination struct { + Localpart smtp.Localpart + Account string + Destination config.Destination +} + +// SetLogLevel sets a new log level for pkg. 
An empty pkg sets the default log +// value that is used if no explicit log level is configured for a package. +// This change is ephemeral, no config file is changed. +func (c *Config) SetLogLevel(pkg string, level mlog.Level) { + c.logMutex.Lock() + defer c.logMutex.Unlock() + l := c.copyLogLevels() + l[pkg] = level + c.Log = l + xlog.Print("log level changed", mlog.Field("pkg", pkg), mlog.Field("level", mlog.LevelStrings[level])) + mlog.SetConfig(c.Log) +} + +// copyLogLevels returns a copy of c.Log, for modifications. +// must be called with log lock held. +func (c *Config) copyLogLevels() map[string]mlog.Level { + m := map[string]mlog.Level{} + for pkg, level := range c.Log { + m[pkg] = level + } + return m +} + +// LogLevels returns a copy of the current log levels. +func (c *Config) LogLevels() map[string]mlog.Level { + c.logMutex.Lock() + defer c.logMutex.Unlock() + return c.copyLogLevels() +} + +func (c *Config) withDynamicLock(fn func()) { + c.dynamicMutex.Lock() + defer c.dynamicMutex.Unlock() + now := time.Now() + if now.Sub(c.DynamicLastCheck) > time.Second { + c.DynamicLastCheck = now + if fi, err := os.Stat(ConfigDynamicPath); err != nil { + xlog.Errorx("stat domains config", err) + } else if !fi.ModTime().Equal(c.dynamicMtime) { + if errs := c.loadDynamic(); len(errs) > 0 { + xlog.Errorx("loading domains config", errs[0], mlog.Field("errors", errs)) + } else { + xlog.Info("domains config reloaded") + c.dynamicMtime = fi.ModTime() + } + } + } + fn() +} + +// must be called with dynamic lock held. 
+func (c *Config) loadDynamic() []error { + d, mtime, accDests, err := ParseDynamicConfig(context.Background(), ConfigDynamicPath, c.Static) + if err != nil { + return err + } + c.Dynamic = d + c.dynamicMtime = mtime + c.accountDestinations = accDests + return nil +} + +func (c *Config) Domains() (l []string) { + c.withDynamicLock(func() { + for name := range c.Dynamic.Domains { + l = append(l, name) + } + }) + sort.Slice(l, func(i, j int) bool { + return l[i] < l[j] + }) + return l +} + +func (c *Config) Accounts() (l []string) { + c.withDynamicLock(func() { + for name := range c.Dynamic.Accounts { + l = append(l, name) + } + }) + return +} + +func (c *Config) DomainLocalparts(d dns.Domain) map[smtp.Localpart]string { + suffix := "@" + d.Name() + m := map[smtp.Localpart]string{} + c.withDynamicLock(func() { + for addr, ad := range c.accountDestinations { + if strings.HasSuffix(addr, suffix) { + m[ad.Localpart] = ad.Account + } + } + }) + return m +} + +func (c *Config) Domain(d dns.Domain) (dom config.Domain, ok bool) { + c.withDynamicLock(func() { + dom, ok = c.Dynamic.Domains[d.Name()] + }) + return +} + +func (c *Config) Account(name string) (acc config.Account, ok bool) { + c.withDynamicLock(func() { + acc, ok = c.Dynamic.Accounts[name] + }) + return +} + +func (c *Config) AccountDestination(addr string) (accDests AccountDestination, ok bool) { + c.withDynamicLock(func() { + accDests, ok = c.accountDestinations[addr] + }) + return +} + +func (c *Config) allowACMEHosts() { + // todo future: reset the allowed hosts for autoconfig & mtasts when loading new list. + for _, l := range c.Static.Listeners { + if l.TLS == nil || l.TLS.ACME == "" { + continue + } + m := c.Static.ACME[l.TLS.ACME].Manager + for _, dom := range c.Dynamic.Domains { + + if l.AutoconfigHTTPS.Enabled { + d, err := dns.ParseDomain("autoconfig." 
+ dom.Domain.ASCII) + if err != nil { + xlog.Errorx("parsing autoconfig domain", err, mlog.Field("domain", dom.Domain)) + continue + } + m.AllowHostname(d) + } + + if l.MTASTSHTTPS.Enabled && dom.MTASTS != nil { + d, err := dns.ParseDomain("mta-sts." + dom.Domain.ASCII) + if err != nil { + xlog.Errorx("parsing mta-sts domain", err, mlog.Field("domain", dom.Domain)) + continue + } + m.AllowHostname(d) + } + } + } +} + +// todo future: write config parsing & writing code that can read a config and remembers the exact tokens including newlines and comments, and can write back a modified file. the goal is to be able to write a config file automatically (after changing fields through the ui), but not loose comments and whitespace, to still get useful diffs for storing the config in a version control system. + +// must be called with lock held. +func writeDynamic(ctx context.Context, c config.Dynamic) error { + accDests, errs := prepareDynamicConfig(ctx, ConfigDynamicPath, Conf.Static, &c) + if len(errs) > 0 { + return errs[0] + } + + var b bytes.Buffer + err := sconf.Write(&b, c) + if err != nil { + return err + } + f, err := os.OpenFile(ConfigDynamicPath, os.O_WRONLY, 0660) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + } + }() + buf := b.Bytes() + if _, err := f.Write(buf); err != nil { + return fmt.Errorf("write domains.conf: %v", err) + } + if err := f.Truncate(int64(len(buf))); err != nil { + return fmt.Errorf("truncate domains.conf after write: %v", err) + } + if err := f.Sync(); err != nil { + return fmt.Errorf("sync domains.conf after write: %v", err) + } + if err := moxio.SyncDir(filepath.Dir(ConfigDynamicPath)); err != nil { + return fmt.Errorf("sync dir of domains.conf after write: %v", err) + } + + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("stat after writing domains.conf: %v", err) + } + + Conf.dynamicMtime = fi.ModTime() + Conf.DynamicLastCheck = time.Now() + Conf.Dynamic = c + Conf.accountDestinations = 
accDests + + Conf.allowACMEHosts() + + return nil +} + +// MustLoadConfig loads the config, quitting on errors. +func MustLoadConfig() { + errs := LoadConfig(context.Background()) + if len(errs) > 1 { + xlog.Error("loading config file: multiple errors") + for _, err := range errs { + xlog.Errorx("config error", err) + } + xlog.Fatal("stopping after multiple config errors") + } else if len(errs) == 1 { + xlog.Fatalx("loading config file", errs[0]) + } +} + +// LoadConfig attempts to parse and load a config, returning any errors +// encountered. +func LoadConfig(ctx context.Context) []error { + c, errs := ParseConfig(ctx, ConfigStaticPath, false) + if len(errs) > 0 { + return errs + } + + mlog.SetConfig(c.Log) + SetConfig(c) + return nil +} + +// SetConfig sets a new config. Not to be used during normal operation. +func SetConfig(c *Config) { + // Cannot just assign *c to Conf, it would copy the mutex. + Conf = Config{c.Static, sync.Mutex{}, c.Log, sync.Mutex{}, c.Dynamic, c.dynamicMtime, c.DynamicLastCheck, c.accountDestinations} +} + +// ParseConfig parses the static config at path p. If checkOnly is true, no +// changes are made, such as registering ACME identities. +func ParseConfig(ctx context.Context, p string, checkOnly bool) (c *Config, errs []error) { + c = &Config{ + Static: config.Static{ + DataDir: ".", + }, + } + + f, err := os.Open(p) + if err != nil { + if os.IsNotExist(err) && os.Getenv("MOXCONF") == "" { + return nil, []error{fmt.Errorf("open config file: %v (hint: use mox -config ... 
or set MOXCONF=...)", err)} + } + return nil, []error{fmt.Errorf("open config file: %v", err)} + } + defer f.Close() + if err := sconf.Parse(f, &c.Static); err != nil { + return nil, []error{fmt.Errorf("parsing %s: %v", p, err)} + } + + if xerrs := PrepareStaticConfig(ctx, p, c, checkOnly); len(xerrs) > 0 { + return nil, xerrs + } + + pp := filepath.Join(filepath.Dir(p), "domains.conf") + c.Dynamic, c.dynamicMtime, c.accountDestinations, errs = ParseDynamicConfig(ctx, pp, c.Static) + + if !checkOnly { + c.allowACMEHosts() + } + + return c, errs +} + +// PrepareStaticConfig parses the static config file and prepares data structures +// for starting mox. If checkOnly is set no substantial changes are made, like +// creating an ACME registration. +func PrepareStaticConfig(ctx context.Context, configFile string, config *Config, checkOnly bool) (errs []error) { + addErrorf := func(format string, args ...any) { + errs = append(errs, fmt.Errorf(format, args...)) + } + + c := &config.Static + + // check that mailbox is in unicode NFC normalized form. + checkMailboxNormf := func(mailbox string, format string, args ...any) { + s := norm.NFC.String(mailbox) + if mailbox != s { + msg := fmt.Sprintf(format, args...) + addErrorf("%s: mailbox %q is not in NFC normalized form, should be %q", msg, mailbox, s) + } + } + + // Post-process logging config. 
+ if logLevel, ok := mlog.Levels[c.LogLevel]; ok { + config.Log = map[string]mlog.Level{"": logLevel} + } else { + addErrorf("invalid log level %q", c.LogLevel) + } + for pkg, s := range c.PackageLogLevels { + if logLevel, ok := mlog.Levels[s]; ok { + config.Log[pkg] = logLevel + } else { + addErrorf("invalid package log level %q", s) + } + } + + hostname, err := dns.ParseDomain(c.Hostname) + if err != nil { + addErrorf("parsing hostname: %s", err) + } else if hostname.Name() != c.Hostname { + addErrorf("hostname must be in IDNA form %q", hostname.Name()) + } + c.HostnameDomain = hostname + + for name, acme := range c.ACME { + if checkOnly { + continue + } + acmeDir := dataDirPath(configFile, c.DataDir, "acme") + os.MkdirAll(acmeDir, 0770) + manager, err := autotls.Load(name, acmeDir, acme.ContactEmail, acme.DirectoryURL, Shutdown) + if err != nil { + addErrorf("loading ACME identity for %q: %s", name, err) + } + acme.Manager = manager + c.ACME[name] = acme + } + + var haveUnspecifiedSMTPListener bool + for name, l := range c.Listeners { + if l.Hostname != "" { + d, err := dns.ParseDomain(l.Hostname) + if err != nil { + addErrorf("bad listener hostname %q: %s", l.Hostname, err) + } + l.HostnameDomain = d + } + if l.TLS != nil { + if l.TLS.ACME != "" && len(l.TLS.KeyCerts) != 0 { + addErrorf("listener %q: cannot have ACME and static key/certificates", name) + } else if l.TLS.ACME != "" { + acme, ok := c.ACME[l.TLS.ACME] + if !ok { + addErrorf("listener %q: unknown ACME provider %q", name, l.TLS.ACME) + } + + // If only checking, we don't have an acme manager, so set an empty tls config to + // continue without errors. + var tlsconfig *tls.Config + if checkOnly { + tlsconfig = &tls.Config{} + } else { + tlsconfig = acme.Manager.TLSConfig.Clone() + l.TLS.ACMEConfig = acme.Manager.ACMETLSConfig + + // SMTP STARTTLS connections are commonly made without SNI, because certificates + // often aren't validated. 
+ hostname := c.HostnameDomain + if l.Hostname != "" { + hostname = l.HostnameDomain + } + getCert := tlsconfig.GetCertificate + tlsconfig.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if hello.ServerName == "" { + hello.ServerName = hostname.ASCII + } + return getCert(hello) + } + } + l.TLS.Config = tlsconfig + } else if len(l.TLS.KeyCerts) != 0 { + if err := loadTLSKeyCerts(configFile, "listener "+name, l.TLS); err != nil { + addErrorf("%w", err) + } + } else { + addErrorf("listener %q: cannot have TLS config without ACME and without static keys/certificates", name) + } + + // TLS 1.2 was introduced in 2008. TLS <1.2 was deprecated by ../rfc/8996:31 and ../rfc/8997:66 in 2021. + var minVersion uint16 = tls.VersionTLS12 + if l.TLS.MinVersion != "" { + versions := map[string]uint16{ + "TLSv1.0": tls.VersionTLS10, + "TLSv1.1": tls.VersionTLS11, + "TLSv1.2": tls.VersionTLS12, + "TLSv1.3": tls.VersionTLS13, + } + v, ok := versions[l.TLS.MinVersion] + if !ok { + addErrorf("listener %q: unknown TLS mininum version %q", name, l.TLS.MinVersion) + } + minVersion = v + } + if l.TLS.Config != nil { + l.TLS.Config.MinVersion = minVersion + } + if l.TLS.ACMEConfig != nil { + l.TLS.ACMEConfig.MinVersion = minVersion + } + } else if l.IMAPS.Enabled || l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.Submission.Enabled && !l.Submission.NoRequireSTARTTLS || l.AdminHTTPS.Enabled || l.AutoconfigHTTPS.Enabled || l.MTASTSHTTPS.Enabled { + addErrorf("listener %q requires TLS, but does not specify tls config", name) + } + if l.AutoconfigHTTPS.Enabled && (!l.IMAP.Enabled && !l.IMAPS.Enabled || !l.Submission.Enabled && !l.Submissions.Enabled) { + addErrorf("listener %q with autoconfig enabled must have SMTP submission or submissions and IMAP or IMAPS enabled", name) + } + if l.SMTP.Enabled { + if len(l.IPs) == 0 { + haveUnspecifiedSMTPListener = true + } + for _, ipstr := range l.IPs { + ip := net.ParseIP(ipstr) + if ip == nil { + 
addErrorf("listener %q has invalid IP %q", name, ipstr) + continue + } + if ip.IsUnspecified() { + haveUnspecifiedSMTPListener = true + break + } + if len(c.SpecifiedSMTPListenIPs) >= 2 { + haveUnspecifiedSMTPListener = true + } else if len(c.SpecifiedSMTPListenIPs) > 0 && (c.SpecifiedSMTPListenIPs[0].To4() == nil) == (ip.To4() == nil) { + haveUnspecifiedSMTPListener = true + } else { + c.SpecifiedSMTPListenIPs = append(c.SpecifiedSMTPListenIPs, ip) + } + } + } + for _, s := range l.SMTP.DNSBLs { + d, err := dns.ParseDomain(s) + if err != nil { + addErrorf("listener %q has invalid DNSBL zone %q", name, s) + continue + } + l.SMTP.DNSBLZones = append(l.SMTP.DNSBLZones, d) + } + c.Listeners[name] = l + } + if haveUnspecifiedSMTPListener { + c.SpecifiedSMTPListenIPs = nil + } + + for _, mb := range c.DefaultMailboxes { + checkMailboxNormf(mb, "default mailbox") + } + + // Load CA certificate pool. + if c.TLS.CA != nil { + if c.TLS.CA.AdditionalToSystem { + var err error + c.TLS.CertPool, err = x509.SystemCertPool() + if err != nil { + addErrorf("fetching system CA cert pool: %v", err) + } + } else { + c.TLS.CertPool = x509.NewCertPool() + } + for _, certfile := range c.TLS.CA.CertFiles { + p := configDirPath(configFile, certfile) + pemBuf, err := os.ReadFile(p) + if err != nil { + addErrorf("reading TLS CA cert file: %v", err) + continue + } else if !c.TLS.CertPool.AppendCertsFromPEM(pemBuf) { + // todo: can we check more fully if we're getting some useful data back? + addErrorf("no CA certs added from %q", p) + } + } + } + return +} + +// PrepareDynamicConfig parses the dynamic config file given a static file. 
+func ParseDynamicConfig(ctx context.Context, dynamicPath string, static config.Static) (c config.Dynamic, mtime time.Time, accDests map[string]AccountDestination, errs []error) { + addErrorf := func(format string, args ...any) { + errs = append(errs, fmt.Errorf(format, args...)) + } + + f, err := os.Open(dynamicPath) + if err != nil { + addErrorf("parsing domains config: %v", err) + return + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + addErrorf("stat domains config: %v", err) + } + if err := sconf.Parse(f, &c); err != nil { + addErrorf("parsing dynamic config file: %v", err) + return + } + + accDests, errs = prepareDynamicConfig(ctx, dynamicPath, static, &c) + return c, fi.ModTime(), accDests, errs +} + +func prepareDynamicConfig(ctx context.Context, dynamicPath string, static config.Static, c *config.Dynamic) (accDests map[string]AccountDestination, errs []error) { + log := xlog.WithContext(ctx) + + addErrorf := func(format string, args ...any) { + errs = append(errs, fmt.Errorf(format, args...)) + } + + // check that mailbox is in unicode NFC normalized form. + checkMailboxNormf := func(mailbox string, format string, args ...any) { + s := norm.NFC.String(mailbox) + if mailbox != s { + msg := fmt.Sprintf(format, args...) + addErrorf("%s: mailbox %q is not in NFC normalized form, should be %q", msg, mailbox, s) + } + } + + // Validate postmaster account exists. + if _, ok := c.Accounts[static.Postmaster.Account]; !ok { + addErrorf("postmaster account %q does not exist", static.Postmaster.Account) + } + checkMailboxNormf(static.Postmaster.Mailbox, "postmaster mailbox") + + var haveSTSListener bool + for _, l := range static.Listeners { + if l.MTASTSHTTPS.Enabled { + haveSTSListener = true + break + } + } + + // Validate domains. 
+ for d, domain := range c.Domains { + dnsdomain, err := dns.ParseDomain(d) + if err != nil { + addErrorf("bad domain %q: %s", d, err) + } else if dnsdomain.Name() != d { + addErrorf("domain %q must be specified in IDNA form, %q", d, dnsdomain.Name()) + } + + domain.Domain = dnsdomain + + for _, sign := range domain.DKIM.Sign { + if _, ok := domain.DKIM.Selectors[sign]; !ok { + addErrorf("selector %q for signing is missing in domain %q", sign, d) + } + } + for name, sel := range domain.DKIM.Selectors { + seld, err := dns.ParseDomain(name) + if err != nil { + addErrorf("bad selector %q: %s", name, err) + } else if seld.Name() != name { + addErrorf("selector %q must be specified in IDNA form, %q", name, seld.Name()) + } + sel.Domain = seld + + if sel.Expiration != "" { + exp, err := time.ParseDuration(sel.Expiration) + if err != nil { + addErrorf("selector %q has invalid expiration %q: %v", name, sel.Expiration, err) + } else { + sel.ExpirationSeconds = int(exp / time.Second) + } + } + + sel.HashEffective = sel.Hash + switch sel.HashEffective { + case "": + sel.HashEffective = "sha256" + case "sha1": + log.Error("using sha1 with DKIM is deprecated as not secure enough, switch to sha256") + case "sha256": + default: + addErrorf("unsupported hash %q for selector %q in domain %q", sel.HashEffective, name, d) + } + + pemBuf, err := os.ReadFile(configDirPath(dynamicPath, sel.PrivateKeyFile)) + if err != nil { + addErrorf("reading private key for selector %q in domain %q: %s", name, d, err) + continue + } + p, _ := pem.Decode(pemBuf) + if p == nil { + addErrorf("private key for selector %q in domain %q has no PEM block", name, d) + continue + } + key, err := x509.ParsePKCS8PrivateKey(p.Bytes) + if err != nil { + addErrorf("parsing private key for selector %q in domain %q: %s", name, d, err) + continue + } + switch k := key.(type) { + case *rsa.PrivateKey: + if k.N.BitLen() < 1024 { + // ../rfc/6376:757 + // Let's help user do the right thing. 
+ addErrorf("rsa keys should be >= 1024 bits") + } + sel.Key = k + case ed25519.PrivateKey: + if sel.HashEffective != "sha256" { + addErrorf("hash algorithm %q is not supported with ed25519, only sha256 is", sel.HashEffective) + } + sel.Key = k + default: + addErrorf("private key type %T not yet supported, at selector %q in domain %q", key, name, d) + } + + if len(sel.Headers) == 0 { + // ../rfc/6376:2139 + // ../rfc/6376:2203 + // ../rfc/6376:2212 + // By default we seal signed headers, and we sign user-visible headers to + // prevent/limit reuse of previously signed messages: All addressing fields, date + // and subject, message-referencing fields, parsing instructions (content-type). + sel.HeadersEffective = strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-Id,Content-Type", ",") + } else { + var from bool + for _, h := range sel.Headers { + from = from || strings.EqualFold(h, "From") + // ../rfc/6376:2269 + if strings.EqualFold(h, "DKIM-Signature") || strings.EqualFold(h, "Received") || strings.EqualFold(h, "Return-Path") { + log.Error("DKIM-signing header %q is recommended against as it may be modified in transit") + } + } + if !from { + addErrorf("From-field must always be DKIM-signed") + } + sel.HeadersEffective = sel.Headers + } + + domain.DKIM.Selectors[name] = sel + } + + if domain.MTASTS != nil { + if !haveSTSListener { + addErrorf("MTA-STS enabled for domain %q, but there is no listener for MTASTS", d) + } + sts := domain.MTASTS + if sts.PolicyID == "" { + addErrorf("invalid empty MTA-STS PolicyID") + } + switch sts.Mode { + case mtasts.ModeNone, mtasts.ModeTesting, mtasts.ModeEnforce: + default: + addErrorf("invalid mtasts mode %q", sts.Mode) + } + } + + c.Domains[d] = domain + } + + // Post-process email addresses for fast lookups. 
+ accDests = map[string]AccountDestination{} + for accName, acc := range c.Accounts { + var err error + acc.DNSDomain, err = dns.ParseDomain(acc.Domain) + if err != nil { + addErrorf("parsing domain %q for account %q: %s", acc.Domain, accName, err) + } + c.Accounts[accName] = acc + + if strings.EqualFold(acc.RejectsMailbox, "Inbox") { + addErrorf("account %q: cannot set RejectsMailbox to inbox", accName) + } + checkMailboxNormf(acc.RejectsMailbox, "account %q", accName) + for addrName, dest := range acc.Destinations { + checkMailboxNormf(dest.Mailbox, "account %q, destination %q", accName, addrName) + + for i, rs := range dest.Rulesets { + checkMailboxNormf(rs.Mailbox, "account %q, destination %q, ruleset %d", accName, addrName, i+1) + + n := 0 + + if rs.SMTPMailFromRegexp != "" { + n++ + r, err := regexp.Compile(rs.SMTPMailFromRegexp) + if err != nil { + addErrorf("invalid SMTPMailFrom regular expression: %v", err) + } + c.Accounts[accName].Destinations[addrName].Rulesets[i].SMTPMailFromRegexpCompiled = r + } + if rs.VerifiedDomain != "" { + n++ + d, err := dns.ParseDomain(rs.VerifiedDomain) + if err != nil { + addErrorf("invalid VerifiedDomain: %v", err) + } + c.Accounts[accName].Destinations[addrName].Rulesets[i].VerifiedDNSDomain = d + } + + var hdr [][2]*regexp.Regexp + for k, v := range rs.HeadersRegexp { + n++ + if strings.ToLower(k) != k { + addErrorf("header field %q must only have lower case characters", k) + } + if strings.ToLower(v) != v { + addErrorf("header value %q must only have lower case characters", v) + } + rk, err := regexp.Compile(k) + if err != nil { + addErrorf("invalid rule header regexp %q: %v", k, err) + } + rv, err := regexp.Compile(v) + if err != nil { + addErrorf("invalid rule header regexp %q: %v", v, err) + } + hdr = append(hdr, [...]*regexp.Regexp{rk, rv}) + } + c.Accounts[accName].Destinations[addrName].Rulesets[i].HeadersRegexpCompiled = hdr + + if n == 0 { + addErrorf("ruleset must have at least one rule") + } + + if 
rs.ListAllowDomain != "" { + d, err := dns.ParseDomain(rs.ListAllowDomain) + if err != nil { + addErrorf("invalid ListAllowDomain %q: %v", rs.ListAllowDomain, err) + } + c.Accounts[accName].Destinations[addrName].Rulesets[i].ListAllowDNSDomain = d + } + } + + var address smtp.Address + localpart, err := smtp.ParseLocalpart(addrName) + if err != nil && errors.Is(err, smtp.ErrBadLocalpart) { + address, err = smtp.ParseAddress(addrName) + if err != nil { + addErrorf("invalid email address %q in account %q", addrName, accName) + continue + } else if _, ok := c.Domains[address.Domain.Name()]; !ok { + addErrorf("unknown domain for address %q in account %q", addrName, accName) + continue + } + } else { + if err != nil { + addErrorf("invalid localpart %q in account %q", addrName, accName) + continue + } + address = smtp.NewAddress(localpart, acc.DNSDomain) + if _, ok := c.Domains[acc.DNSDomain.Name()]; !ok { + addErrorf("unknown domain %q for account %q", acc.DNSDomain.Name(), accName) + continue + } + } + addrFull := address.Pack(true) + if _, ok := accDests[addrFull]; ok { + addErrorf("duplicate destination address %q", addrFull) + } + accDests[addrFull] = AccountDestination{address.Localpart, accName, dest} + } + } + + // Set DMARC destinations. 
+ for d, domain := range c.Domains { + dmarc := domain.DMARC + if dmarc == nil { + continue + } + if _, ok := c.Accounts[dmarc.Account]; !ok { + addErrorf("DMARC account %q does not exist", dmarc.Account) + } + lp, err := smtp.ParseLocalpart(dmarc.Localpart) + if err != nil { + addErrorf("invalid DMARC localpart %q: %s", dmarc.Localpart, err) + } + if lp.IsInternational() { + // ../rfc/8616:234 + addErrorf("DMARC localpart %q is an internationalized address, only conventional ascii-only address possible for interopability", lp) + } + domain.DMARC.ParsedLocalpart = lp + c.Domains[d] = domain + addrFull := smtp.NewAddress(lp, domain.Domain).String() + dest := config.Destination{ + Mailbox: dmarc.Mailbox, + DMARCReports: true, + } + checkMailboxNormf(dmarc.Mailbox, "DMARC mailbox for account %q", dmarc.Account) + accDests[addrFull] = AccountDestination{lp, dmarc.Account, dest} + } + + // Set TLSRPT destinations. + for d, domain := range c.Domains { + tlsrpt := domain.TLSRPT + if tlsrpt == nil { + continue + } + if _, ok := c.Accounts[tlsrpt.Account]; !ok { + addErrorf("TLSRPT account %q does not exist", tlsrpt.Account) + } + lp, err := smtp.ParseLocalpart(tlsrpt.Localpart) + if err != nil { + addErrorf("invalid TLSRPT localpart %q: %s", tlsrpt.Localpart, err) + } + if lp.IsInternational() { + // Does not appear documented in ../rfc/8460, but similar to DMARC it makes sense + // to keep this ascii-only addresses. 
+ addErrorf("TLSRPT localpart %q is an internationalized address, only conventional ascii-only address allowed for interopability", lp) + } + domain.TLSRPT.ParsedLocalpart = lp + c.Domains[d] = domain + addrFull := smtp.NewAddress(lp, domain.Domain).String() + dest := config.Destination{ + Mailbox: tlsrpt.Mailbox, + TLSReports: true, + } + checkMailboxNormf(tlsrpt.Mailbox, "TLSRPT mailbox for account %q", tlsrpt.Account) + accDests[addrFull] = AccountDestination{lp, tlsrpt.Account, dest} + } + return +} + +func loadTLSKeyCerts(configFile, kind string, ctls *config.TLS) error { + certs := []tls.Certificate{} + for _, kp := range ctls.KeyCerts { + certPath := configDirPath(configFile, kp.CertFile) + keyPath := configDirPath(configFile, kp.KeyFile) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return fmt.Errorf("tls config for %q: parsing x509 key pair: %v", kind, err) + } + certs = append(certs, cert) + } + ctls.Config = &tls.Config{ + Certificates: certs, + } + return nil +} diff --git a/mox-/dir.go b/mox-/dir.go new file mode 100644 index 0000000..792b673 --- /dev/null +++ b/mox-/dir.go @@ -0,0 +1,36 @@ +package mox + +import ( + "path/filepath" +) + +// ConfigDirPath returns the path to "f". Either f itself when absolute, or +// interpreted relative to the directory of the current config file. +func ConfigDirPath(f string) string { + return configDirPath(ConfigStaticPath, f) +} + +// DataDirPath returns to the path to "f". Either f itself when absolute, or +// interpreted relative to the data directory from the currently active +// configuration. +func DataDirPath(f string) string { + return dataDirPath(ConfigStaticPath, Conf.Static.DataDir, f) +} + +// return f interpreted relative to the directory of the config dir. f is returned +// unchanged when absolute. 
+func configDirPath(configFile, f string) string { + if filepath.IsAbs(f) { + return f + } + return filepath.Join(filepath.Dir(configFile), f) +} + +// return f interpreted relative to the data directory that is interpreted relative +// to the directory of the config dir. f is returned unchanged when absolute. +func dataDirPath(configFile, dataDir, f string) string { + if filepath.IsAbs(f) { + return f + } + return filepath.Join(configDirPath(configFile, dataDir), f) +} diff --git a/mox-/doc.go b/mox-/doc.go new file mode 100644 index 0000000..3d718bf --- /dev/null +++ b/mox-/doc.go @@ -0,0 +1,3 @@ +// Package mox provides functions dealing with global state, such as the +// current configuration, and convenience functions. +package mox diff --git a/mox-/ip.go b/mox-/ip.go new file mode 100644 index 0000000..116322b --- /dev/null +++ b/mox-/ip.go @@ -0,0 +1,21 @@ +package mox + +import ( + "net" +) + +// Network returns tcp4 or tcp6, depending on the ip. +// This network can be passed to Listen instead of "tcp", which may start listening +// on both ipv4 and ipv6 for addresses 0.0.0.0 and ::, which can lead to errors +// about the port already being in use. +// For invalid IPs, "tcp" is returned. +func Network(ip string) string { + v := net.ParseIP(ip) + if v == nil { + return "tcp" + } + if v.To4() != nil { + return "tcp4" + } + return "tcp6" +} diff --git a/mox-/lastknown.go b/mox-/lastknown.go new file mode 100644 index 0000000..b1a0da9 --- /dev/null +++ b/mox-/lastknown.go @@ -0,0 +1,51 @@ +package mox + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/updates" +) + +// StoreLastKnown stores the the last known version. Future update checks compare +// against it, or the currently running version, whichever is newer. 
+func StoreLastKnown(v updates.Version) error { + return os.WriteFile(DataDirPath("lastknownversion"), []byte(v.String()), 0660) +} + +// LastKnown returns the last known version that has been mentioned in an update +// email, or the current application. +func LastKnown() (current, lastknown updates.Version, mtime time.Time, rerr error) { + curv, curerr := updates.ParseVersion(moxvar.Version) + + p := DataDirPath("lastknownversion") + fi, _ := os.Stat(p) + if fi != nil { + mtime = fi.ModTime() + } + + vbuf, err := os.ReadFile(p) + if err != nil && !os.IsNotExist(err) { + return curv, updates.Version{}, mtime, err + } + + lastknown, lasterr := updates.ParseVersion(strings.TrimSpace(string(vbuf))) + + if curerr == nil && lasterr == nil { + if curv.After(lastknown) { + return curv, curv, mtime, nil + } + return curv, lastknown, mtime, nil + } else if curerr == nil { + return curv, curv, mtime, nil + } else if lasterr == nil { + return curv, lastknown, mtime, nil + } + if moxvar.Version == "(devel)" { + return curv, updates.Version{}, mtime, fmt.Errorf("development version") + } + return curv, updates.Version{}, mtime, fmt.Errorf("parsing version: %w", err) +} diff --git a/mox-/lifecycle.go b/mox-/lifecycle.go new file mode 100644 index 0000000..6804e7d --- /dev/null +++ b/mox-/lifecycle.go @@ -0,0 +1,147 @@ +package mox + +import ( + "context" + "net" + "runtime/debug" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// Shutdown is closed when a graceful shutdown is initiated. SMTP, IMAP, periodic +// processes should check this before starting a new operation. If true, the +// operation should be aborted, and new connections should receive a message that +// the service is currently not available. +var Shutdown chan struct{} + +// Context should be used as parent by all operations. It is canceled when mox is +// shutdown, aborting all pending operations. 
+// +// Operations typically have context timeouts, 30s for single i/o like DNS queries, +// and 1 minute for operations with more back and forth. These are set through a +// context.WithTimeout based on this context, so those contexts are still canceled +// when shutting down. +// +// Explicit read/write deadlines on connections, typically 30s. +// +// HTTP servers don't get graceful shutdown, their connections are just aborted. +var Context context.Context + +// Connections holds all active protocol sockets (smtp, imap). They will be given +// an immediate read/write deadline shortly after initiating mox shutdown, after +// which the connections get 1 more second for error handling before actual +// shutdown. +var Connections = &connections{ + conns: map[net.Conn]connKind{}, + gauges: map[connKind]prometheus.GaugeFunc{}, + active: map[connKind]int64{}, +} + +type connKind struct { + protocol string + listener string +} + +type connections struct { + sync.Mutex + conns map[net.Conn]connKind + dones []chan struct{} + gauges map[connKind]prometheus.GaugeFunc + + activeMutex sync.Mutex + active map[connKind]int64 +} + +// Register adds a connection for receiving an immediate i/o deadline on shutdown. +// When the connection is closed, Remove must be called to cancel the registration. +func (c *connections) Register(nc net.Conn, protocol, listener string) { + // This can happen, when a connection was initiated before a shutdown, but it + // doesn't hurt to log it. 
+ select { + case <-Shutdown: + xlog.Error("new connection added while shutting down") + debug.PrintStack() + default: + } + + ck := connKind{protocol, listener} + + c.activeMutex.Lock() + c.active[ck]++ + c.activeMutex.Unlock() + + c.Lock() + defer c.Unlock() + c.conns[nc] = ck + if _, ok := c.gauges[ck]; !ok { + c.gauges[ck] = promauto.NewGaugeFunc( + prometheus.GaugeOpts{ + Name: "mox_connections_count", + Help: "Open connections, per protocol/listener.", + ConstLabels: prometheus.Labels{ + "protocol": protocol, + "listener": listener, + }, + }, + func() float64 { + c.activeMutex.Lock() + defer c.activeMutex.Unlock() + return float64(c.active[ck]) + }, + ) + } +} + +// Unregister removes a connection for shutdown. +func (c *connections) Unregister(nc net.Conn) { + c.Lock() + defer c.Unlock() + ck := c.conns[nc] + + defer func() { + c.activeMutex.Lock() + c.active[ck]-- + c.activeMutex.Unlock() + }() + + delete(c.conns, nc) + if len(c.conns) > 0 { + return + } + for _, done := range c.dones { + done <- struct{}{} + } + c.dones = nil +} + +// Shutdown sets an immediate i/o deadline on all open registered sockets. Called +// some time after mox shutdown is initiated. +// The deadline will cause i/o's to be aborted, which should result in the +// connection being unregistered. +func (c *connections) Shutdown() { + now := time.Now() + c.Lock() + defer c.Unlock() + for nc := range c.conns { + if err := nc.SetDeadline(now); err != nil { + xlog.Errorx("setting immediate read/write deadline for shutdown", err) + } + } +} + +// Done returns a new channel on which a value is sent when no more sockets are +// open, which could be immediate. 
+func (c *connections) Done() chan struct{} { + c.Lock() + defer c.Unlock() + done := make(chan struct{}, 1) + if len(c.conns) == 0 { + done <- struct{}{} + return done + } + c.dones = append(c.dones, done) + return done +} diff --git a/mox-/lifecycle_test.go b/mox-/lifecycle_test.go new file mode 100644 index 0000000..4c58e04 --- /dev/null +++ b/mox-/lifecycle_test.go @@ -0,0 +1,44 @@ +package mox + +import ( + "errors" + "net" + "os" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestLifecycle(t *testing.T) { + c := &connections{ + conns: map[net.Conn]connKind{}, + gauges: map[connKind]prometheus.GaugeFunc{}, + active: map[connKind]int64{}, + } + nc0, nc1 := net.Pipe() + defer nc0.Close() + defer nc1.Close() + c.Register(nc0, "proto", "listener") + c.Shutdown() + + done := c.Done() + select { + case <-done: + t.Fatalf("already done, but still a connection open") + default: + } + + _, err := nc0.Read(make([]byte, 1)) + if err == nil { + t.Fatalf("expected i/o deadline exceeded, got no error") + } + if !errors.Is(err, os.ErrDeadlineExceeded) { + t.Fatalf("got %v, expected os.ErrDeadlineExceeded", err) + } + c.Unregister(nc0) + select { + case <-done: + default: + t.Fatalf("unregistered connection, but not yet done") + } +} diff --git a/mox-/lookup.go b/mox-/lookup.go new file mode 100644 index 0000000..2559fe7 --- /dev/null +++ b/mox-/lookup.go @@ -0,0 +1,66 @@ +package mox + +import ( + "errors" + "fmt" + "strings" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +var ( + ErrDomainNotFound = errors.New("domain not found") + ErrAccountNotFound = errors.New("account not found") +) + +// FindAccount lookups the account for localpart and domain. +// +// Can return ErrDomainNotFound and ErrAccountNotFound. 
+func FindAccount(localpart smtp.Localpart, domain dns.Domain, allowPostmaster bool) (accountName string, canonicalAddress string, dest config.Destination, rerr error) { + if strings.EqualFold(string(localpart), "postmaster") { + localpart = "postmaster" + } + var zerodomain dns.Domain + if localpart == "postmaster" && domain == zerodomain { + if !allowPostmaster { + return "", "", config.Destination{}, ErrAccountNotFound + } + return Conf.Static.Postmaster.Account, "postmaster", config.Destination{Mailbox: Conf.Static.Postmaster.Mailbox}, nil + } + + d, ok := Conf.Domain(domain) + if !ok { + return "", "", config.Destination{}, ErrDomainNotFound + } + + localpart, err := CanonicalLocalpart(localpart, d) + if err != nil { + return "", "", config.Destination{}, fmt.Errorf("%w: %s", ErrAccountNotFound, err) + } + canonical := smtp.NewAddress(localpart, domain).String() + + accAddr, ok := Conf.AccountDestination(canonical) + if !ok { + return "", "", config.Destination{}, ErrAccountNotFound + } + return accAddr.Account, canonical, accAddr.Destination, nil +} + +// CanonicalLocalpart returns the canonical localpart, removing optional catchall +// separator, and optionally lower-casing the string. +func CanonicalLocalpart(localpart smtp.Localpart, d config.Domain) (smtp.Localpart, error) { + if d.LocalpartCatchallSeparator != "" { + t := strings.SplitN(string(localpart), d.LocalpartCatchallSeparator, 2) + localpart = smtp.Localpart(t[0]) + if localpart == "" { + return "", fmt.Errorf("empty localpart") + } + } + + if !d.LocalpartCaseSensitive { + localpart = smtp.Localpart(strings.ToLower(string(localpart))) + } + return localpart, nil +} diff --git a/mox-/msgid.go b/mox-/msgid.go new file mode 100644 index 0000000..9fe2f6b --- /dev/null +++ b/mox-/msgid.go @@ -0,0 +1,14 @@ +package mox + +import ( + "encoding/base64" +) + +var messageIDRand = NewRand() + +// MessageIDGen returns a generated unique random Message-Id value, excluding <>. 
+func MessageIDGen(smtputf8 bool) string {
+	buf := make([]byte, 16)
+	messageIDRand.Read(buf)
+	return base64.RawURLEncoding.EncodeToString(buf) + "@" + Conf.Static.HostnameDomain.XName(smtputf8)
+}
diff --git a/mox-/rand.go b/mox-/rand.go
new file mode 100644
index 0000000..ec088a9
--- /dev/null
+++ b/mox-/rand.go
+package mox
+
+import (
+	cryptorand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	mathrand "math/rand"
+)
+
+// NewRand returns a new PRNG seeded with random bytes from crypto/rand.
+func NewRand() *mathrand.Rand {
+	return mathrand.New(mathrand.NewSource(cryptoRandInt()))
+}
+
+func cryptoRandInt() int64 {
+	buf := make([]byte, 8)
+	_, err := cryptorand.Read(buf)
+	if err != nil {
+		panic(fmt.Errorf("reading random bytes: %v", err))
+	}
+	return int64(binary.LittleEndian.Uint64(buf))
+}
diff --git a/mox-/recvid.go b/mox-/recvid.go
new file mode 100644
index 0000000..e3c7847
--- /dev/null
+++ b/mox-/recvid.go
+package mox
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"encoding/base64"
+	"encoding/binary"
+	"fmt"
+)
+
+var idCipher cipher.Block
+var idRand []byte
+
+func init() {
+	// Init for tests. Overwritten in ../serve.go.
+	err := ReceivedIDInit([]byte("0123456701234567"), []byte("01234567"))
+	if err != nil {
+		panic(err)
+	}
+}
+
+// ReceivedIDInit sets an AES key (must be 16 bytes) and random buffer (must be
+// 8 bytes) for use by ReceivedID.
+//
+// On error, idCipher and idRand are left unchanged, so a previously working
+// configuration is not clobbered by a bad reinitialization.
+func ReceivedIDInit(key, rand []byte) error {
+	// ReceivedToCid compares exactly 8 bytes against idRand; any other length
+	// would make every future decode fail with "rand mismatch".
+	if len(rand) != 8 {
+		return fmt.Errorf("rand must be 8 bytes, got %d", len(rand))
+	}
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return err
+	}
+	idCipher = c
+	idRand = rand
+	return nil
+}
+
+// ReceivedID returns an ID for use in a message Received header.
+//
+// The ID is based on the cid. The cid itself is a counter and would leak the
+// number of connections in received headers. Instead they are obfuscated by
+// encrypting them with AES with a per-install key and random buffer. This allows
+// recovery of the cid based on the id. See subcommand cid.
+func ReceivedID(cid int64) string { + buf := make([]byte, 16) + copy(buf, idRand) + binary.BigEndian.PutUint64(buf[8:], uint64(cid)) + idCipher.Encrypt(buf, buf) + return base64.RawURLEncoding.EncodeToString(buf) +} + +// ReceivedToCid returns the cid given a ReceivedID. +func ReceivedToCid(s string) (cid int64, err error) { + buf, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + return 0, fmt.Errorf("decode base64: %v", err) + } + if len(buf) != 16 { + return 0, fmt.Errorf("bad length, got %d, expect 16", len(buf)) + } + idCipher.Decrypt(buf, buf) + if !bytes.Equal(buf[:8], idRand) { + return 0, fmt.Errorf("rand mismatch") + } + cid = int64(binary.BigEndian.Uint64(buf[8:])) + return cid, nil +} diff --git a/mox-/setcaphint.go b/mox-/setcaphint.go new file mode 100644 index 0000000..259dc1e --- /dev/null +++ b/mox-/setcaphint.go @@ -0,0 +1,18 @@ +package mox + +import ( + "errors" + "os" + "runtime" +) + +// todo: perhaps find and document the recommended way to get this on other platforms? + +// LinuxSetcapHint returns a hint about using setcap for binding to privileged +// ports, only if relevant the error and GOOS (Linux). +func LinuxSetcapHint(err error) string { + if runtime.GOOS == "linux" && errors.Is(err, os.ErrPermission) { + return " (privileged port? try again after: sudo setcap 'cap_net_bind_service=+ep' mox)" + } + return "" +} diff --git a/mox-/sleep.go b/mox-/sleep.go new file mode 100644 index 0000000..af56bc5 --- /dev/null +++ b/mox-/sleep.go @@ -0,0 +1,19 @@ +package mox + +import ( + "context" + "time" +) + +// Sleep for d, but return as soon as ctx is done. +// +// Used for a few places where sleep is used to push back on clients, but where +// shutting down should abort the sleep. 
+func Sleep(ctx context.Context, d time.Duration) {
+	// A one-shot wait: Timer, not Ticker. A ticker is a repeating timer and
+	// would keep scheduling ticks until stopped.
+	t := time.NewTimer(d)
+	defer t.Stop()
+	select {
+	case <-t.C:
+	case <-ctx.Done():
+	}
+}
diff --git a/mox-/tlsinfo.go b/mox-/tlsinfo.go
new file mode 100644
index 0000000..5c60671
--- /dev/null
+++ b/mox-/tlsinfo.go
+package mox
+
+import (
+	"crypto/tls"
+	"fmt"
+)
+
+// TLSInfo returns human-readable strings about the TLS connection, for use in
+// logging.
+func TLSInfo(conn *tls.Conn) (version, ciphersuite string) {
+	st := conn.ConnectionState()
+
+	versions := map[uint16]string{
+		tls.VersionTLS10: "TLS1.0",
+		tls.VersionTLS11: "TLS1.1",
+		tls.VersionTLS12: "TLS1.2",
+		tls.VersionTLS13: "TLS1.3",
+	}
+
+	v, ok := versions[st.Version]
+	if ok {
+		version = v
+	} else {
+		version = fmt.Sprintf("TLS %x", st.Version)
+	}
+
+	ciphersuite = tls.CipherSuiteName(st.CipherSuite)
+	return
+}
diff --git a/mox.service b/mox.service
new file mode 100644
index 0000000..a0a1b76
--- /dev/null
+++ b/mox.service
+[Unit]
+Description=mox mail server
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+UMask=007
+LimitNOFILE=65535
+Type=simple
+User=mox
+Group=mox
+Environment="MOXCONF=/home/service/mox/config/mox.conf"
+WorkingDirectory=/home/service/mox
+ExecStart=/home/service/mox/mox serve
+RestartSec=5s
+Restart=always
+ExecStop=/home/service/mox/mox stop
+# Restart does shut down existing smtp/imap connections (gracefully), but first
+# verifies the config file, and it returns after restart was complete.
+ExecReload=/home/service/mox/mox restart
+
+# Isolate process, reducing attack surface.
+PrivateDevices=yes +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/home/service/mox/config /home/service/mox/data +ProtectKernelTunables=yes +ProtectControlGroups=yes +AmbientCapabilities=CAP_NET_BIND_SERVICE +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +NoNewPrivileges=yes +RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX AF_NETLINK +ProtectProc=invisible +RestrictSUIDSGID=yes +RestrictNamespaces=yes +RestrictRealtime=yes +RemoveIPC=yes +ProtectHostname=yes +ProtectClock=yes +ProtectKernelLogs=yes +ProtectKernelModules=yes +MemoryDenyWriteExecute=yes +LockPersonality=yes +DevicePolicy=closed +SystemCallArchitectures=native +SystemCallFilter=@system-service + +# prevents CAP_NET_BIND_SERVICE from working? +#PrivateUsers=yes + +# To check security-related settings: +# sudo systemd-analyze security mox.service + +[Install] +WantedBy=multi-user.target diff --git a/moxio/atreader.go b/moxio/atreader.go new file mode 100644 index 0000000..45ab3c9 --- /dev/null +++ b/moxio/atreader.go @@ -0,0 +1,20 @@ +package moxio + +import ( + "io" +) + +// AtReader is turns an io.ReaderAt into a io.Reader by keeping track of the +// offset. +type AtReader struct { + R io.ReaderAt + Offset int64 +} + +func (r *AtReader) Read(buf []byte) (int, error) { + n, err := r.R.ReadAt(buf, r.Offset) + if n > 0 { + r.Offset += int64(n) + } + return n, err +} diff --git a/moxio/bufpool.go b/moxio/bufpool.go new file mode 100644 index 0000000..bc2f241 --- /dev/null +++ b/moxio/bufpool.go @@ -0,0 +1,103 @@ +package moxio + +import ( + "bufio" + "errors" + "fmt" + "io" + + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("moxio") + +// todo: instead of a bufpool, should maybe just make an alternative to bufio.Reader with a big enough buffer that we can fully use to read a line. + +var ErrLineTooLong = errors.New("line from remote too long") // Returned by Bufpool.Readline. + +// Bufpool caches byte slices for reuse during parsing of line-terminated commands. 
+type Bufpool struct { + c chan []byte + size int +} + +// NewBufpool makes a new pool, initially empty, but holding at most "max" buffers of "size" bytes each. +func NewBufpool(max, size int) *Bufpool { + return &Bufpool{ + c: make(chan []byte, max), + size: size, + } +} + +// get returns a buffer from the pool if available, otherwise allocates a new buffer. +// The buffer should be returned with a call to put. +func (b *Bufpool) get() []byte { + var buf []byte + + // Attempt to get buffer from pool. Otherwise create new buffer. + select { + case buf = <-b.c: + default: + } + if buf == nil { + buf = make([]byte, b.size) + } + return buf +} + +// put puts a "buf" back in the pool. Put clears the first "n" bytes, which should +// be all the bytes that have been read in the buffer. If the pool is full, the +// buffer is discarded, and will be cleaned up by the garbage collector. +// The caller should no longer reference "buf" after a call to put. +func (b *Bufpool) put(buf []byte, n int) { + if len(buf) != b.size { + xlog.Error("buffer with bad size returned, ignoring", mlog.Field("badsize", len(buf)), mlog.Field("expsize", b.size)) + return + } + + for i := 0; i < n; i++ { + buf[i] = 0 + } + select { + case b.c <- buf: + default: + } +} + +// Readline reads a \n- or \r\n-terminated line. Line is returned without \n or \r\n. +// If the line was too long, ErrLineTooLong is returned. +// If an EOF is encountered before a \n, io.ErrUnexpectedEOF is returned. +func (b *Bufpool) Readline(r *bufio.Reader) (line string, rerr error) { + var nread int + buf := b.get() + defer func() { + b.put(buf, nread) + }() + + // Read until newline. If we reach the end of the buffer first, we write back an + // error and abort the connection because our protocols cannot be recovered. We + // don't want to consume data until we finally see a newline, which may be never. 
+ for { + if nread >= len(buf) { + return "", fmt.Errorf("%w: no newline after all %d bytes", ErrLineTooLong, nread) + } + c, err := r.ReadByte() + if err == io.EOF { + return "", io.ErrUnexpectedEOF + } else if err != nil { + return "", fmt.Errorf("reading line from remote: %w", err) + } + if c == '\n' { + var s string + if nread > 0 && buf[nread-1] == '\r' { + s = string(buf[:nread-1]) + } else { + s = string(buf[:nread]) + } + nread++ + return s, nil + } + buf[nread] = c + nread++ + } +} diff --git a/moxio/bufpool_test.go b/moxio/bufpool_test.go new file mode 100644 index 0000000..f2e6d61 --- /dev/null +++ b/moxio/bufpool_test.go @@ -0,0 +1,57 @@ +package moxio + +import ( + "bufio" + "errors" + "fmt" + "io" + "strings" + "testing" +) + +func TestBufpool(t *testing.T) { + bp := NewBufpool(1, 8) + a := bp.get() + b := bp.get() + for i := 0; i < len(a); i++ { + a[i] = 1 + } + bp.put(a, len(a)) // Will be stored. + bp.put(b, 0) // Will be discarded. + na := bp.get() + if fmt.Sprintf("%p", a) != fmt.Sprintf("%p", na) { + t.Fatalf("received unexpected new buf %p != %p", a, na) + } + for _, c := range na { + if c != 0 { + t.Fatalf("reused buf not cleared") + } + } + + if _, err := bp.Readline(bufio.NewReader(strings.NewReader("this is too long"))); !errors.Is(err, ErrLineTooLong) { + t.Fatalf("expected ErrLineTooLong, got error %v", err) + } + if _, err := bp.Readline(bufio.NewReader(strings.NewReader("short"))); !errors.Is(err, io.ErrUnexpectedEOF) { + t.Fatalf("expected ErrLineTooLong, got error %v", err) + } + + er := errReader{fmt.Errorf("bad")} + if _, err := bp.Readline(bufio.NewReader(er)); err == nil || !errors.Is(err, er.err) { + t.Fatalf("got unexpected error %s", err) + } + + if line, err := bp.Readline(bufio.NewReader(strings.NewReader("ok\r\n"))); line != "ok" { + t.Fatalf(`got %q, err %v, expected line "ok"`, line, err) + } + if line, err := bp.Readline(bufio.NewReader(strings.NewReader("ok\n"))); line != "ok" { + t.Fatalf(`got %q, err %v, expected line 
"ok"`, line, err) + } +} + +type errReader struct { + err error +} + +func (r errReader) Read(buf []byte) (int, error) { + return 0, r.err +} diff --git a/moxio/doc.go b/moxio/doc.go new file mode 100644 index 0000000..c96caf9 --- /dev/null +++ b/moxio/doc.go @@ -0,0 +1,2 @@ +// Package moxio has common i/o functions. +package moxio diff --git a/moxio/isclosed.go b/moxio/isclosed.go new file mode 100644 index 0000000..853c1bd --- /dev/null +++ b/moxio/isclosed.go @@ -0,0 +1,24 @@ +package moxio + +import ( + "errors" + "net" + "syscall" +) + +// In separate file because of import of syscall. + +// IsClosed returns whether i/o failed, typically because the connection is closed +// or otherwise cannot be used for further i/o. +// +// Used to prevent error logging for connections that are closed. +func IsClosed(err error) bool { + return errors.Is(err, net.ErrClosed) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || isRemoteTLSError(err) +} + +// A remote TLS client can send a message indicating failure, this makes it back to +// us as a write error. +func isRemoteTLSError(err error) bool { + var netErr *net.OpError + return errors.As(err, &netErr) && netErr.Op == "remote error" +} diff --git a/moxio/limitatreader.go b/moxio/limitatreader.go new file mode 100644 index 0000000..c67ed5d --- /dev/null +++ b/moxio/limitatreader.go @@ -0,0 +1,20 @@ +package moxio + +import ( + "io" +) + +// LimitAtReader is a reader at that returns ErrLimit if reads would extend +// beyond Limit. +type LimitAtReader struct { + R io.ReaderAt + Limit int64 +} + +// ReadAt passes the read on to R, but returns an error if the read data would extend beyond Limit. 
+func (r *LimitAtReader) ReadAt(buf []byte, offset int64) (int, error) { + if offset+int64(len(buf)) > r.Limit { + return 0, ErrLimit + } + return r.R.ReadAt(buf, offset) +} diff --git a/moxio/limitreader.go b/moxio/limitreader.go new file mode 100644 index 0000000..f025ecc --- /dev/null +++ b/moxio/limitreader.go @@ -0,0 +1,27 @@ +package moxio + +import ( + "errors" + "io" +) + +var ErrLimit = errors.New("input exceeds maximum size") // Returned by LimitReader. + +// LimitReader reads up to Limit bytes, returning an error if more bytes are +// read. LimitReader can be used to enforce a maximum input length. +type LimitReader struct { + R io.Reader + Limit int64 +} + +// Read reads bytes from the underlying reader. +func (r *LimitReader) Read(buf []byte) (int, error) { + n, err := r.R.Read(buf) + if n > 0 { + r.Limit -= int64(n) + if r.Limit < 0 { + return 0, ErrLimit + } + } + return n, err +} diff --git a/moxio/prefixconn.go b/moxio/prefixconn.go new file mode 100644 index 0000000..0602e52 --- /dev/null +++ b/moxio/prefixconn.go @@ -0,0 +1,25 @@ +package moxio + +import ( + "io" + "net" +) + +// PrefixConn is a net.Conn prefixed with a reader that is first drained. +// Used for STARTTLS where already did a buffered read of initial TLS data. +type PrefixConn struct { + PrefixReader io.Reader // If not nil, reads are fulfilled from here. It is cleared when a read returns io.EOF. + net.Conn +} + +// Read returns data when PrefixReader when not nil, and net.Conn otherwise. +func (c *PrefixConn) Read(buf []byte) (int, error) { + if c.PrefixReader != nil { + n, err := c.PrefixReader.Read(buf) + if err == io.EOF { + c.PrefixReader = nil + } + return n, err + } + return c.Conn.Read(buf) +} diff --git a/moxio/storagespace.go b/moxio/storagespace.go new file mode 100644 index 0000000..37c68aa --- /dev/null +++ b/moxio/storagespace.go @@ -0,0 +1,14 @@ +package moxio + +import ( + "errors" + "syscall" +) + +// In separate file because of syscall import. 
+ +// IsStorageSpace returns whether the error is for storage space issue. +// Like disk full, no inodes, quota reached. +func IsStorageSpace(err error) bool { + return errors.Is(err, syscall.ENOSPC) || errors.Is(err, syscall.EDQUOT) +} diff --git a/moxio/syncdir.go b/moxio/syncdir.go new file mode 100644 index 0000000..a490d6d --- /dev/null +++ b/moxio/syncdir.go @@ -0,0 +1,17 @@ +package moxio + +import ( + "fmt" + "os" +) + +// SyncDir opens a directory and syncs its contents to disk. +func SyncDir(dir string) error { + d, err := os.Open(dir) + if err != nil { + return fmt.Errorf("open directory: %v", err) + } + xerr := d.Sync() + d.Close() + return xerr +} diff --git a/moxio/trace.go b/moxio/trace.go new file mode 100644 index 0000000..cdd0d1d --- /dev/null +++ b/moxio/trace.go @@ -0,0 +1,48 @@ +package moxio + +import ( + "io" + + "github.com/mjl-/mox/mlog" +) + +type writer struct { + log *mlog.Log + prefix string + w io.Writer +} + +// NewTraceWriter wraps "w" into a writer that logs all writes to "log" with +// log level trace, prefixed with "prefix". +func NewTraceWriter(log *mlog.Log, prefix string, w io.Writer) io.Writer { + return writer{log, prefix, w} +} + +// Write logs a trace line for writing buf to the client, then writes to the +// client. +func (w writer) Write(buf []byte) (int, error) { + w.log.Trace(w.prefix + string(buf)) + return w.w.Write(buf) +} + +type reader struct { + log *mlog.Log + prefix string + r io.Reader +} + +// NewTraceReader wraps reader "r" into a reader that logs all reads to "log" +// with log level trace, prefixed with "prefix". +func NewTraceReader(log *mlog.Log, prefix string, r io.Reader) io.Reader { + return reader{log, prefix, r} +} + +// Read does a single Read on its underlying reader, logs data of successful +// reads, and returns the data read. 
+func (r reader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if n > 0 { + r.log.Trace(r.prefix + string(buf[:n])) + } + return n, err +} diff --git a/moxio/umask.go b/moxio/umask.go new file mode 100644 index 0000000..73fc493 --- /dev/null +++ b/moxio/umask.go @@ -0,0 +1,18 @@ +package moxio + +import ( + "fmt" + "syscall" +) + +// CheckUmask checks that the umask is 7 for "other". Because files written +// should not be world-accessible. E.g. database files, and the control unix +// domain socket. +func CheckUmask() error { + old := syscall.Umask(007) + syscall.Umask(old) + if old&7 != 7 { + return fmt.Errorf(`umask must have "7" for world/other, e.g. 007, not current %o`, old) + } + return nil +} diff --git a/moxvar/version.go b/moxvar/version.go new file mode 100644 index 0000000..8c6bac8 --- /dev/null +++ b/moxvar/version.go @@ -0,0 +1,38 @@ +// Package moxvar provides the version number of a mox build. +package moxvar + +import ( + "runtime/debug" +) + +// Version is set at runtime based on the Go module used to build. +var Version = "(devel)" + +func init() { + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return + } + Version = buildInfo.Main.Version + if Version == "(devel)" { + var vcsRev, vcsMod string + for _, setting := range buildInfo.Settings { + if setting.Key == "vcs.revision" { + vcsRev = setting.Value + } else if setting.Key == "vcs.modified" { + vcsMod = setting.Value + } + } + if vcsRev == "" { + return + } + Version = vcsRev + switch vcsMod { + case "false": + case "true": + Version += "+modifications" + default: + Version += "+unknown" + } + } +} diff --git a/mtasts/mtasts.go b/mtasts/mtasts.go new file mode 100644 index 0000000..54ba880 --- /dev/null +++ b/mtasts/mtasts.go @@ -0,0 +1,333 @@ +// Package mtasts implements MTA-STS (SMTP MTA Strict Transport Security, RFC 8461) +// which allows a domain to specify SMTP TLS requirements. 
+// +// SMTP for message delivery to a remote mail server always starts out unencrypted, +// in plain text. STARTTLS allows upgrading the connection to TLS, but is optional +// and by default mail servers will fall back to plain text communication if +// STARTTLS does not work (which can be sabotaged by DNS manipulation or SMTP +// connection manipulation). MTA-STS can specify a policy for requiring STARTTLS to +// be used for message delivery. A TXT DNS record at "_mta-sts." specifies +// the version of the policy, and +// "https://mta-sts./.well-known/mta-sts.txt" serves the policy. +package mtasts + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" +) + +var xlog = mlog.New("mtasts") + +var ( + metricGet = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_mtasts_get_duration_seconds", + Help: "MTA-STS get of policy, including lookup, duration and result.", + Buckets: []float64{0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20}, + }, + []string{ + "result", // ok, lookuperror, fetcherror + }, + ) +) + +// Pair is an extension key/value pair in a MTA-STS DNS record or policy. +type Pair struct { + Key string + Value string +} + +// Record is an MTA-STS DNS record, served under "_mta-sts." as a TXT +// record. +// +// Example: +// +// v=STSv1; id=20160831085700Z +type Record struct { + Version string // "STSv1", for "v=". Required. + ID string // Record version, for "id=". Required. + Extensions []Pair // Optional extensions. +} + +// String returns a textual version of the MTA-STS record for use as DNS TXT +// record. 
+func (r Record) String() string { + b := &strings.Builder{} + fmt.Fprint(b, "v="+r.Version) + fmt.Fprint(b, "; id="+r.ID) + for _, p := range r.Extensions { + fmt.Fprint(b, "; "+p.Key+"="+p.Value) + } + return b.String() +} + +// Mode indicates how the policy should be interpreted. +type Mode string + +// ../rfc/8461:655 + +const ( + ModeEnforce Mode = "enforce" // Policy must be followed, i.e. deliveries must fail if a TLS connection cannot be made. + ModeTesting Mode = "testing" // In case TLS cannot be negotiated, plain SMTP can be used, but failures must be reported, e.g. with TLS-RPT. + ModeNone Mode = "none" // In case MTA-STS is not or no longer implemented. +) + +// STSMX is an allowlisted MX host name/pattern. +// todo: find a way to name this just STSMX without getting duplicate names for "MX" in the sherpa api. +type STSMX struct { + // "*." wildcard, e.g. if a subdomain matches. A wildcard must match exactly one + // label. *.example.com matches mail.example.com, but not example.com, and not + // foor.bar.example.com. + Wildcard bool + + Domain dns.Domain +} + +// Policy is an MTA-STS policy as served at "https://mta-sts./.well-known/mta-sts.txt". +type Policy struct { + Version string // "STSv1" + Mode Mode + MX []STSMX + MaxAgeSeconds int // How long this policy can be cached. Suggested values are in weeks or more. + Extensions []Pair +} + +// String returns a textual representation for serving at the well-known URL. +func (p Policy) String() string { + b := &strings.Builder{} + line := func(k, v string) { + fmt.Fprint(b, k+": "+v+"\n") + } + line("version", p.Version) + line("mode", string(p.Mode)) + line("max_age", fmt.Sprintf("%d", p.MaxAgeSeconds)) + for _, mx := range p.MX { + s := mx.Domain.Name() + if mx.Wildcard { + s = "*." + s + } + line("mx", s) + } + return b.String() +} + +// Matches returns whether the hostname matches the mx list in the policy. 
+func (p *Policy) Matches(host dns.Domain) bool { + // ../rfc/8461:636 + for _, mx := range p.MX { + if mx.Wildcard { + v := strings.SplitN(host.ASCII, ".", 2) + if len(v) == 2 && v[1] == mx.Domain.ASCII { + return true + } + } else if host == mx.Domain { + return true + } + } + return false +} + +// Lookup errors. +var ( + ErrNoRecord = errors.New("mtasts: no mta-sts dns txt record") // Domain does not implement MTA-STS. If a cached non-expired policy is available, it should still be used. + ErrMultipleRecords = errors.New("mtasts: multiple mta-sts records") // Should be treated as if domain does not implement MTA-STS, unless a cached non-expired policy is available. + ErrDNS = errors.New("mtasts: dns lookup") // For temporary DNS errors. + ErrRecordSyntax = errors.New("mtasts: record syntax error") +) + +// LookupRecord looks up the MTA-STS TXT DNS record at "_mta-sts.", +// following CNAME records, and returns the parsed MTA-STS record, the DNS TXT +// record and any CNAMEs that were followed. +func LookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (rrecord *Record, rtxt string, rcnames []string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("mtasts lookup result", rerr, mlog.Field("domain", domain), mlog.Field("record", rrecord), mlog.Field("cnames", rcnames), mlog.Field("duration", time.Since(start))) + }() + + // ../rfc/8461:289 + // ../rfc/8461:351 + // We lookup the txt record, but must follow CNAME records when the TXT does not exist. + var cnames []string + name := "_mta-sts." + domain.ASCII + "." + var txts []string + for { + var err error + txts, err = dns.WithPackage(resolver, "mtasts").LookupTXT(ctx, name) + if dns.IsNotFound(err) { + // DNS has no specified limit on how many CNAMEs to follow. Chains of 10 CNAMEs + // have been seen on the internet. 
+ if len(cnames) > 16 { + return nil, "", cnames, fmt.Errorf("too many cnames") + } + cname, err := dns.WithPackage(resolver, "mtasts").LookupCNAME(ctx, name) + if dns.IsNotFound(err) { + return nil, "", cnames, ErrNoRecord + } + if err != nil { + return nil, "", cnames, fmt.Errorf("%w: %s", ErrDNS, err) + } + cnames = append(cnames, cname) + name = cname + continue + } else if err != nil { + return nil, "", cnames, fmt.Errorf("%w: %s", ErrDNS, err) + } else { + break + } + } + + var text string + var record *Record + for _, txt := range txts { + r, ismtasts, err := ParseRecord(txt) + if !ismtasts { + // ../rfc/8461:331 says we should essentially treat a record starting with e.g. + // "v=STSv1 ;" (note the space) as a non-STS record too in case of multiple TXT + // records. We treat it as an STS record that is invalid, which is possibly more + // reasonable. + continue + } + if err != nil { + return nil, "", cnames, err + } + if record != nil { + return nil, "", cnames, ErrMultipleRecords + } + record = r + text = txt + } + if record == nil { + return nil, "", cnames, ErrNoRecord + } + return record, text, cnames, nil +} + +// Policy fetch errors. +var ( + ErrNoPolicy = errors.New("mtasts: no policy served") // If the name "mta-sts." does not exist in DNS or if webserver returns HTTP status 404 "File not found". + ErrPolicyFetch = errors.New("mtasts: cannot fetch policy") // E.g. for HTTP request errors. + ErrPolicySyntax = errors.New("mtasts: policy syntax error") +) + +// HTTPClient is used by FetchPolicy for HTTP requests. +var HTTPClient = &http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return fmt.Errorf("redirect not allowed for MTA-STS policies") // ../rfc/8461:549 + }, +} + +// FetchPolicy fetches a new policy for the domain, at +// https://mta-sts./.well-known/mta-sts.txt. +// +// FetchPolicy returns the parsed policy and the literal policy text as fetched +// from the server. 
If a policy was fetched but could not be parsed, the policyText +// return value will be set. +// +// Policies longer than 64KB result in a syntax error. +// +// If an error is returned, callers should back off for 5 minutes until the next +// attempt. +func FetchPolicy(ctx context.Context, domain dns.Domain) (policy *Policy, policyText string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("mtasts fetch policy result", rerr, mlog.Field("domain", domain), mlog.Field("policy", policy), mlog.Field("policytext", policyText), mlog.Field("duration", time.Since(start))) + }() + + // Timeout of 1 minute. ../rfc/8461:569 + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + // TLS requirements are what the Go standard library checks: trusted, non-expired, + // hostname validated against DNS-ID supporting wildcard. ../rfc/8461:524 + url := "https://mta-sts." + domain.Name() + "/.well-known/mta-sts.txt" + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, "", fmt.Errorf("%w: http request: %s", ErrPolicyFetch, err) + } + // We are not likely to reuse a connection: we cache policies and negative DNS + // responses. So don't keep connections open unnecessarily. + req.Close = true + + resp, err := HTTPClient.Do(req) + if dns.IsNotFound(err) { + return nil, "", ErrNoPolicy + } + if err != nil { + return nil, "", fmt.Errorf("%w: http get: %s", ErrPolicyFetch, err) + } + metrics.HTTPClientObserve(ctx, "mtasts", req.Method, resp.StatusCode, err, start) + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return nil, "", ErrNoPolicy + } + if resp.StatusCode != http.StatusOK { + // ../rfc/8461:548 + return nil, "", fmt.Errorf("%w: http status %s while status 200 is required", ErrPolicyFetch, resp.Status) + } + + // We don't look at Content-Type and charset. It should be ASCII or UTF-8, we'll + // just always whatever is sent as UTF-8. 
../rfc/8461:367 + + // ../rfc/8461:570 + buf, err := io.ReadAll(&moxio.LimitReader{R: resp.Body, Limit: 64 * 1024}) + if err != nil { + return nil, "", fmt.Errorf("%w: reading policy: %s", ErrPolicySyntax, err) + } + policyText = string(buf) + policy, err = ParsePolicy(policyText) + if err != nil { + return nil, policyText, fmt.Errorf("parsing policy: %w", err) + } + return policy, policyText, nil +} + +// Get looks up the MTA-STS DNS record and fetches the policy. +// +// Errors can be those returned by LookupRecord and FetchPolicy. +// +// If a valid policy cannot be retrieved, a sender must treat the domain as not +// implementing MTA-STS. If a sender has a non-expired cached policy, that policy +// would still apply. +// +// If a record was retrieved, but a policy could not be retrieved/parsed, the +// record is still returned. +// +// Also see Get in package mtastsdb. +func Get(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (record *Record, policy *Policy, err error) { + log := xlog.WithContext(ctx) + start := time.Now() + result := "lookuperror" + defer func() { + metricGet.WithLabelValues(result).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("mtasts get result", err, mlog.Field("domain", domain), mlog.Field("record", record), mlog.Field("policy", policy), mlog.Field("duration", time.Since(start))) + }() + + record, _, _, err = LookupRecord(ctx, resolver, domain) + if err != nil { + return nil, nil, err + } + + result = "fetcherror" + policy, _, err = FetchPolicy(ctx, domain) + if err != nil { + return record, nil, err + } + + result = "ok" + return record, policy, nil +} diff --git a/mtasts/mtasts_test.go b/mtasts/mtasts_test.go new file mode 100644 index 0000000..3d11bca --- /dev/null +++ b/mtasts/mtasts_test.go @@ -0,0 +1,267 @@ +package mtasts + +import ( + "context" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "errors" + "io" + "log" + "math/big" + "net" + "net/http" + "reflect" 
+ "strings" + "sync" + "testing" + "time" + + "github.com/mjl-/mox/dns" +) + +func TestLookup(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_mta-sts.a.example.": {"v=STSv1; id=1"}, + "_mta-sts.one.example.": {"v=STSv1; id=1", "bogus"}, + "_mta-sts.bad.example.": {"v=STSv1; bogus"}, + "_mta-sts.multiple.example.": {"v=STSv1; id=1", "v=STSv1; id=2"}, + "_mta-sts.c.cnames.example.": {"v=STSv1; id=1"}, + "_mta-sts.temperror.example.": {"v=STSv1; id=1"}, + "_mta-sts.other.example.": {"bogus", "more"}, + }, + CNAME: map[string]string{ + "_mta-sts.a.cnames.example.": "_mta-sts.b.cnames.example.", + "_mta-sts.b.cnames.example.": "_mta-sts.c.cnames.example.", + "_mta-sts.followtemperror.example.": "_mta-sts.cnametemperror.example.", + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "_mta-sts.temperror.example."}: {}, + {Type: "cname", Name: "_mta-sts.cnametemperror.example."}: {}, + }, + } + + test := func(host string, expRecord *Record, expCNAMEs []string, expErr error) { + t.Helper() + + record, _, cnames, err := LookupRecord(context.Background(), resolver, dns.Domain{ASCII: host}) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("lookup: got err %#v, expected %#v", err, expErr) + } + if err != nil { + return + } + if !reflect.DeepEqual(record, expRecord) || !reflect.DeepEqual(cnames, expCNAMEs) { + t.Fatalf("lookup: got record %#v, cnames %#v, expected %#v %#v", record, cnames, expRecord, expCNAMEs) + } + } + + test("absent.example", nil, nil, ErrNoRecord) + test("other.example", nil, nil, ErrNoRecord) + test("a.example", &Record{Version: "STSv1", ID: "1"}, nil, nil) + test("one.example", &Record{Version: "STSv1", ID: "1"}, nil, nil) + test("bad.example", nil, nil, ErrRecordSyntax) + test("multiple.example", nil, nil, ErrMultipleRecords) + test("a.cnames.example", &Record{Version: "STSv1", ID: "1"}, []string{"_mta-sts.b.cnames.example.", "_mta-sts.c.cnames.example."}, nil) + 
test("temperror.example", nil, nil, ErrDNS) + test("cnametemperror.example", nil, nil, ErrDNS) + test("followtemperror.example", nil, nil, ErrDNS) +} + +func TestMatches(t *testing.T) { + p, err := ParsePolicy("version: STSv1\nmode: enforce\nmax_age: 1\nmx: a.example\nmx: *.b.example\n") + if err != nil { + t.Fatalf("parsing policy: %s", err) + } + + mustParseDomain := func(s string) dns.Domain { + t.Helper() + d, err := dns.ParseDomain(s) + if err != nil { + t.Fatalf("parsing domain %q: %s", s, err) + } + return d + } + + match := func(s string) { + t.Helper() + if !p.Matches(mustParseDomain(s)) { + t.Fatalf("unexpected mismatch for %q", s) + } + } + + not := func(s string) { + t.Helper() + if p.Matches(mustParseDomain(s)) { + t.Fatalf("unexpected match for %q", s) + } + } + + match("a.example") + match("sub.b.example") + not("b.example") + not("sub.sub.b.example") + not("other") +} + +type pipeListener struct { + sync.Mutex + closed bool + C chan net.Conn +} + +var _ net.Listener = &pipeListener{} + +func newPipeListener() *pipeListener { return &pipeListener{C: make(chan net.Conn)} } +func (l *pipeListener) Dial() (net.Conn, error) { + l.Lock() + defer l.Unlock() + if l.closed { + return nil, errors.New("closed") + } + c, s := net.Pipe() + l.C <- s + return c, nil +} +func (l *pipeListener) Accept() (net.Conn, error) { + conn := <-l.C + if conn == nil { + return nil, io.EOF + } + return conn, nil +} +func (l *pipeListener) Close() error { + l.Lock() + defer l.Unlock() + if !l.closed { + l.closed = true + close(l.C) + } + return nil +} +func (l *pipeListener) Addr() net.Addr { return pipeAddr{} } + +type pipeAddr struct{} + +func (a pipeAddr) Network() string { return "pipe" } +func (a pipeAddr) String() string { return "pipe" } + +func fakeCert(t *testing.T, expired bool) tls.Certificate { + notAfter := time.Now() + if expired { + notAfter = notAfter.Add(-time.Hour) + } else { + notAfter = notAfter.Add(time.Hour) + } + + privKey := 
ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real! + + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), // Required field... + DNSNames: []string{"mta-sts.mox.example"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: notAfter, + } + localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey) + if err != nil { + t.Fatalf("making certificate: %s", err) + } + cert, err := x509.ParseCertificate(localCertBuf) + if err != nil { + t.Fatalf("parsing generated certificate: %s", err) + } + c := tls.Certificate{ + Certificate: [][]byte{localCertBuf}, + PrivateKey: privKey, + Leaf: cert, + } + return c +} + +func TestFetch(t *testing.T) { + certok := fakeCert(t, false) + certbad := fakeCert(t, true) + + defer func() { + HTTPClient.Transport = nil + }() + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_mta-sts.mox.example.": {"v=STSv1; id=1"}, + "_mta-sts.other.example.": {"v=STSv1; id=1"}, + }, + } + + test := func(cert tls.Certificate, domain string, status int, policyText string, expPolicy *Policy, expErr error) { + t.Helper() + + pool := x509.NewCertPool() + pool.AddCert(cert.Leaf) + + l := newPipeListener() + defer l.Close() + go func() { + mux := &http.ServeMux{} + mux.HandleFunc("/.well-known/mta-sts.txt", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Location", "/other") // Ignored except for redirect. 
+ w.WriteHeader(status) + w.Write([]byte(policyText)) + }) + s := &http.Server{ + Handler: mux, + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{cert}, + }, + ErrorLog: log.New(io.Discard, "", 0), + } + s.ServeTLS(l, "", "") + }() + + HTTPClient.Transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + if strings.HasPrefix(addr, "mta-sts.doesnotexist.example") { + return nil, &net.DNSError{IsNotFound: true} + } + return l.Dial() + }, + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + } + + p, _, err := FetchPolicy(context.Background(), dns.Domain{ASCII: domain}) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("policy: got err %#v, expected %#v", err, expErr) + } + if err == nil && !reflect.DeepEqual(p, expPolicy) { + t.Fatalf("policy: got %#v, expected %#v", p, expPolicy) + } + + if domain == "doesnotexist.example" { + expErr = ErrNoRecord + } + + _, p, err = Get(context.Background(), resolver, dns.Domain{ASCII: domain}) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("get: got err %#v, expected %#v", err, expErr) + } + if err == nil && !reflect.DeepEqual(p, expPolicy) { + t.Fatalf("get: got %#v, expected %#v", p, expPolicy) + } + } + + test(certok, "mox.example", 200, "bogus", nil, ErrPolicySyntax) + test(certok, "other.example", 200, "bogus", nil, ErrPolicyFetch) + test(certbad, "mox.example", 200, "bogus", nil, ErrPolicyFetch) + test(certok, "mox.example", 404, "bogus", nil, ErrNoPolicy) + test(certok, "doesnotexist.example", 200, "bogus", nil, ErrNoPolicy) + test(certok, "mox.example", 301, "bogus", nil, ErrPolicyFetch) + test(certok, "mox.example", 500, "bogus", nil, ErrPolicyFetch) + large := make([]byte, 64*1024+2) + test(certok, "mox.example", 200, string(large), nil, ErrPolicySyntax) + validPolicy := "version:STSv1\nmode:none\nmax_age:1" + test(certok, "mox.example", 200, validPolicy, &Policy{Version: "STSv1", Mode: "none", 
MaxAgeSeconds: 1}, nil) +} diff --git a/mtasts/parse.go b/mtasts/parse.go new file mode 100644 index 0000000..1d31a35 --- /dev/null +++ b/mtasts/parse.go @@ -0,0 +1,347 @@ +package mtasts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/mjl-/mox/dns" +) + +type parseErr string + +func (e parseErr) Error() string { + return string(e) +} + +var _ error = parseErr("") + +// ParseRecord parses an MTA-STS record. +func ParseRecord(txt string) (record *Record, ismtasts bool, err error) { + defer func() { + x := recover() + if x == nil { + return + } + if xerr, ok := x.(parseErr); ok { + record = nil + err = fmt.Errorf("%w: %s", ErrRecordSyntax, xerr) + return + } + panic(x) + }() + + // Parsing is mostly case-sensitive. + // ../rfc/8461:306 + p := newParser(txt) + record = &Record{ + Version: "STSv1", + } + seen := map[string]struct{}{} + p.xtake("v=STSv1") + p.xdelim() + ismtasts = true + for { + k := p.xkey() + p.xtake("=") + + // Section 3.1 about the TXT record does not say anything about duplicate fields. + // But section 3.2 about (parsing) policies has a paragraph that starts + // requirements on both TXT and policy records. That paragraph ends with a note + // about handling duplicate fields. Let's assume that note also applies to TXT + // records. ../rfc/8461:517 + _, dup := seen[k] + seen[k] = struct{}{} + + switch k { + case "id": + if !dup { + record.ID = p.xid() + } + default: + v := p.xvalue() + record.Extensions = append(record.Extensions, Pair{k, v}) + } + if !p.delim() || p.empty() { + break + } + } + if !p.empty() { + p.xerrorf("leftover characters") + } + if record.ID == "" { + p.xerrorf("missing id") + } + return +} + +// ParsePolicy parses an MTA-STS policy. 
+func ParsePolicy(s string) (policy *Policy, err error) { + defer func() { + x := recover() + if x == nil { + return + } + if xerr, ok := x.(parseErr); ok { + policy = nil + err = fmt.Errorf("%w: %s", ErrPolicySyntax, xerr) + return + } + panic(x) + }() + + // ../rfc/8461:426 + p := newParser(s) + policy = &Policy{ + Version: "STSv1", + } + seen := map[string]struct{}{} + for { + k := p.xkey() + // For fields except "mx", only the first must be used. ../rfc/8461:517 + _, dup := seen[k] + seen[k] = struct{}{} + p.xtake(":") + p.wsp() + switch k { + case "version": + policy.Version = p.xtake("STSv1") + case "mode": + mode := Mode(p.xtakelist("testing", "enforce", "none")) + if !dup { + policy.Mode = mode + } + case "max_age": + maxage := p.xmaxage() + if !dup { + policy.MaxAgeSeconds = maxage + } + case "mx": + policy.MX = append(policy.MX, p.xmx()) + default: + v := p.xpolicyvalue() + policy.Extensions = append(policy.Extensions, Pair{k, v}) + } + p.wsp() + if !p.eol() || p.empty() { + break + } + } + if !p.empty() { + p.xerrorf("leftover characters") + } + required := []string{"version", "mode", "max_age"} + for _, req := range required { + if _, ok := seen[req]; !ok { + p.xerrorf("missing field %q", req) + } + } + if _, ok := seen["mx"]; !ok && policy.Mode != ModeNone { + // ../rfc/8461:437 + p.xerrorf("missing mx given mode") + } + return +} + +type parser struct { + s string + o int +} + +func newParser(s string) *parser { + return &parser{s: s} +} + +func (p *parser) xerrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) 
+ if p.o < len(p.s) { + msg += fmt.Sprintf(" (remain %q)", p.s[p.o:]) + } + panic(parseErr(msg)) +} + +func (p *parser) xtake(s string) string { + if !p.prefix(s) { + p.xerrorf("expected %q", s) + } + p.o += len(s) + return s +} + +func (p *parser) xdelim() { + if !p.delim() { + p.xerrorf("expected semicolon") + } +} + +func (p *parser) xtaken(n int) string { + r := p.s[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) xtakefn1(fn func(rune, int) bool) string { + for i, b := range p.s[p.o:] { + if !fn(b, i) { + if i == 0 { + p.xerrorf("expected at least one char") + } + return p.xtaken(i) + } + } + if p.empty() { + p.xerrorf("expected at least 1 char") + } + return p.xtaken(len(p.s) - p.o) +} + +func (p *parser) prefix(s string) bool { + return strings.HasPrefix(p.s[p.o:], s) +} + +// File name, the known values match this syntax. +// ../rfc/8461:482 +func (p *parser) xkey() string { + return p.xtakefn1(func(b rune, i int) bool { + return i < 32 && (b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' || b >= '0' && b <= '9' || (i > 0 && b == '_' || b == '-' || b == '.')) + }) +} + +// ../rfc/8461:319 +func (p *parser) xid() string { + return p.xtakefn1(func(b rune, i int) bool { + return i < 32 && (b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' || b >= '0' && b <= '9') + }) +} + +// ../rfc/8461:326 +func (p *parser) xvalue() string { + return p.xtakefn1(func(b rune, i int) bool { + return b > ' ' && b < 0x7f && b != '=' && b != ';' + }) +} + +// ../rfc/8461:315 +func (p *parser) delim() bool { + o := p.o + e := len(p.s) + for o < e && (p.s[o] == ' ' || p.s[o] == '\t') { + o++ + } + if o >= e || p.s[o] != ';' { + return false + } + o++ + for o < e && (p.s[o] == ' ' || p.s[o] == '\t') { + o++ + } + p.o = o + return true +} + +func (p *parser) empty() bool { + return p.o >= len(p.s) +} + +// ../rfc/8461:485 +func (p *parser) eol() bool { + return p.take("\n") || p.take("\r\n") +} + +func (p *parser) xtakelist(l ...string) string { + for _, s := range l { + if 
p.prefix(s) { + return p.xtaken(len(s)) + } + } + p.xerrorf("expected one of %s", strings.Join(l, ", ")) + return "" // not reached +} + +// ../rfc/8461:476 +func (p *parser) xmaxage() int { + digits := p.xtakefn1(func(b rune, i int) bool { + return b >= '0' && b <= '9' && i < 10 + }) + v, err := strconv.ParseInt(digits, 10, 32) + if err != nil { + p.xerrorf("parsing int: %s", err) + } + return int(v) +} + +func (p *parser) take(s string) bool { + if p.prefix(s) { + p.o += len(s) + return true + } + return false +} + +// ../rfc/8461:469 +func (p *parser) xmx() (mx STSMX) { + if p.prefix("*.") { + mx.Wildcard = true + p.o += 2 + } + mx.Domain = p.xdomain() + return mx +} + +// ../rfc/5321:2291 +func (p *parser) xdomain() dns.Domain { + s := p.xsubdomain() + for p.take(".") { + s += "." + p.xsubdomain() + } + d, err := dns.ParseDomain(s) + if err != nil { + p.xerrorf("parsing domain %q: %s", s, err) + } + return d +} + +// ../rfc/8461:487 +func (p *parser) xsubdomain() string { + // note: utf-8 is valid, but U-labels are explicitly not allowed. ../rfc/8461:411 ../rfc/5321:2303 + unicode := false + s := p.xtakefn1(func(c rune, i int) bool { + if c > 0x7f { + unicode = true + } + return c >= '0' && c <= '9' || c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || (i > 0 && c == '-') || c > 0x7f + }) + if unicode { + p.xerrorf("domain must be specified in A labels, not U labels (unicode)") + } + return s +} + +// ../rfc/8461:487 +func (p *parser) xpolicyvalue() string { + e := len(p.s) + for i, c := range p.s[p.o:] { + if c > ' ' && c < 0x7f || c >= 0x80 || (c == ' ' && i > 0) { + continue + } + e = p.o + i + break + } + // Walk back on trailing spaces. 
+ for e > p.o && p.s[e-1] == ' ' { + e-- + } + n := e - p.o + if n <= 0 { + p.xerrorf("empty extension value") + } + return p.xtaken(n) +} + +// "*WSP" +func (p *parser) wsp() { + n := len(p.s) + for p.o < n && (p.s[p.o] == ' ' || p.s[p.o] == '\t') { + p.o++ + } +} diff --git a/mtasts/parse_test.go b/mtasts/parse_test.go new file mode 100644 index 0000000..895f1a5 --- /dev/null +++ b/mtasts/parse_test.go @@ -0,0 +1,237 @@ +package mtasts + +import ( + "reflect" + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestRecord(t *testing.T) { + good := func(txt string, want Record) { + t.Helper() + r, _, err := ParseRecord(txt) + if err != nil { + t.Fatalf("parse: %s", err) + } + if !reflect.DeepEqual(r, &want) { + t.Fatalf("want %#v, got %#v", want, *r) + } + } + + bad := func(txt string) { + t.Helper() + r, _, err := ParseRecord(txt) + if err == nil { + t.Fatalf("parse, expected error, got record %v", r) + } + } + + good("v=STSv1; id=20160831085700Z;", Record{Version: "STSv1", ID: "20160831085700Z"}) + good("v=STSv1; \t id=20160831085700Z \t;", Record{Version: "STSv1", ID: "20160831085700Z"}) + good("v=STSv1; id=a", Record{Version: "STSv1", ID: "a"}) + good("v=STSv1; id=a; more=a; ext=2", Record{Version: "STSv1", ID: "a", Extensions: []Pair{{"more", "a"}, {"ext", "2"}}}) + + bad("v=STSv0") + bad("v=STSv10") + bad("v=STSv2") + bad("v=STSv1") // missing id + bad("v=STSv1;") // missing id + bad("v=STSv1; ext=1") // missing id + bad("v=STSv1; id=") // empty id + bad("v=STSv1; id=012345678901234567890123456789012") // id too long + bad("v=STSv1; id=test-123") // invalid id + bad("v=STSv1; id=a; more=") // empty value in extension + bad("v=STSv1; id=a; a12345678901234567890123456789012=1") // extension name too long + bad("v=STSv1; id=a; 1%=a") // invalid extension name + bad("v=STSv1; id=a; test==") // invalid extension name + bad("v=STSv1; id=a;;") // additional semicolon + + const want = `v=STSv1; id=a; more=a; ext=2` + record := Record{Version: "STSv1", ID: "a", 
Extensions: []Pair{{"more", "a"}, {"ext", "2"}}} + got := record.String() + if got != want { + t.Fatalf("record string, got %q, want %q", got, want) + } +} + +func TestParsePolicy(t *testing.T) { + good := func(s string, want Policy) { + t.Helper() + p, err := ParsePolicy(s) + if err != nil { + t.Fatalf("parse policy: %s", err) + } + if !reflect.DeepEqual(p, &want) { + t.Fatalf("want %v, got %v", want, p) + } + } + + good(`version: STSv1 +mode: testing +mx: mx1.example.com +mx: mx2.example.com +mx: mx.backup-example.com +max_age: 1296000 +`, + Policy{ + Version: "STSv1", + Mode: ModeTesting, + MX: []STSMX{ + {Domain: dns.Domain{ASCII: "mx1.example.com"}}, + {Domain: dns.Domain{ASCII: "mx2.example.com"}}, + {Domain: dns.Domain{ASCII: "mx.backup-example.com"}}, + }, + MaxAgeSeconds: 1296000, + }, + ) + good("version: STSv1\nmode: enforce \nmx: *.example.com \nmax_age: 0 \n", + Policy{ + Version: "STSv1", + Mode: ModeEnforce, + MX: []STSMX{ + {Wildcard: true, Domain: dns.Domain{ASCII: "example.com"}}, + }, + MaxAgeSeconds: 0, + }, + ) + good("version:STSv1\r\nmode:\tenforce\r\nmx: \t\t *.example.com\nmax_age: 1\nmore:ext e ns ion", + Policy{ + Version: "STSv1", + Mode: ModeEnforce, + MX: []STSMX{ + {Wildcard: true, Domain: dns.Domain{ASCII: "example.com"}}, + }, + MaxAgeSeconds: 1, + Extensions: []Pair{{"more", "ext e ns ion"}}, + }, + ) + + bad := func(s string) { + t.Helper() + p, err := ParsePolicy(s) + if err == nil { + t.Fatalf("parsing policy did not fail: %v", p) + } + } + + bad("") // missing version + bad("version:STSv0\nmode:none\nmax_age:0") // bad version + bad("version:STSv10\nmode:none\nmax_age:0") // bad version + bad("version:STSv2\nmode:none\nmax_age:0") // bad version + bad("version:STSv1\nmax_age:0\nmx:example.com") // missing mode + bad("version:STSv1\nmode:none") // missing max_age + bad("version:STSv1\nmax_age:0\nmode:enforce") // missing mx for mode + bad("version:STSv1\nmax_age:0\nmode:testing") // missing mx for mode + 
bad("max_age:0\nmode:none") // missing version + bad("version:STSv1\nmode:none\nmax_age:01234567890") // max_age too long + bad("version:STSv1\nmode:bad\nmax_age:1") // bad mode + bad("version:STSv1\nmode:none\nmax_age:a") // bad max_age + bad("version:STSv1\nmode:enforce\nmax_age:0\nmx:") // missing value + bad("version:STSv1\nmode:enforce\nmax_age:0\nmx:*.*.example") // bad mx + bad("version:STSv1\nmode:enforce\nmax_age:0\nmx:**.example") // bad mx + bad("version:STSv1\nmode:enforce\nmax_age:0\nmx:**.example-") // bad mx + bad("version:STSv1\nmode:enforce\nmax_age:0\nmx:test.example-") // bad mx + bad("version:STSv1\nmode:none\nmax_age:0\next:") // empty extension + bad("version:STSv1\nmode:none\nmax_age:0\na12345678901234567890123456789012:123") // long extension name + bad("version:STSv1\nmode:none\nmax_age:0\n_bad:test") // bad ext name + bad("version:STSv1\nmode:none\nmax_age:0\nmx: møx.example") // invalid u-label in mx + + policy := Policy{ + Version: "STSv1", + Mode: ModeTesting, + MX: []STSMX{ + {Domain: dns.Domain{ASCII: "mx1.example.com"}}, + {Domain: dns.Domain{ASCII: "mx2.example.com"}}, + {Domain: dns.Domain{ASCII: "mx.backup-example.com"}}, + }, + MaxAgeSeconds: 1296000, + } + want := `version: STSv1 +mode: testing +max_age: 1296000 +mx: mx1.example.com +mx: mx2.example.com +mx: mx.backup-example.com +` + got := policy.String() + if got != want { + t.Fatalf("policy string, got %q, want %q", got, want) + } +} + +func FuzzParseRecord(f *testing.F) { + f.Add("v=STSv1; id=20160831085700Z;") + f.Add("v=STSv1; \t id=20160831085700Z \t;") + f.Add("v=STSv1; id=a") + f.Add("v=STSv1; id=a; more=a; ext=2") + + f.Add("v=STSv0") + f.Add("v=STSv10") + f.Add("v=STSv2") + f.Add("v=STSv1") // missing id + f.Add("v=STSv1;") // missing id + f.Add("v=STSv1; ext=1") // missing id + f.Add("v=STSv1; id=") // empty id + f.Add("v=STSv1; id=012345678901234567890123456789012") // id too long + f.Add("v=STSv1; id=test-123") // invalid id + f.Add("v=STSv1; id=a; more=") // 
empty value in extension + f.Add("v=STSv1; id=a; a12345678901234567890123456789012=1") // extension name too long + f.Add("v=STSv1; id=a; 1%=a") // invalid extension name + f.Add("v=STSv1; id=a; test==") // invalid extension name + f.Add("v=STSv1; id=a;;") // additional semicolon + + f.Fuzz(func(t *testing.T, s string) { + r, _, err := ParseRecord(s) + if err == nil { + _ = r.String() + } + }) +} + +func FuzzParsePolicy(f *testing.F) { + f.Add(`version: STSv1 +mode: testing +mx: mx1.example.com +mx: mx2.example.com +mx: mx.backup-example.com +max_age: 1296000 +`) + f.Add(`version: STSv1 +mode: enforce +mx: *.example.com +max_age: 0 +`) + f.Add("version:STSv1\r\nmode:\tenforce\r\nmx: \t\t *.example.com\nmax_age: 1\nmore:ext e ns ion") + + f.Add("") // missing version + f.Add("version:STSv0\nmode:none\nmax_age:0") // bad version + f.Add("version:STSv10\nmode:none\nmax_age:0") // bad version + f.Add("version:STSv2\nmode:none\nmax_age:0") // bad version + f.Add("version:STSv1\nmax_age:0\nmx:example.com") // missing mode + f.Add("version:STSv1\nmode:none") // missing max_age + f.Add("version:STSv1\nmax_age:0\nmode:enforce") // missing mx for mode + f.Add("version:STSv1\nmax_age:0\nmode:testing") // missing mx for mode + f.Add("max_age:0\nmode:none") // missing version + f.Add("version:STSv1\nmode:none\nmax_age:0 ") // trailing whitespace + f.Add("version:STSv1\nmode:none\nmax_age:01234567890") // max_age too long + f.Add("version:STSv1\nmode:bad\nmax_age:1") // bad mode + f.Add("version:STSv1\nmode:none\nmax_age:a") // bad max_age + f.Add("version:STSv1\nmode:enforce\nmax_age:0\nmx:") // missing value + f.Add("version:STSv1\nmode:enforce\nmax_age:0\nmx:*.*.example") // bad mx + f.Add("version:STSv1\nmode:enforce\nmax_age:0\nmx:**.example") // bad mx + f.Add("version:STSv1\nmode:enforce\nmax_age:0\nmx:**.example-") // bad mx + f.Add("version:STSv1\nmode:enforce\nmax_age:0\nmx:test.example-") // bad mx + f.Add("version:STSv1\nmode:none\nmax_age:0\next:") // empty 
extension + f.Add("version:STSv1\nmode:none\nmax_age:0\next:abc ") // trailing space + f.Add("version:STSv1\nmode:none\nmax_age:0\next:a\t") // invalid char + f.Add("version:STSv1\nmode:none\nmax_age:0\na12345678901234567890123456789012:123") // long extension name + f.Add("version:STSv1\nmode:none\nmax_age:0\n_bad:test") // bad ext name + + f.Fuzz(func(t *testing.T, s string) { + r, err := ParsePolicy(s) + if err == nil { + _ = r.String() + } + }) +} diff --git a/mtastsdb/db.go b/mtastsdb/db.go new file mode 100644 index 0000000..fc211dc --- /dev/null +++ b/mtastsdb/db.go @@ -0,0 +1,285 @@ +// Package mtastsdb stores MTA-STS policies for later use. +// +// An MTA-STS policy can specify how long it may be cached. By storing a +// policy, it does not have to be fetched again during email delivery, which +// makes it harder for attackers to intervene. +package mtastsdb + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/mtasts" +) + +var xlog = mlog.New("mtastsdb") + +var ( + metricGet = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_mtastsdb_get_total", + Help: "Number of Get by result.", + }, + []string{"result"}, + ) +) + +var timeNow = time.Now // Tests override this. + +// PolicyRecord is a cached policy or absence of a policy. +type PolicyRecord struct { + Domain string // Domain name, with unicode characters. + Inserted time.Time `bstore:"default now"` + ValidEnd time.Time + LastUpdate time.Time // Policies are refreshed on use and periodically. + LastUse time.Time `bstore:"index"` + Backoff bool + RecordID string // As retrieved from DNS. + mtasts.Policy // As retrieved from the well-known HTTPS url. +} + +var ( + // No valid non-expired policy in database. 
+ ErrNotFound = errors.New("mtastsdb: policy not found") + + // Indicates an MTA-STS TXT record was fetched recently, but fetching the policy + // failed and should not yet be retried. + ErrBackoff = errors.New("mtastsdb: policy fetch failed recently") +) + +var mtastsDB *bstore.DB +var mutex sync.Mutex + +func database() (rdb *bstore.DB, rerr error) { + mutex.Lock() + defer mutex.Unlock() + if mtastsDB == nil { + p := mox.DataDirPath("mtasts.db") + os.MkdirAll(filepath.Dir(p), 0770) + db, err := bstore.Open(p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, PolicyRecord{}) + if err != nil { + return nil, err + } + mtastsDB = db + } + return mtastsDB, nil +} + +// Init opens the database and starts a goroutine that refreshes policies in +// the database, and keeps doing so periodically. +func Init(refresher bool) error { + _, err := database() + if err != nil { + return err + } + + if refresher { + // todo: allow us to shut down cleanly? + go refresh() + } + + return nil +} + +// Close closes the database. +func Close() { + mutex.Lock() + defer mutex.Unlock() + if mtastsDB != nil { + mtastsDB.Close() + mtastsDB = nil + } +} + +// Lookup looks up a policy for the domain in the database. +// +// Only non-expired records are returned. 
+func lookup(ctx context.Context, domain dns.Domain) (*PolicyRecord, error) { + log := xlog.WithContext(ctx) + db, err := database() + if err != nil { + return nil, err + } + + if domain.IsZero() { + return nil, fmt.Errorf("empty domain") + } + now := timeNow() + q := bstore.QueryDB[PolicyRecord](db) + q.FilterNonzero(PolicyRecord{Domain: domain.Name()}) + q.FilterGreater("ValidEnd", now) + pr, err := q.Get() + if err == bstore.ErrAbsent { + return nil, ErrNotFound + } else if err != nil { + return nil, err + } + + pr.LastUse = now + if err := db.Update(&pr); err != nil { + log.Errorx("marking cached mta-sts policy as used in database", err) + } + if pr.Backoff { + return nil, ErrBackoff + } + return &pr, nil +} + +// Upsert adds the policy to the database, overwriting an existing policy for the domain. +// Policy can be nil, indicating a failure to fetch the policy. +func Upsert(domain dns.Domain, recordID string, policy *mtasts.Policy) error { + db, err := database() + if err != nil { + return err + } + + return db.Write(func(tx *bstore.Tx) error { + pr := PolicyRecord{Domain: domain.Name()} + err := tx.Get(&pr) + if err != nil && err != bstore.ErrAbsent { + return err + } + + now := timeNow() + + var p mtasts.Policy + if policy != nil { + p = *policy + } else { + // ../rfc/8461:552 + p.Mode = mtasts.ModeNone + p.MaxAgeSeconds = 5 * 60 + } + backoff := policy == nil + validEnd := now.Add(time.Duration(p.MaxAgeSeconds) * time.Second) + + if err == bstore.ErrAbsent { + pr = PolicyRecord{domain.Name(), now, validEnd, now, now, backoff, recordID, p} + return tx.Insert(&pr) + } + + pr.ValidEnd = validEnd + pr.LastUpdate = now + pr.LastUse = now + pr.Backoff = backoff + pr.RecordID = recordID + pr.Policy = p + return tx.Update(&pr) + }) +} + +// PolicyRecords returns all policies in the database, sorted descending by last +// use, domain. 
+func PolicyRecords(ctx context.Context) ([]PolicyRecord, error) { + db, err := database() + if err != nil { + return nil, err + } + return bstore.QueryDB[PolicyRecord](db).SortDesc("LastUse", "Domain").List() +} + +// Get retrieves an MTA-STS policy for domain and whether it is fresh. +// +// If an error is returned, it should be considered a transient error, e.g. a +// temporary DNS lookup failure. +// +// The returned policy can be nil also when there is no error. In this case, the +// domain does not implement MTA-STS. +// +// If a policy is present in the local database, it is refreshed if needed. If no +// policy is present for the domain, an attempt is made to fetch the policy and +// store it in the local database. +// +// Some errors are logged but not otherwise returned, e.g. if a new policy is +// supposedly published but could not be retrieved. +func Get(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (policy *mtasts.Policy, fresh bool, err error) { + log := xlog.WithContext(ctx) + defer func() { + result := "ok" + if err != nil && errors.Is(err, ErrBackoff) { + result = "backoff" + } else if err != nil && errors.Is(err, ErrNotFound) { + result = "notfound" + } else if err != nil { + result = "error" + } + metricGet.WithLabelValues(result).Inc() + log.Debugx("mtastsdb get result", err, mlog.Field("domain", domain), mlog.Field("fresh", fresh)) + }() + + cachedPolicy, err := lookup(ctx, domain) + if err != nil && errors.Is(err, ErrNotFound) { + // We don't have a policy for this domain, not even a record that we tried recently + // and should backoff. So attempt to fetch policy. 
+ nctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + record, p, err := mtasts.Get(nctx, resolver, domain) + if err != nil { + switch { + case errors.Is(err, mtasts.ErrNoRecord) || errors.Is(err, mtasts.ErrMultipleRecords) || errors.Is(err, mtasts.ErrRecordSyntax) || errors.Is(err, mtasts.ErrNoPolicy) || errors.Is(err, mtasts.ErrPolicyFetch) || errors.Is(err, mtasts.ErrPolicySyntax): + // Remote is not doing MTA-STS, continue below. ../rfc/8461:333 ../rfc/8461:574 + default: + // Interpret as temporary error, e.g. mtasts.ErrDNS, try again later. + return nil, false, fmt.Errorf("lookup up mta-sts policy: %w", err) + } + } + // Insert policy into database. If we could not fetch the policy itself, we back + // off for 5 minutes. ../rfc/8461:555 + if err == nil || errors.Is(err, mtasts.ErrNoPolicy) || errors.Is(err, mtasts.ErrPolicyFetch) || errors.Is(err, mtasts.ErrPolicySyntax) { + var recordID string + if record != nil { + recordID = record.ID + } + if err := Upsert(domain, recordID, p); err != nil { + log.Errorx("inserting policy into cache, continuing", err) + } + } + return p, true, nil + } else if err != nil && errors.Is(err, ErrBackoff) { + // ../rfc/8461:552 + // We recently failed to fetch a policy, act as if MTA-STS is not implemented. + return nil, false, nil + } else if err != nil { + return nil, false, fmt.Errorf("looking up mta-sts policy in cache: %w", err) + } + + // Policy was found in database. Check in DNS it is still fresh. + policy = &cachedPolicy.Policy + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + record, _, _, err := mtasts.LookupRecord(ctx, resolver, domain) + if err != nil { + if !errors.Is(err, mtasts.ErrNoRecord) { + // Could be a temporary DNS or configuration error. 
+ log.Errorx("checking for freshness of cached mta-sts dns txt record for domain, continuing with previously cached policy", err) + } + return policy, false, nil + } else if record.ID == cachedPolicy.RecordID { + return policy, true, nil + } + // New policy should be available. + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + p, _, err := mtasts.FetchPolicy(ctx, domain) + if err != nil { + log.Errorx("fetching updated policy for domain, continuing with previously cached policy", err) + return policy, false, nil + } + if err := Upsert(domain, record.ID, p); err != nil { + log.Errorx("inserting refreshed policy into cache, continuing with fresh policy", err) + } + return p, true, nil +} diff --git a/mtastsdb/db_test.go b/mtastsdb/db_test.go new file mode 100644 index 0000000..a986e8d --- /dev/null +++ b/mtastsdb/db_test.go @@ -0,0 +1,158 @@ +package mtastsdb + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/mtasts" +) + +func tcheckf(t *testing.T, err error, format string, args ...any) { + if err != nil { + t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err) + } +} + +func TestDB(t *testing.T) { + mox.ConfigStaticPath = "../testdata/mtasts/fake.conf" + mox.Conf.Static.DataDir = "." + + dbpath := mox.DataDirPath("mtasts.db") + os.MkdirAll(filepath.Dir(dbpath), 0770) + os.Remove(dbpath) + defer os.Remove(dbpath) + + if err := Init(false); err != nil { + t.Fatalf("init database: %s", err) + } + defer Close() + + ctx := context.Background() + + // Mock time. 
+ now := time.Now().Round(0) + timeNow = func() time.Time { return now } + defer func() { timeNow = time.Now }() + + if p, err := lookup(ctx, dns.Domain{ASCII: "example.com"}); err != ErrNotFound { + t.Fatalf("expected not found, got %v, %#v", err, p) + } + + policy1 := mtasts.Policy{ + Version: "STSv1", + Mode: mtasts.ModeTesting, + MX: []mtasts.STSMX{ + {Domain: dns.Domain{ASCII: "mx1.example.com"}}, + {Domain: dns.Domain{ASCII: "mx2.example.com"}}, + {Domain: dns.Domain{ASCII: "mx.backup-example.com"}}, + }, + MaxAgeSeconds: 1296000, + } + if err := Upsert(dns.Domain{ASCII: "example.com"}, "123", &policy1); err != nil { + t.Fatalf("upsert record: %s", err) + } + if got, err := lookup(ctx, dns.Domain{ASCII: "example.com"}); err != nil { + t.Fatalf("lookup after insert: %s", err) + } else if !reflect.DeepEqual(got.Policy, policy1) { + t.Fatalf("mismatch between inserted and retrieved: got %#v, want %#v", got, policy1) + } + + policy2 := mtasts.Policy{ + Version: "STSv1", + Mode: mtasts.ModeEnforce, + MX: []mtasts.STSMX{ + {Domain: dns.Domain{ASCII: "mx1.example.com"}}, + }, + MaxAgeSeconds: 360000, + } + if err := Upsert(dns.Domain{ASCII: "example.com"}, "124", &policy2); err != nil { + t.Fatalf("upsert record: %s", err) + } + if got, err := lookup(ctx, dns.Domain{ASCII: "example.com"}); err != nil { + t.Fatalf("lookup after insert: %s", err) + } else if !reflect.DeepEqual(got.Policy, policy2) { + t.Fatalf("mismatch between inserted and retrieved: got %v, want %v", got, policy2) + } + + // Check if database holds expected record. 
+ records, err := PolicyRecords(context.Background()) + tcheckf(t, err, "policyrecords") + expRecords := []PolicyRecord{ + {"example.com", now, now.Add(time.Duration(policy2.MaxAgeSeconds) * time.Second), now, now, false, "124", policy2}, + } + records[0].Policy = mtasts.Policy{} + expRecords[0].Policy = mtasts.Policy{} + if !reflect.DeepEqual(records, expRecords) { + t.Fatalf("records mismatch, got %#v, expected %#v", records, expRecords) + } + + if err := Upsert(dns.Domain{ASCII: "other.example.com"}, "", nil); err != nil { + t.Fatalf("upsert record: %s", err) + } + records, err = PolicyRecords(context.Background()) + tcheckf(t, err, "policyrecords") + expRecords = []PolicyRecord{ + {"other.example.com", now, now.Add(5 * 60 * time.Second), now, now, true, "", mtasts.Policy{Mode: mtasts.ModeNone, MaxAgeSeconds: 5 * 60}}, + {"example.com", now, now.Add(time.Duration(policy2.MaxAgeSeconds) * time.Second), now, now, false, "124", policy2}, + } + if !reflect.DeepEqual(records, expRecords) { + t.Fatalf("records mismatch, got %#v, expected %#v", records, expRecords) + } + + if _, err := lookup(context.Background(), dns.Domain{ASCII: "other.example.com"}); err != ErrBackoff { + t.Fatalf("got %#v, expected ErrBackoff", err) + } + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_mta-sts.example.com.": {"v=STSv1; id=124"}, + "_mta-sts.other.example.com.": {"v=STSv1; id=1"}, + "_mta-sts.temperror.example.com.": {""}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "_mta-sts.temperror.example.com."}: {}, + }, + } + + testGet := func(domain string, expPolicy *mtasts.Policy, expFresh bool, expErr error) { + t.Helper() + p, fresh, err := Get(context.Background(), resolver, dns.Domain{ASCII: domain}) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected %v", err, expErr) + } + if !reflect.DeepEqual(p, expPolicy) || fresh != expFresh { + t.Fatalf("got policy %#v, fresh %v, expected %#v, %v", p, 
fresh, expPolicy, expFresh) + } + } + + testGet("example.com", &policy2, true, nil) + testGet("other.example.com", nil, false, nil) // Back off, already in database. + testGet("absent.example.com", nil, true, nil) // No MTA-STS. + testGet("temperror.example.com", nil, false, mtasts.ErrDNS) + + // Force refetch of policy, that will fail. + mtasts.HTTPClient.Transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return nil, fmt.Errorf("bad") + }, + } + defer func() { + mtasts.HTTPClient.Transport = nil + }() + resolver.TXT["_mta-sts.example.com."] = []string{"v=STSv1; id=125"} + testGet("example.com", &policy2, false, nil) + + // Cached policy but no longer a DNS record. + delete(resolver.TXT, "_mta-sts.example.com.") + testGet("example.com", &policy2, false, nil) +} diff --git a/mtastsdb/refresh.go b/mtastsdb/refresh.go new file mode 100644 index 0000000..c86ab1f --- /dev/null +++ b/mtastsdb/refresh.go @@ -0,0 +1,176 @@ +package mtastsdb + +import ( + "context" + "errors" + "fmt" + mathrand "math/rand" + "runtime/debug" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/mtasts" +) + +func refresh() int { + interval := 24 * time.Hour + ticker := time.NewTicker(interval) + defer ticker.Stop() + + var refreshed int + + // Pro-actively refresh policies every 24 hours. ../rfc/8461:583 + for { + ticker.Reset(interval) + + ctx := context.WithValue(mox.Context, mlog.CidKey, mox.Cid()) + n, err := refresh1(ctx, dns.StrictResolver{Pkg: "mtastsdb"}, time.Sleep) + if err != nil { + xlog.WithContext(ctx).Errorx("periodic refresh of cached mtasts policies", err) + } + if n > 0 { + refreshed += n + } + + select { + case <-mox.Shutdown: + return refreshed + case <-ticker.C: + } + } +} + +// refresh policies that have not been updated in the past 12 hours and remove +// policies not used for 180 days. 
We start with the first domain immediately, so +// an admin can see any (configuration) issues that are logged. We spread the +// refreshes evenly over the next 3 hours, randomizing the domains, and we add some +// jitter to the timing. Each refresh is done in a new goroutine, so a single slow +// refresh doesn't mess up the timing. +func refresh1(ctx context.Context, resolver dns.Resolver, sleep func(d time.Duration)) (int, error) { + db, err := database() + if err != nil { + return 0, err + } + + now := timeNow() + qdel := bstore.QueryDB[PolicyRecord](db) + qdel.FilterLess("LastUse", now.Add(-180*24*time.Hour)) + if _, err := qdel.Delete(); err != nil { + return 0, fmt.Errorf("deleting old unused policies: %s", err) + } + + qup := bstore.QueryDB[PolicyRecord](db) + qup.FilterLess("LastUpdate", now.Add(-12*time.Hour)) + prs, err := qup.List() + if err != nil { + return 0, fmt.Errorf("querying policies to refresh: %s", err) + } + + if len(prs) == 0 { + // Nothing to do. + return 0, nil + } + + // Randomize list. + rand := mathrand.New(mathrand.NewSource(time.Now().UnixNano())) + for i := range prs { + if i == 0 { + continue + } + j := rand.Intn(i + 1) + prs[i], prs[j] = prs[j], prs[i] + } + + // Launch goroutine with the refresh. 
+ xlog.WithContext(ctx).Debug("will refresh mta-sts policies over next 3 hours", mlog.Field("count", len(prs))) + start := timeNow() + for i, pr := range prs { + go refreshDomain(ctx, db, resolver, pr) + if i < len(prs)-1 { + interval := 3 * int64(time.Hour) / int64(len(prs)-1) + extra := time.Duration(rand.Int63n(interval) - interval/2) + next := start.Add(time.Duration(int64(i+1)*interval) + extra) + d := next.Sub(timeNow()) + if d > 0 { + sleep(d) + } + } + } + return len(prs), nil +} + +func refreshDomain(ctx context.Context, db *bstore.DB, resolver dns.Resolver, pr PolicyRecord) { + log := xlog.WithContext(ctx) + defer func() { + x := recover() + if x != nil { + // Should not happen, but make sure errors don't take down the application. + log.Error("refresh1", mlog.Field("panic", x)) + debug.PrintStack() + metrics.PanicInc("mtastsdb") + } + }() + + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + d, err := dns.ParseDomain(pr.Domain) + if err != nil { + log.Errorx("refreshing mta-sts policy: parsing policy domain", err, mlog.Field("domain", d)) + return + } + log.Debug("refreshing mta-sts policy for domain", mlog.Field("domain", d)) + record, _, _, err := mtasts.LookupRecord(ctx, resolver, d) + if err == nil && record.ID == pr.RecordID { + qup := bstore.QueryDB[PolicyRecord](db) + qup.FilterNonzero(PolicyRecord{Domain: pr.Domain, LastUpdate: pr.LastUpdate}) + now := timeNow() + update := PolicyRecord{ + LastUpdate: now, + ValidEnd: now.Add(time.Duration(pr.MaxAgeSeconds) * time.Second), + } + if n, err := qup.UpdateNonzero(update); err != nil { + log.Errorx("updating refreshed, unmodified policy in database", err) + } else if n != 1 { + log.Info("expected to update 1 policy after refresh", mlog.Field("count", n)) + } + return + } + // ../rfc/8461:587 + if err != nil && pr.Mode == mtasts.ModeNone { + return + } else if err != nil { + log.Errorx("looking up mta-sts record for domain", err, mlog.Field("domain", d)) + // Try to fetch new 
policy. It could be just DNS that is down. We don't want to let our policy expire. + } + + p, _, err := mtasts.FetchPolicy(ctx, d) + if err != nil { + if !errors.Is(err, mtasts.ErrNoPolicy) || pr.Mode != mtasts.ModeNone { + log.Errorx("refreshing mtasts policy for domain", err, mlog.Field("domain", d)) + } + return + } + now := timeNow() + update := map[string]any{ + "LastUpdate": now, + "ValidEnd": now.Add(time.Duration(p.MaxAgeSeconds) * time.Second), + "Backoff": false, + "Policy": *p, + } + if record != nil { + update["RecordID"] = record.ID + } + qup := bstore.QueryDB[PolicyRecord](db) + qup.FilterNonzero(PolicyRecord{Domain: pr.Domain, LastUpdate: pr.LastUpdate}) + if n, err := qup.UpdateFields(update); err != nil { + log.Errorx("updating refreshed, modified policy in database", err) + } else if n != 1 { + log.Info("updating refreshed, did not update 1 policy", mlog.Field("count", n)) + } +} diff --git a/mtastsdb/refresh_test.go b/mtastsdb/refresh_test.go new file mode 100644 index 0000000..6c62cd7 --- /dev/null +++ b/mtastsdb/refresh_test.go @@ -0,0 +1,231 @@ +package mtastsdb + +import ( + "context" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "log" + "math/big" + "net" + "net/http" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/mtasts" +) + +func TestRefresh(t *testing.T) { + mox.ConfigStaticPath = "../testdata/mtasts/fake.conf" + mox.Conf.Static.DataDir = "." 
+ + dbpath := mox.DataDirPath("mtasts.db") + os.MkdirAll(filepath.Dir(dbpath), 0770) + os.Remove(dbpath) + defer os.Remove(dbpath) + + if err := Init(false); err != nil { + t.Fatalf("init database: %s", err) + } + defer Close() + + db, err := database() + if err != nil { + t.Fatalf("database: %s", err) + } + + cert := fakeCert(t, false) + defer func() { + mtasts.HTTPClient.Transport = nil + }() + + insert := func(domain string, validEnd, lastUpdate, lastUse time.Time, backoff bool, recordID string, mode mtasts.Mode, maxAge int, mx string) { + t.Helper() + + mxd, err := dns.ParseDomain(mx) + if err != nil { + t.Fatalf("parsing mx domain %q: %s", mx, err) + } + policy := mtasts.Policy{ + Version: "STSv1", + Mode: mode, + MX: []mtasts.STSMX{{Wildcard: false, Domain: mxd}}, + MaxAgeSeconds: maxAge, + Extensions: nil, + } + + pr := PolicyRecord{domain, time.Time{}, validEnd, lastUpdate, lastUse, backoff, recordID, policy} + if err := db.Insert(&pr); err != nil { + t.Fatalf("insert policy: %s", err) + } + } + + now := time.Now() + // Updated just now. + insert("mox.example", now.Add(24*time.Hour), now, now, false, "1", mtasts.ModeEnforce, 3600, "mx.mox.example.com") + // To be removed. + insert("stale.mox.example", now.Add(-time.Hour), now, now.Add(-181*24*time.Hour), false, "1", mtasts.ModeEnforce, 3600, "mx.mox.example.com") + // To be refreshed, same id. + insert("refresh.mox.example", now.Add(7*24*time.Hour), now.Add(-24*time.Hour), now.Add(-179*24*time.Hour), false, "1", mtasts.ModeEnforce, 3600, "mx.mox.example.com") + // To be refreshed and succeed. + insert("policyok.mox.example", now.Add(7*24*time.Hour), now.Add(-24*time.Hour), now.Add(-179*24*time.Hour), false, "1", mtasts.ModeEnforce, 3600, "mx.mox.example.com") + // To be refreshed and fail to fetch. 
+ insert("policybad.mox.example", now.Add(7*24*time.Hour), now.Add(-24*time.Hour), now.Add(-179*24*time.Hour), false, "1", mtasts.ModeEnforce, 3600, "mx.mox.example.com") + + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_mta-sts.refresh.mox.example.": {"v=STSv1; id=1"}, + "_mta-sts.policyok.mox.example.": {"v=STSv1; id=2"}, + "_mta-sts.policybad.mox.example.": {"v=STSv1; id=2"}, + }, + } + + pool := x509.NewCertPool() + pool.AddCert(cert.Leaf) + + l := newPipeListener() + defer l.Close() + go func() { + mux := &http.ServeMux{} + mux.HandleFunc("/.well-known/mta-sts.txt", func(w http.ResponseWriter, r *http.Request) { + if r.Host == "mta-sts.policybad.mox.example" { + w.WriteHeader(500) + return + } + fmt.Fprintf(w, "version: STSv1\nmode: enforce\nmx: mx.mox.example.com\nmax_age: 3600\n") + }) + s := &http.Server{ + Handler: mux, + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{cert}, + }, + ErrorLog: log.New(io.Discard, "", 0), + } + s.ServeTLS(l, "", "") + }() + + mtasts.HTTPClient.Transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return l.Dial() + }, + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + } + + slept := 0 + sleep := func(d time.Duration) { + slept++ + interval := 3 * time.Hour / 2 + if d < time.Duration(slept)*interval-interval/2 || d > time.Duration(slept)*interval+interval/2 { + t.Fatalf("bad sleep duration %v", d) + } + } + if n, err := refresh1(context.Background(), resolver, sleep); err != nil || n != 3 { + t.Fatalf("refresh1: err %s, n %d, expected no error, 3", err, n) + } + if slept != 2 { + t.Fatalf("bad sleeps, %d instead of 2", slept) + } + time.Sleep(time.Second / 10) // Give goroutine time to write result, before we cleanup the database. + + // Should not do any more refreshes and return immediately. 
+ q := bstore.QueryDB[PolicyRecord](db) + q.FilterNonzero(PolicyRecord{Domain: "policybad.mox.example"}) + if _, err := q.Delete(); err != nil { + t.Fatalf("delete record that would be refreshed: %v", err) + } + mox.Context = context.Background() + mox.Shutdown = make(chan struct{}) + close(mox.Shutdown) + n := refresh() + if n != 0 { + t.Fatalf("refresh found unexpected work, n %d", n) + } + mox.Shutdown = make(chan struct{}) +} + +type pipeListener struct { + sync.Mutex + closed bool + C chan net.Conn +} + +var _ net.Listener = &pipeListener{} + +func newPipeListener() *pipeListener { return &pipeListener{C: make(chan net.Conn)} } +func (l *pipeListener) Dial() (net.Conn, error) { + l.Lock() + defer l.Unlock() + if l.closed { + return nil, errors.New("closed") + } + c, s := net.Pipe() + l.C <- s + return c, nil +} +func (l *pipeListener) Accept() (net.Conn, error) { + conn := <-l.C + if conn == nil { + return nil, io.EOF + } + return conn, nil +} +func (l *pipeListener) Close() error { + l.Lock() + defer l.Unlock() + if !l.closed { + l.closed = true + close(l.C) + } + return nil +} +func (l *pipeListener) Addr() net.Addr { return pipeAddr{} } + +type pipeAddr struct{} + +func (a pipeAddr) Network() string { return "pipe" } +func (a pipeAddr) String() string { return "pipe" } + +func fakeCert(t *testing.T, expired bool) tls.Certificate { + notAfter := time.Now() + if expired { + notAfter = notAfter.Add(-time.Hour) + } else { + notAfter = notAfter.Add(time.Hour) + } + + privKey := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real! + + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), // Required field... 
+ DNSNames: []string{"mta-sts.policybad.mox.example", "mta-sts.policyok.mox.example"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: notAfter, + } + localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey) + if err != nil { + t.Fatalf("making certificate: %s", err) + } + cert, err := x509.ParseCertificate(localCertBuf) + if err != nil { + t.Fatalf("parsing generated certificate: %s", err) + } + c := tls.Certificate{ + Certificate: [][]byte{localCertBuf}, + PrivateKey: privKey, + Leaf: cert, + } + return c +} diff --git a/publicsuffix/list.go b/publicsuffix/list.go new file mode 100644 index 0000000..850bd5b --- /dev/null +++ b/publicsuffix/list.go @@ -0,0 +1,189 @@ +// Package publicsuffix implements a public suffix list to look up the +// organizational domain for a given host name. Organizational domains can be +// registered, one level below a top-level domain. +// +// Example.com has a public suffix ".com", and example.co.uk has a public +// suffix ".co.uk". The organizational domain of sub.example.com is +// example.com, and the organization domain of sub.example.co.uk is +// example.co.uk. +package publicsuffix + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "strings" + + _ "embed" + + "golang.org/x/net/idna" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("publicsuffix") + +// todo: automatically fetch new lists periodically? compare it with the old one. refuse it if it changed too much, especially if it contains far fewer entries than before. + +// Labels map from utf8 labels to labels for subdomains. +// The end is marked with an empty string as label. +type labels map[string]labels + +// List is a public suffix list. 
+type List struct { + includes, excludes labels +} + +var publicsuffixList List + +//go:embed public_suffix_list.txt +var publicsuffixData []byte + +func init() { + l, err := ParseList(bytes.NewReader(publicsuffixData)) + if err != nil { + xlog.Fatalx("parsing public suffix list", err) + } + publicsuffixList = l +} + +// ParseList parses a public suffix list. +// Only the "ICANN DOMAINS" are used. +func ParseList(r io.Reader) (List, error) { + list := List{labels{}, labels{}} + br := bufio.NewReader(r) + + // Only use ICANN domains. ../rfc/7489-eid6729 + var icannDomains bool + for { + line, err := br.ReadString('\n') + if line != "" { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "// ===BEGIN ICANN DOMAINS===") { + icannDomains = true + continue + } else if strings.HasPrefix(line, "// ===END ICANN DOMAINS===") { + icannDomains = false + continue + } else if line == "" || strings.HasPrefix(line, "//") || !icannDomains { + continue + } + l := list.includes + var t []string + oline := line + if strings.HasPrefix(line, "!") { + line = line[1:] + l = list.excludes + t = strings.Split(line, ".") + if len(t) == 1 { + xlog.Print("exclude rule with single label, skipping", mlog.Field("line", oline)) + continue + } + } else { + t = strings.Split(line, ".") + } + for i := len(t) - 1; i >= 0; i-- { + w := t[i] + if w == "" { + xlog.Print("empty label in rule, skipping", mlog.Field("line", oline)) + break + } + if w != "" && w != "*" { + w, err = idna.Lookup.ToUnicode(w) + if err != nil { + xlog.Printx("invalid label, skipping", err, mlog.Field("line", oline)) + } + } + m, ok := l[w] + if ok { + if _, dup := m[""]; i == 0 && dup { + xlog.Print("duplicate rule", mlog.Field("line", oline)) + } + l = m + } else { + m = labels{} + l[w] = m + l = m + } + } + l[""] = nil // Mark end. 
+ } + if err == io.EOF { + break + } + if err != nil { + return List{}, fmt.Errorf("reading public suffix list: %w", err) + } + } + return list, nil +} + +// Lookup calls Lookup on the builtin public suffix list, from +// https://publicsuffix.org/list/. +func Lookup(ctx context.Context, domain dns.Domain) (orgDomain dns.Domain) { + return publicsuffixList.Lookup(ctx, domain) +} + +// Lookup returns the organizational domain. If domain is an organizational +// domain, or higher-level, the same domain is returned. +func (l List) Lookup(ctx context.Context, domain dns.Domain) (orgDomain dns.Domain) { + log := xlog.WithContext(ctx) + defer func() { + log.Debug("publicsuffix lookup result", mlog.Field("reqdom", domain), mlog.Field("orgdom", orgDomain)) + }() + + t := strings.Split(domain.Name(), ".") + + var n int + if nexcl, ok := match(l.excludes, t); ok { + n = nexcl + } else if nincl, ok := match(l.includes, t); ok { + n = nincl + 1 + } else { + n = 2 + } + if len(t) < n { + return domain + } + name := strings.Join(t[len(t)-n:], ".") + if isASCII(name) { + return dns.Domain{ASCII: name} + } + t = strings.Split(domain.ASCII, ".") + ascii := strings.Join(t[len(t)-n:], ".") + return dns.Domain{ASCII: ascii, Unicode: name} +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func match(l labels, t []string) (int, bool) { + if len(t) == 0 { + _, ok := l[""] + return 0, ok + } + s := t[len(t)-1] + t = t[:len(t)-1] + n := 0 + if m, mok := l[s]; mok { + if nn, sok := match(m, t); sok { + n = 1 + nn + } + } + if m, mok := l["*"]; mok { + if nn, sok := match(m, t); sok && nn >= n { + n = 1 + nn + } + } + _, mok := l[""] + return n, n > 0 || mok +} diff --git a/publicsuffix/list_test.go b/publicsuffix/list_test.go new file mode 100644 index 0000000..fa5db72 --- /dev/null +++ b/publicsuffix/list_test.go @@ -0,0 +1,79 @@ +package publicsuffix + +import ( + "bytes" + "context" + "strings" + "testing" + + 
"github.com/mjl-/mox/dns" +) + +func TestList(t *testing.T) { + const data = ` +// ===BEGIN ICANN DOMAINS=== +com + +*.jp +// Hosts in .hokkaido.jp can't set cookies below level 4... +*.hokkaido.jp +*.tokyo.jp +// ...except hosts in pref.hokkaido.jp, which can set cookies at level 3. +!pref.hokkaido.jp +!metro.tokyo.jp + +bücher.example.com +// ===END ICANN DOMAINS=== + +ignored.example.com +` + l, err := ParseList(strings.NewReader(data)) + if err != nil { + t.Fatalf("parsing list: %s", err) + } + + test := func(domain, orgDomain string) { + t.Helper() + + d, err := dns.ParseDomain(domain) + if err != nil { + t.Fatalf("idna to unicode %q: %s", domain, err) + } + od, err := dns.ParseDomain(orgDomain) + if err != nil { + t.Fatalf("idna to unicode org domain %q: %s", orgDomain, err) + } + + r := l.Lookup(context.Background(), d) + if r != od { + t.Fatalf("got %q, expected %q, for domain %q", r, orgDomain, domain) + } + } + + test("com", "com") + test("foo.com", "foo.com") + test("bar.foo.com", "foo.com") + test("foo.bar.jp", "foo.bar.jp") + test("baz.foo.bar.jp", "foo.bar.jp") + test("bar.jp", "bar.jp") + test("foo.bar.hokkaido.jp", "foo.bar.hokkaido.jp") + test("baz.foo.bar.hokkaido.jp", "foo.bar.hokkaido.jp") + test("bar.hokkaido.jp", "bar.hokkaido.jp") + test("pref.hokkaido.jp", "pref.hokkaido.jp") + test("foo.pref.hokkaido.jp", "pref.hokkaido.jp") + test("WwW.EXAMPLE.Com", "example.com") + test("bücher.example.com", "bücher.example.com") + test("foo.bücher.example.com", "foo.bücher.example.com") + test("bar.foo.bücher.example.com", "foo.bücher.example.com") + test("xn--bcher-kva.example.com", "bücher.example.com") + test("foo.xn--bcher-kva.example.com", "foo.bücher.example.com") + test("bar.foo.xn--bcher-kva.example.com", "foo.bücher.example.com") + test("x.ignored.example.com", "example.com") + + l, err = ParseList(bytes.NewReader(publicsuffixData)) + if err != nil { + t.Fatalf("parsing public suffix list: %s", err) + } + + // todo: add testcases from 
https://raw.githubusercontent.com/publicsuffix/list/master/tests/test_psl.txt +} diff --git a/publicsuffix/public_suffix_list.txt b/publicsuffix/public_suffix_list.txt new file mode 100644 index 0000000..5467214 --- /dev/null +++ b/publicsuffix/public_suffix_list.txt @@ -0,0 +1,13825 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Please pull this list from, and only from https://publicsuffix.org/list/public_suffix_list.dat, +// rather than any other VCS sites. Pulling from any other URL is not guaranteed to be supported. + +// Instructions on pulling and using this list can be found at https://publicsuffix.org/list/. + +// ===BEGIN ICANN DOMAINS=== + +// ac : https://en.wikipedia.org/wiki/.ac +ac +com.ac +edu.ac +gov.ac +net.ac +mil.ac +org.ac + +// ad : https://en.wikipedia.org/wiki/.ad +ad +nom.ad + +// ae : https://en.wikipedia.org/wiki/.ae +// see also: "Domain Name Eligibility Policy" at http://www.aeda.ae/eng/aepolicy.php +ae +co.ae +net.ae +org.ae +sch.ae +ac.ae +gov.ae +mil.ae + +// aero : see https://www.information.aero/index.php?id=66 +aero +accident-investigation.aero +accident-prevention.aero +aerobatic.aero +aeroclub.aero +aerodrome.aero +agents.aero +aircraft.aero +airline.aero +airport.aero +air-surveillance.aero +airtraffic.aero +air-traffic-control.aero +ambulance.aero +amusement.aero +association.aero +author.aero +ballooning.aero +broker.aero +caa.aero +cargo.aero +catering.aero +certification.aero +championship.aero +charter.aero +civilaviation.aero +club.aero +conference.aero +consultant.aero +consulting.aero +control.aero +council.aero +crew.aero +design.aero +dgca.aero +educator.aero +emergency.aero +engine.aero +engineer.aero +entertainment.aero +equipment.aero +exchange.aero +express.aero +federation.aero +flight.aero +fuel.aero +gliding.aero +government.aero 
+groundhandling.aero +group.aero +hanggliding.aero +homebuilt.aero +insurance.aero +journal.aero +journalist.aero +leasing.aero +logistics.aero +magazine.aero +maintenance.aero +media.aero +microlight.aero +modelling.aero +navigation.aero +parachuting.aero +paragliding.aero +passenger-association.aero +pilot.aero +press.aero +production.aero +recreation.aero +repbody.aero +res.aero +research.aero +rotorcraft.aero +safety.aero +scientist.aero +services.aero +show.aero +skydiving.aero +software.aero +student.aero +trader.aero +trading.aero +trainer.aero +union.aero +workinggroup.aero +works.aero + +// af : http://www.nic.af/help.jsp +af +gov.af +com.af +org.af +net.af +edu.af + +// ag : http://www.nic.ag/prices.htm +ag +com.ag +org.ag +net.ag +co.ag +nom.ag + +// ai : http://nic.com.ai/ +ai +off.ai +com.ai +net.ai +org.ai + +// al : http://www.ert.gov.al/ert_alb/faq_det.html?Id=31 +al +com.al +edu.al +gov.al +mil.al +net.al +org.al + +// am : https://www.amnic.net/policy/en/Policy_EN.pdf +am +co.am +com.am +commune.am +net.am +org.am + +// ao : https://en.wikipedia.org/wiki/.ao +// http://www.dns.ao/REGISTR.DOC +ao +ed.ao +gv.ao +og.ao +co.ao +pb.ao +it.ao + +// aq : https://en.wikipedia.org/wiki/.aq +aq + +// ar : https://nic.ar/es/nic-argentina/normativa +ar +bet.ar +com.ar +coop.ar +edu.ar +gob.ar +gov.ar +int.ar +mil.ar +musica.ar +mutual.ar +net.ar +org.ar +senasa.ar +tur.ar + +// arpa : https://en.wikipedia.org/wiki/.arpa +// Confirmed by registry 2008-06-18 +arpa +e164.arpa +in-addr.arpa +ip6.arpa +iris.arpa +uri.arpa +urn.arpa + +// as : https://en.wikipedia.org/wiki/.as +as +gov.as + +// asia : https://en.wikipedia.org/wiki/.asia +asia + +// at : https://en.wikipedia.org/wiki/.at +// Confirmed by registry 2008-06-17 +at +ac.at +co.at +gv.at +or.at +sth.ac.at + +// au : https://en.wikipedia.org/wiki/.au +// http://www.auda.org.au/ +au +// 2LDs +com.au +net.au +org.au +edu.au +gov.au +asn.au +id.au +// Historic 2LDs (closed to new registration, but sites still 
exist) +info.au +conf.au +oz.au +// CGDNs - http://www.cgdn.org.au/ +act.au +nsw.au +nt.au +qld.au +sa.au +tas.au +vic.au +wa.au +// 3LDs +act.edu.au +catholic.edu.au +// eq.edu.au - Removed at the request of the Queensland Department of Education +nsw.edu.au +nt.edu.au +qld.edu.au +sa.edu.au +tas.edu.au +vic.edu.au +wa.edu.au +// act.gov.au Bug 984824 - Removed at request of Greg Tankard +// nsw.gov.au Bug 547985 - Removed at request of +// nt.gov.au Bug 940478 - Removed at request of Greg Connors +qld.gov.au +sa.gov.au +tas.gov.au +vic.gov.au +wa.gov.au +// 4LDs +// education.tas.edu.au - Removed at the request of the Department of Education Tasmania +schools.nsw.edu.au + +// aw : https://en.wikipedia.org/wiki/.aw +aw +com.aw + +// ax : https://en.wikipedia.org/wiki/.ax +ax + +// az : https://en.wikipedia.org/wiki/.az +az +com.az +net.az +int.az +gov.az +org.az +edu.az +info.az +pp.az +mil.az +name.az +pro.az +biz.az + +// ba : http://nic.ba/users_data/files/pravilnik_o_registraciji.pdf +ba +com.ba +edu.ba +gov.ba +mil.ba +net.ba +org.ba + +// bb : https://en.wikipedia.org/wiki/.bb +bb +biz.bb +co.bb +com.bb +edu.bb +gov.bb +info.bb +net.bb +org.bb +store.bb +tv.bb + +// bd : https://en.wikipedia.org/wiki/.bd +*.bd + +// be : https://en.wikipedia.org/wiki/.be +// Confirmed by registry 2008-06-08 +be +ac.be + +// bf : https://en.wikipedia.org/wiki/.bf +bf +gov.bf + +// bg : https://en.wikipedia.org/wiki/.bg +// https://www.register.bg/user/static/rules/en/index.html +bg +a.bg +b.bg +c.bg +d.bg +e.bg +f.bg +g.bg +h.bg +i.bg +j.bg +k.bg +l.bg +m.bg +n.bg +o.bg +p.bg +q.bg +r.bg +s.bg +t.bg +u.bg +v.bg +w.bg +x.bg +y.bg +z.bg +0.bg +1.bg +2.bg +3.bg +4.bg +5.bg +6.bg +7.bg +8.bg +9.bg + +// bh : https://en.wikipedia.org/wiki/.bh +bh +com.bh +edu.bh +net.bh +org.bh +gov.bh + +// bi : https://en.wikipedia.org/wiki/.bi +// http://whois.nic.bi/ +bi +co.bi +com.bi +edu.bi +or.bi +org.bi + +// biz : https://en.wikipedia.org/wiki/.biz +biz + +// bj : 
https://en.wikipedia.org/wiki/.bj +bj +asso.bj +barreau.bj +gouv.bj + +// bm : http://www.bermudanic.bm/dnr-text.txt +bm +com.bm +edu.bm +gov.bm +net.bm +org.bm + +// bn : http://www.bnnic.bn/faqs +bn +com.bn +edu.bn +gov.bn +net.bn +org.bn + +// bo : https://nic.bo/delegacion2015.php#h-1.10 +bo +com.bo +edu.bo +gob.bo +int.bo +org.bo +net.bo +mil.bo +tv.bo +web.bo +// Social Domains +academia.bo +agro.bo +arte.bo +blog.bo +bolivia.bo +ciencia.bo +cooperativa.bo +democracia.bo +deporte.bo +ecologia.bo +economia.bo +empresa.bo +indigena.bo +industria.bo +info.bo +medicina.bo +movimiento.bo +musica.bo +natural.bo +nombre.bo +noticias.bo +patria.bo +politica.bo +profesional.bo +plurinacional.bo +pueblo.bo +revista.bo +salud.bo +tecnologia.bo +tksat.bo +transporte.bo +wiki.bo + +// br : http://registro.br/dominio/categoria.html +// Submitted by registry +br +9guacu.br +abc.br +adm.br +adv.br +agr.br +aju.br +am.br +anani.br +aparecida.br +app.br +arq.br +art.br +ato.br +b.br +barueri.br +belem.br +bhz.br +bib.br +bio.br +blog.br +bmd.br +boavista.br +bsb.br +campinagrande.br +campinas.br +caxias.br +cim.br +cng.br +cnt.br +com.br +contagem.br +coop.br +coz.br +cri.br +cuiaba.br +curitiba.br +def.br +des.br +det.br +dev.br +ecn.br +eco.br +edu.br +emp.br +enf.br +eng.br +esp.br +etc.br +eti.br +far.br +feira.br +flog.br +floripa.br +fm.br +fnd.br +fortal.br +fot.br +foz.br +fst.br +g12.br +geo.br +ggf.br +goiania.br +gov.br +// gov.br 26 states + df https://en.wikipedia.org/wiki/States_of_Brazil +ac.gov.br +al.gov.br +am.gov.br +ap.gov.br +ba.gov.br +ce.gov.br +df.gov.br +es.gov.br +go.gov.br +ma.gov.br +mg.gov.br +ms.gov.br +mt.gov.br +pa.gov.br +pb.gov.br +pe.gov.br +pi.gov.br +pr.gov.br +rj.gov.br +rn.gov.br +ro.gov.br +rr.gov.br +rs.gov.br +sc.gov.br +se.gov.br +sp.gov.br +to.gov.br +gru.br +imb.br +ind.br +inf.br +jab.br +jampa.br +jdf.br +joinville.br +jor.br +jus.br +leg.br +lel.br +log.br +londrina.br +macapa.br +maceio.br +manaus.br +maringa.br +mat.br +med.br 
+mil.br +morena.br +mp.br +mus.br +natal.br +net.br +niteroi.br +*.nom.br +not.br +ntr.br +odo.br +ong.br +org.br +osasco.br +palmas.br +poa.br +ppg.br +pro.br +psc.br +psi.br +pvh.br +qsl.br +radio.br +rec.br +recife.br +rep.br +ribeirao.br +rio.br +riobranco.br +riopreto.br +salvador.br +sampa.br +santamaria.br +santoandre.br +saobernardo.br +saogonca.br +seg.br +sjc.br +slg.br +slz.br +sorocaba.br +srv.br +taxi.br +tc.br +tec.br +teo.br +the.br +tmp.br +trd.br +tur.br +tv.br +udi.br +vet.br +vix.br +vlog.br +wiki.br +zlg.br + +// bs : http://www.nic.bs/rules.html +bs +com.bs +net.bs +org.bs +edu.bs +gov.bs + +// bt : https://en.wikipedia.org/wiki/.bt +bt +com.bt +edu.bt +gov.bt +net.bt +org.bt + +// bv : No registrations at this time. +// Submitted by registry +bv + +// bw : https://en.wikipedia.org/wiki/.bw +// http://www.gobin.info/domainname/bw.doc +// list of other 2nd level tlds ? +bw +co.bw +org.bw + +// by : https://en.wikipedia.org/wiki/.by +// http://tld.by/rules_2006_en.html +// list of other 2nd level tlds ? +by +gov.by +mil.by +// Official information does not indicate that com.by is a reserved +// second-level domain, but it's being used as one (see www.google.com.by and +// www.yahoo.com.by, for example), so we list it here for safety's sake. 
+com.by + +// http://hoster.by/ +of.by + +// bz : https://en.wikipedia.org/wiki/.bz +// http://www.belizenic.bz/ +bz +com.bz +net.bz +org.bz +edu.bz +gov.bz + +// ca : https://en.wikipedia.org/wiki/.ca +ca +// ca geographical names +ab.ca +bc.ca +mb.ca +nb.ca +nf.ca +nl.ca +ns.ca +nt.ca +nu.ca +on.ca +pe.ca +qc.ca +sk.ca +yk.ca +// gc.ca: https://en.wikipedia.org/wiki/.gc.ca +// see also: http://registry.gc.ca/en/SubdomainFAQ +gc.ca + +// cat : https://en.wikipedia.org/wiki/.cat +cat + +// cc : https://en.wikipedia.org/wiki/.cc +cc + +// cd : https://en.wikipedia.org/wiki/.cd +// see also: https://www.nic.cd/domain/insertDomain_2.jsp?act=1 +cd +gov.cd + +// cf : https://en.wikipedia.org/wiki/.cf +cf + +// cg : https://en.wikipedia.org/wiki/.cg +cg + +// ch : https://en.wikipedia.org/wiki/.ch +ch + +// ci : https://en.wikipedia.org/wiki/.ci +// http://www.nic.ci/index.php?page=charte +ci +org.ci +or.ci +com.ci +co.ci +edu.ci +ed.ci +ac.ci +net.ci +go.ci +asso.ci +aéroport.ci +int.ci +presse.ci +md.ci +gouv.ci + +// ck : https://en.wikipedia.org/wiki/.ck +*.ck +!www.ck + +// cl : https://www.nic.cl +// Confirmed by .CL registry +cl +co.cl +gob.cl +gov.cl +mil.cl + +// cm : https://en.wikipedia.org/wiki/.cm plus bug 981927 +cm +co.cm +com.cm +gov.cm +net.cm + +// cn : https://en.wikipedia.org/wiki/.cn +// Submitted by registry +cn +ac.cn +com.cn +edu.cn +gov.cn +net.cn +org.cn +mil.cn +公司.cn +网络.cn +網絡.cn +// cn geographic names +ah.cn +bj.cn +cq.cn +fj.cn +gd.cn +gs.cn +gz.cn +gx.cn +ha.cn +hb.cn +he.cn +hi.cn +hl.cn +hn.cn +jl.cn +js.cn +jx.cn +ln.cn +nm.cn +nx.cn +qh.cn +sc.cn +sd.cn +sh.cn +sn.cn +sx.cn +tj.cn +xj.cn +xz.cn +yn.cn +zj.cn +hk.cn +mo.cn +tw.cn + +// co : https://en.wikipedia.org/wiki/.co +// Submitted by registry +co +arts.co +com.co +edu.co +firm.co +gov.co +info.co +int.co +mil.co +net.co +nom.co +org.co +rec.co +web.co + +// com : https://en.wikipedia.org/wiki/.com +com + +// coop : https://en.wikipedia.org/wiki/.coop +coop + +// cr : 
http://www.nic.cr/niccr_publico/showRegistroDominiosScreen.do +cr +ac.cr +co.cr +ed.cr +fi.cr +go.cr +or.cr +sa.cr + +// cu : https://en.wikipedia.org/wiki/.cu +cu +com.cu +edu.cu +org.cu +net.cu +gov.cu +inf.cu + +// cv : https://en.wikipedia.org/wiki/.cv +// cv : http://www.dns.cv/tldcv_portal/do?com=DS;5446457100;111;+PAGE(4000018)+K-CAT-CODIGO(RDOM)+RCNT(100); <- registration rules +cv +com.cv +edu.cv +int.cv +nome.cv +org.cv + +// cw : http://www.una.cw/cw_registry/ +// Confirmed by registry 2013-03-26 +cw +com.cw +edu.cw +net.cw +org.cw + +// cx : https://en.wikipedia.org/wiki/.cx +// list of other 2nd level tlds ? +cx +gov.cx + +// cy : http://www.nic.cy/ +// Submitted by registry Panayiotou Fotia +cy +ac.cy +biz.cy +com.cy +ekloges.cy +gov.cy +ltd.cy +name.cy +net.cy +org.cy +parliament.cy +press.cy +pro.cy +tm.cy + +// cz : https://en.wikipedia.org/wiki/.cz +cz + +// de : https://en.wikipedia.org/wiki/.de +// Confirmed by registry (with technical +// reservations) 2008-07-01 +de + +// dj : https://en.wikipedia.org/wiki/.dj +dj + +// dk : https://en.wikipedia.org/wiki/.dk +// Confirmed by registry 2008-06-17 +dk + +// dm : https://en.wikipedia.org/wiki/.dm +dm +com.dm +net.dm +org.dm +edu.dm +gov.dm + +// do : https://en.wikipedia.org/wiki/.do +do +art.do +com.do +edu.do +gob.do +gov.do +mil.do +net.do +org.do +sld.do +web.do + +// dz : http://www.nic.dz/images/pdf_nic/charte.pdf +dz +art.dz +asso.dz +com.dz +edu.dz +gov.dz +org.dz +net.dz +pol.dz +soc.dz +tm.dz + +// ec : http://www.nic.ec/reg/paso1.asp +// Submitted by registry +ec +com.ec +info.ec +net.ec +fin.ec +k12.ec +med.ec +pro.ec +org.ec +edu.ec +gov.ec +gob.ec +mil.ec + +// edu : https://en.wikipedia.org/wiki/.edu +edu + +// ee : http://www.eenet.ee/EENet/dom_reeglid.html#lisa_B +ee +edu.ee +gov.ee +riik.ee +lib.ee +med.ee +com.ee +pri.ee +aip.ee +org.ee +fie.ee + +// eg : https://en.wikipedia.org/wiki/.eg +eg +com.eg +edu.eg +eun.eg +gov.eg +mil.eg +name.eg +net.eg +org.eg +sci.eg + +// er : 
https://en.wikipedia.org/wiki/.er +*.er + +// es : https://www.nic.es/site_ingles/ingles/dominios/index.html +es +com.es +nom.es +org.es +gob.es +edu.es + +// et : https://en.wikipedia.org/wiki/.et +et +com.et +gov.et +org.et +edu.et +biz.et +name.et +info.et +net.et + +// eu : https://en.wikipedia.org/wiki/.eu +eu + +// fi : https://en.wikipedia.org/wiki/.fi +fi +// aland.fi : https://en.wikipedia.org/wiki/.ax +// This domain is being phased out in favor of .ax. As there are still many +// domains under aland.fi, we still keep it on the list until aland.fi is +// completely removed. +// TODO: Check for updates (expected to be phased out around Q1/2009) +aland.fi + +// fj : http://domains.fj/ +// Submitted by registry 2020-02-11 +fj +ac.fj +biz.fj +com.fj +gov.fj +info.fj +mil.fj +name.fj +net.fj +org.fj +pro.fj + +// fk : https://en.wikipedia.org/wiki/.fk +*.fk + +// fm : https://en.wikipedia.org/wiki/.fm +com.fm +edu.fm +net.fm +org.fm +fm + +// fo : https://en.wikipedia.org/wiki/.fo +fo + +// fr : http://www.afnic.fr/ +// domaines descriptifs : https://www.afnic.fr/medias/documents/Cadre_legal/Afnic_Naming_Policy_12122016_VEN.pdf +fr +asso.fr +com.fr +gouv.fr +nom.fr +prd.fr +tm.fr +// domaines sectoriels : https://www.afnic.fr/en/products-and-services/the-fr-tld/sector-based-fr-domains-4.html +aeroport.fr +avocat.fr +avoues.fr +cci.fr +chambagri.fr +chirurgiens-dentistes.fr +experts-comptables.fr +geometre-expert.fr +greta.fr +huissier-justice.fr +medecin.fr +notaires.fr +pharmacien.fr +port.fr +veterinaire.fr + +// ga : https://en.wikipedia.org/wiki/.ga +ga + +// gb : This registry is effectively dormant +// Submitted by registry +gb + +// gd : https://en.wikipedia.org/wiki/.gd +edu.gd +gov.gd +gd + +// ge : http://www.nic.net.ge/policy_en.pdf +ge +com.ge +edu.ge +gov.ge +org.ge +mil.ge +net.ge +pvt.ge + +// gf : https://en.wikipedia.org/wiki/.gf +gf + +// gg : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +gg +co.gg 
+net.gg +org.gg + +// gh : https://en.wikipedia.org/wiki/.gh +// see also: http://www.nic.gh/reg_now.php +// Although domains directly at second level are not possible at the moment, +// they have been possible for some time and may come back. +gh +com.gh +edu.gh +gov.gh +org.gh +mil.gh + +// gi : http://www.nic.gi/rules.html +gi +com.gi +ltd.gi +gov.gi +mod.gi +edu.gi +org.gi + +// gl : https://en.wikipedia.org/wiki/.gl +// http://nic.gl +gl +co.gl +com.gl +edu.gl +net.gl +org.gl + +// gm : http://www.nic.gm/htmlpages%5Cgm-policy.htm +gm + +// gn : http://psg.com/dns/gn/gn.txt +// Submitted by registry +gn +ac.gn +com.gn +edu.gn +gov.gn +org.gn +net.gn + +// gov : https://en.wikipedia.org/wiki/.gov +gov + +// gp : http://www.nic.gp/index.php?lang=en +gp +com.gp +net.gp +mobi.gp +edu.gp +org.gp +asso.gp + +// gq : https://en.wikipedia.org/wiki/.gq +gq + +// gr : https://grweb.ics.forth.gr/english/1617-B-2005.html +// Submitted by registry +gr +com.gr +edu.gr +net.gr +org.gr +gov.gr + +// gs : https://en.wikipedia.org/wiki/.gs +gs + +// gt : https://www.gt/sitio/registration_policy.php?lang=en +gt +com.gt +edu.gt +gob.gt +ind.gt +mil.gt +net.gt +org.gt + +// gu : http://gadao.gov.gu/register.html +// University of Guam : https://www.uog.edu +// Submitted by uognoc@triton.uog.edu +gu +com.gu +edu.gu +gov.gu +guam.gu +info.gu +net.gu +org.gu +web.gu + +// gw : https://en.wikipedia.org/wiki/.gw +// gw : https://nic.gw/regras/ +gw + +// gy : https://en.wikipedia.org/wiki/.gy +// http://registry.gy/ +gy +co.gy +com.gy +edu.gy +gov.gy +net.gy +org.gy + +// hk : https://www.hkirc.hk +// Submitted by registry +hk +com.hk +edu.hk +gov.hk +idv.hk +net.hk +org.hk +公司.hk +教育.hk +敎育.hk +政府.hk +個人.hk +个人.hk +箇人.hk +網络.hk +网络.hk +组織.hk +網絡.hk +网絡.hk +组织.hk +組織.hk +組织.hk + +// hm : https://en.wikipedia.org/wiki/.hm +hm + +// hn : http://www.nic.hn/politicas/ps02,,05.html +hn +com.hn +edu.hn +org.hn +net.hn +mil.hn +gob.hn + +// hr : 
http://www.dns.hr/documents/pdf/HRTLD-regulations.pdf +hr +iz.hr +from.hr +name.hr +com.hr + +// ht : http://www.nic.ht/info/charte.cfm +ht +com.ht +shop.ht +firm.ht +info.ht +adult.ht +net.ht +pro.ht +org.ht +med.ht +art.ht +coop.ht +pol.ht +asso.ht +edu.ht +rel.ht +gouv.ht +perso.ht + +// hu : http://www.domain.hu/domain/English/sld.html +// Confirmed by registry 2008-06-12 +hu +co.hu +info.hu +org.hu +priv.hu +sport.hu +tm.hu +2000.hu +agrar.hu +bolt.hu +casino.hu +city.hu +erotica.hu +erotika.hu +film.hu +forum.hu +games.hu +hotel.hu +ingatlan.hu +jogasz.hu +konyvelo.hu +lakas.hu +media.hu +news.hu +reklam.hu +sex.hu +shop.hu +suli.hu +szex.hu +tozsde.hu +utazas.hu +video.hu + +// id : https://pandi.id/en/domain/registration-requirements/ +id +ac.id +biz.id +co.id +desa.id +go.id +mil.id +my.id +net.id +or.id +ponpes.id +sch.id +web.id + +// ie : https://en.wikipedia.org/wiki/.ie +ie +gov.ie + +// il : http://www.isoc.org.il/domains/ +il +ac.il +co.il +gov.il +idf.il +k12.il +muni.il +net.il +org.il + +// im : https://www.nic.im/ +// Submitted by registry +im +ac.im +co.im +com.im +ltd.co.im +net.im +org.im +plc.co.im +tt.im +tv.im + +// in : https://en.wikipedia.org/wiki/.in +// see also: https://registry.in/Policies +// Please note, that nic.in is not an official eTLD, but used by most +// government institutions. +in +co.in +firm.in +net.in +org.in +gen.in +ind.in +nic.in +ac.in +edu.in +res.in +gov.in +mil.in + +// info : https://en.wikipedia.org/wiki/.info +info + +// int : https://en.wikipedia.org/wiki/.int +// Confirmed by registry 2008-06-18 +int +eu.int + +// io : http://www.nic.io/rules.html +// list of other 2nd level tlds ? 
+io +com.io + +// iq : http://www.cmc.iq/english/iq/iqregister1.htm +iq +gov.iq +edu.iq +mil.iq +com.iq +org.iq +net.iq + +// ir : http://www.nic.ir/Terms_and_Conditions_ir,_Appendix_1_Domain_Rules +// Also see http://www.nic.ir/Internationalized_Domain_Names +// Two .ir entries added at request of , 2010-04-16 +ir +ac.ir +co.ir +gov.ir +id.ir +net.ir +org.ir +sch.ir +// xn--mgba3a4f16a.ir (.ir, Persian YEH) +ایران.ir +// xn--mgba3a4fra.ir (.ir, Arabic YEH) +ايران.ir + +// is : http://www.isnic.is/domain/rules.php +// Confirmed by registry 2008-12-06 +is +net.is +com.is +edu.is +gov.is +org.is +int.is + +// it : https://en.wikipedia.org/wiki/.it +it +gov.it +edu.it +// Reserved geo-names (regions and provinces): +// https://www.nic.it/sites/default/files/archivio/docs/Regulation_assignation_v7.1.pdf +// Regions +abr.it +abruzzo.it +aosta-valley.it +aostavalley.it +bas.it +basilicata.it +cal.it +calabria.it +cam.it +campania.it +emilia-romagna.it +emiliaromagna.it +emr.it +friuli-v-giulia.it +friuli-ve-giulia.it +friuli-vegiulia.it +friuli-venezia-giulia.it +friuli-veneziagiulia.it +friuli-vgiulia.it +friuliv-giulia.it +friulive-giulia.it +friulivegiulia.it +friulivenezia-giulia.it +friuliveneziagiulia.it +friulivgiulia.it +fvg.it +laz.it +lazio.it +lig.it +liguria.it +lom.it +lombardia.it +lombardy.it +lucania.it +mar.it +marche.it +mol.it +molise.it +piedmont.it +piemonte.it +pmn.it +pug.it +puglia.it +sar.it +sardegna.it +sardinia.it +sic.it +sicilia.it +sicily.it +taa.it +tos.it +toscana.it +trentin-sud-tirol.it +trentin-süd-tirol.it +trentin-sudtirol.it +trentin-südtirol.it +trentin-sued-tirol.it +trentin-suedtirol.it +trentino-a-adige.it +trentino-aadige.it +trentino-alto-adige.it +trentino-altoadige.it +trentino-s-tirol.it +trentino-stirol.it +trentino-sud-tirol.it +trentino-süd-tirol.it +trentino-sudtirol.it +trentino-südtirol.it +trentino-sued-tirol.it +trentino-suedtirol.it +trentino.it +trentinoa-adige.it +trentinoaadige.it +trentinoalto-adige.it 
+trentinoaltoadige.it +trentinos-tirol.it +trentinostirol.it +trentinosud-tirol.it +trentinosüd-tirol.it +trentinosudtirol.it +trentinosüdtirol.it +trentinosued-tirol.it +trentinosuedtirol.it +trentinsud-tirol.it +trentinsüd-tirol.it +trentinsudtirol.it +trentinsüdtirol.it +trentinsued-tirol.it +trentinsuedtirol.it +tuscany.it +umb.it +umbria.it +val-d-aosta.it +val-daosta.it +vald-aosta.it +valdaosta.it +valle-aosta.it +valle-d-aosta.it +valle-daosta.it +valleaosta.it +valled-aosta.it +valledaosta.it +vallee-aoste.it +vallée-aoste.it +vallee-d-aoste.it +vallée-d-aoste.it +valleeaoste.it +valléeaoste.it +valleedaoste.it +valléedaoste.it +vao.it +vda.it +ven.it +veneto.it +// Provinces +ag.it +agrigento.it +al.it +alessandria.it +alto-adige.it +altoadige.it +an.it +ancona.it +andria-barletta-trani.it +andria-trani-barletta.it +andriabarlettatrani.it +andriatranibarletta.it +ao.it +aosta.it +aoste.it +ap.it +aq.it +aquila.it +ar.it +arezzo.it +ascoli-piceno.it +ascolipiceno.it +asti.it +at.it +av.it +avellino.it +ba.it +balsan-sudtirol.it +balsan-südtirol.it +balsan-suedtirol.it +balsan.it +bari.it +barletta-trani-andria.it +barlettatraniandria.it +belluno.it +benevento.it +bergamo.it +bg.it +bi.it +biella.it +bl.it +bn.it +bo.it +bologna.it +bolzano-altoadige.it +bolzano.it +bozen-sudtirol.it +bozen-südtirol.it +bozen-suedtirol.it +bozen.it +br.it +brescia.it +brindisi.it +bs.it +bt.it +bulsan-sudtirol.it +bulsan-südtirol.it +bulsan-suedtirol.it +bulsan.it +bz.it +ca.it +cagliari.it +caltanissetta.it +campidano-medio.it +campidanomedio.it +campobasso.it +carbonia-iglesias.it +carboniaiglesias.it +carrara-massa.it +carraramassa.it +caserta.it +catania.it +catanzaro.it +cb.it +ce.it +cesena-forli.it +cesena-forlì.it +cesenaforli.it +cesenaforlì.it +ch.it +chieti.it +ci.it +cl.it +cn.it +co.it +como.it +cosenza.it +cr.it +cremona.it +crotone.it +cs.it +ct.it +cuneo.it +cz.it +dell-ogliastra.it +dellogliastra.it +en.it +enna.it +fc.it +fe.it +fermo.it +ferrara.it +fg.it 
+fi.it +firenze.it +florence.it +fm.it +foggia.it +forli-cesena.it +forlì-cesena.it +forlicesena.it +forlìcesena.it +fr.it +frosinone.it +ge.it +genoa.it +genova.it +go.it +gorizia.it +gr.it +grosseto.it +iglesias-carbonia.it +iglesiascarbonia.it +im.it +imperia.it +is.it +isernia.it +kr.it +la-spezia.it +laquila.it +laspezia.it +latina.it +lc.it +le.it +lecce.it +lecco.it +li.it +livorno.it +lo.it +lodi.it +lt.it +lu.it +lucca.it +macerata.it +mantova.it +massa-carrara.it +massacarrara.it +matera.it +mb.it +mc.it +me.it +medio-campidano.it +mediocampidano.it +messina.it +mi.it +milan.it +milano.it +mn.it +mo.it +modena.it +monza-brianza.it +monza-e-della-brianza.it +monza.it +monzabrianza.it +monzaebrianza.it +monzaedellabrianza.it +ms.it +mt.it +na.it +naples.it +napoli.it +no.it +novara.it +nu.it +nuoro.it +og.it +ogliastra.it +olbia-tempio.it +olbiatempio.it +or.it +oristano.it +ot.it +pa.it +padova.it +padua.it +palermo.it +parma.it +pavia.it +pc.it +pd.it +pe.it +perugia.it +pesaro-urbino.it +pesarourbino.it +pescara.it +pg.it +pi.it +piacenza.it +pisa.it +pistoia.it +pn.it +po.it +pordenone.it +potenza.it +pr.it +prato.it +pt.it +pu.it +pv.it +pz.it +ra.it +ragusa.it +ravenna.it +rc.it +re.it +reggio-calabria.it +reggio-emilia.it +reggiocalabria.it +reggioemilia.it +rg.it +ri.it +rieti.it +rimini.it +rm.it +rn.it +ro.it +roma.it +rome.it +rovigo.it +sa.it +salerno.it +sassari.it +savona.it +si.it +siena.it +siracusa.it +so.it +sondrio.it +sp.it +sr.it +ss.it +suedtirol.it +südtirol.it +sv.it +ta.it +taranto.it +te.it +tempio-olbia.it +tempioolbia.it +teramo.it +terni.it +tn.it +to.it +torino.it +tp.it +tr.it +trani-andria-barletta.it +trani-barletta-andria.it +traniandriabarletta.it +tranibarlettaandria.it +trapani.it +trento.it +treviso.it +trieste.it +ts.it +turin.it +tv.it +ud.it +udine.it +urbino-pesaro.it +urbinopesaro.it +va.it +varese.it +vb.it +vc.it +ve.it +venezia.it +venice.it +verbania.it +vercelli.it +verona.it +vi.it +vibo-valentia.it 
+vibovalentia.it +vicenza.it +viterbo.it +vr.it +vs.it +vt.it +vv.it + +// je : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +je +co.je +net.je +org.je + +// jm : http://www.com.jm/register.html +*.jm + +// jo : http://www.dns.jo/Registration_policy.aspx +jo +com.jo +org.jo +net.jo +edu.jo +sch.jo +gov.jo +mil.jo +name.jo + +// jobs : https://en.wikipedia.org/wiki/.jobs +jobs + +// jp : https://en.wikipedia.org/wiki/.jp +// http://jprs.co.jp/en/jpdomain.html +// Submitted by registry +jp +// jp organizational type names +ac.jp +ad.jp +co.jp +ed.jp +go.jp +gr.jp +lg.jp +ne.jp +or.jp +// jp prefecture type names +aichi.jp +akita.jp +aomori.jp +chiba.jp +ehime.jp +fukui.jp +fukuoka.jp +fukushima.jp +gifu.jp +gunma.jp +hiroshima.jp +hokkaido.jp +hyogo.jp +ibaraki.jp +ishikawa.jp +iwate.jp +kagawa.jp +kagoshima.jp +kanagawa.jp +kochi.jp +kumamoto.jp +kyoto.jp +mie.jp +miyagi.jp +miyazaki.jp +nagano.jp +nagasaki.jp +nara.jp +niigata.jp +oita.jp +okayama.jp +okinawa.jp +osaka.jp +saga.jp +saitama.jp +shiga.jp +shimane.jp +shizuoka.jp +tochigi.jp +tokushima.jp +tokyo.jp +tottori.jp +toyama.jp +wakayama.jp +yamagata.jp +yamaguchi.jp +yamanashi.jp +栃木.jp +愛知.jp +愛媛.jp +兵庫.jp +熊本.jp +茨城.jp +北海道.jp +千葉.jp +和歌山.jp +長崎.jp +長野.jp +新潟.jp +青森.jp +静岡.jp +東京.jp +石川.jp +埼玉.jp +三重.jp +京都.jp +佐賀.jp +大分.jp +大阪.jp +奈良.jp +宮城.jp +宮崎.jp +富山.jp +山口.jp +山形.jp +山梨.jp +岩手.jp +岐阜.jp +岡山.jp +島根.jp +広島.jp +徳島.jp +沖縄.jp +滋賀.jp +神奈川.jp +福井.jp +福岡.jp +福島.jp +秋田.jp +群馬.jp +香川.jp +高知.jp +鳥取.jp +鹿児島.jp +// jp geographic type names +// http://jprs.jp/doc/rule/saisoku-1.html +*.kawasaki.jp +*.kitakyushu.jp +*.kobe.jp +*.nagoya.jp +*.sapporo.jp +*.sendai.jp +*.yokohama.jp +!city.kawasaki.jp +!city.kitakyushu.jp +!city.kobe.jp +!city.nagoya.jp +!city.sapporo.jp +!city.sendai.jp +!city.yokohama.jp +// 4th level registration +aisai.aichi.jp +ama.aichi.jp +anjo.aichi.jp +asuke.aichi.jp +chiryu.aichi.jp +chita.aichi.jp +fuso.aichi.jp +gamagori.aichi.jp +handa.aichi.jp 
+hazu.aichi.jp +hekinan.aichi.jp +higashiura.aichi.jp +ichinomiya.aichi.jp +inazawa.aichi.jp +inuyama.aichi.jp +isshiki.aichi.jp +iwakura.aichi.jp +kanie.aichi.jp +kariya.aichi.jp +kasugai.aichi.jp +kira.aichi.jp +kiyosu.aichi.jp +komaki.aichi.jp +konan.aichi.jp +kota.aichi.jp +mihama.aichi.jp +miyoshi.aichi.jp +nishio.aichi.jp +nisshin.aichi.jp +obu.aichi.jp +oguchi.aichi.jp +oharu.aichi.jp +okazaki.aichi.jp +owariasahi.aichi.jp +seto.aichi.jp +shikatsu.aichi.jp +shinshiro.aichi.jp +shitara.aichi.jp +tahara.aichi.jp +takahama.aichi.jp +tobishima.aichi.jp +toei.aichi.jp +togo.aichi.jp +tokai.aichi.jp +tokoname.aichi.jp +toyoake.aichi.jp +toyohashi.aichi.jp +toyokawa.aichi.jp +toyone.aichi.jp +toyota.aichi.jp +tsushima.aichi.jp +yatomi.aichi.jp +akita.akita.jp +daisen.akita.jp +fujisato.akita.jp +gojome.akita.jp +hachirogata.akita.jp +happou.akita.jp +higashinaruse.akita.jp +honjo.akita.jp +honjyo.akita.jp +ikawa.akita.jp +kamikoani.akita.jp +kamioka.akita.jp +katagami.akita.jp +kazuno.akita.jp +kitaakita.akita.jp +kosaka.akita.jp +kyowa.akita.jp +misato.akita.jp +mitane.akita.jp +moriyoshi.akita.jp +nikaho.akita.jp +noshiro.akita.jp +odate.akita.jp +oga.akita.jp +ogata.akita.jp +semboku.akita.jp +yokote.akita.jp +yurihonjo.akita.jp +aomori.aomori.jp +gonohe.aomori.jp +hachinohe.aomori.jp +hashikami.aomori.jp +hiranai.aomori.jp +hirosaki.aomori.jp +itayanagi.aomori.jp +kuroishi.aomori.jp +misawa.aomori.jp +mutsu.aomori.jp +nakadomari.aomori.jp +noheji.aomori.jp +oirase.aomori.jp +owani.aomori.jp +rokunohe.aomori.jp +sannohe.aomori.jp +shichinohe.aomori.jp +shingo.aomori.jp +takko.aomori.jp +towada.aomori.jp +tsugaru.aomori.jp +tsuruta.aomori.jp +abiko.chiba.jp +asahi.chiba.jp +chonan.chiba.jp +chosei.chiba.jp +choshi.chiba.jp +chuo.chiba.jp +funabashi.chiba.jp +futtsu.chiba.jp +hanamigawa.chiba.jp +ichihara.chiba.jp +ichikawa.chiba.jp +ichinomiya.chiba.jp +inzai.chiba.jp +isumi.chiba.jp +kamagaya.chiba.jp +kamogawa.chiba.jp +kashiwa.chiba.jp +katori.chiba.jp 
+katsuura.chiba.jp +kimitsu.chiba.jp +kisarazu.chiba.jp +kozaki.chiba.jp +kujukuri.chiba.jp +kyonan.chiba.jp +matsudo.chiba.jp +midori.chiba.jp +mihama.chiba.jp +minamiboso.chiba.jp +mobara.chiba.jp +mutsuzawa.chiba.jp +nagara.chiba.jp +nagareyama.chiba.jp +narashino.chiba.jp +narita.chiba.jp +noda.chiba.jp +oamishirasato.chiba.jp +omigawa.chiba.jp +onjuku.chiba.jp +otaki.chiba.jp +sakae.chiba.jp +sakura.chiba.jp +shimofusa.chiba.jp +shirako.chiba.jp +shiroi.chiba.jp +shisui.chiba.jp +sodegaura.chiba.jp +sosa.chiba.jp +tako.chiba.jp +tateyama.chiba.jp +togane.chiba.jp +tohnosho.chiba.jp +tomisato.chiba.jp +urayasu.chiba.jp +yachimata.chiba.jp +yachiyo.chiba.jp +yokaichiba.chiba.jp +yokoshibahikari.chiba.jp +yotsukaido.chiba.jp +ainan.ehime.jp +honai.ehime.jp +ikata.ehime.jp +imabari.ehime.jp +iyo.ehime.jp +kamijima.ehime.jp +kihoku.ehime.jp +kumakogen.ehime.jp +masaki.ehime.jp +matsuno.ehime.jp +matsuyama.ehime.jp +namikata.ehime.jp +niihama.ehime.jp +ozu.ehime.jp +saijo.ehime.jp +seiyo.ehime.jp +shikokuchuo.ehime.jp +tobe.ehime.jp +toon.ehime.jp +uchiko.ehime.jp +uwajima.ehime.jp +yawatahama.ehime.jp +echizen.fukui.jp +eiheiji.fukui.jp +fukui.fukui.jp +ikeda.fukui.jp +katsuyama.fukui.jp +mihama.fukui.jp +minamiechizen.fukui.jp +obama.fukui.jp +ohi.fukui.jp +ono.fukui.jp +sabae.fukui.jp +sakai.fukui.jp +takahama.fukui.jp +tsuruga.fukui.jp +wakasa.fukui.jp +ashiya.fukuoka.jp +buzen.fukuoka.jp +chikugo.fukuoka.jp +chikuho.fukuoka.jp +chikujo.fukuoka.jp +chikushino.fukuoka.jp +chikuzen.fukuoka.jp +chuo.fukuoka.jp +dazaifu.fukuoka.jp +fukuchi.fukuoka.jp +hakata.fukuoka.jp +higashi.fukuoka.jp +hirokawa.fukuoka.jp +hisayama.fukuoka.jp +iizuka.fukuoka.jp +inatsuki.fukuoka.jp +kaho.fukuoka.jp +kasuga.fukuoka.jp +kasuya.fukuoka.jp +kawara.fukuoka.jp +keisen.fukuoka.jp +koga.fukuoka.jp +kurate.fukuoka.jp +kurogi.fukuoka.jp +kurume.fukuoka.jp +minami.fukuoka.jp +miyako.fukuoka.jp +miyama.fukuoka.jp +miyawaka.fukuoka.jp +mizumaki.fukuoka.jp +munakata.fukuoka.jp 
+nakagawa.fukuoka.jp +nakama.fukuoka.jp +nishi.fukuoka.jp +nogata.fukuoka.jp +ogori.fukuoka.jp +okagaki.fukuoka.jp +okawa.fukuoka.jp +oki.fukuoka.jp +omuta.fukuoka.jp +onga.fukuoka.jp +onojo.fukuoka.jp +oto.fukuoka.jp +saigawa.fukuoka.jp +sasaguri.fukuoka.jp +shingu.fukuoka.jp +shinyoshitomi.fukuoka.jp +shonai.fukuoka.jp +soeda.fukuoka.jp +sue.fukuoka.jp +tachiarai.fukuoka.jp +tagawa.fukuoka.jp +takata.fukuoka.jp +toho.fukuoka.jp +toyotsu.fukuoka.jp +tsuiki.fukuoka.jp +ukiha.fukuoka.jp +umi.fukuoka.jp +usui.fukuoka.jp +yamada.fukuoka.jp +yame.fukuoka.jp +yanagawa.fukuoka.jp +yukuhashi.fukuoka.jp +aizubange.fukushima.jp +aizumisato.fukushima.jp +aizuwakamatsu.fukushima.jp +asakawa.fukushima.jp +bandai.fukushima.jp +date.fukushima.jp +fukushima.fukushima.jp +furudono.fukushima.jp +futaba.fukushima.jp +hanawa.fukushima.jp +higashi.fukushima.jp +hirata.fukushima.jp +hirono.fukushima.jp +iitate.fukushima.jp +inawashiro.fukushima.jp +ishikawa.fukushima.jp +iwaki.fukushima.jp +izumizaki.fukushima.jp +kagamiishi.fukushima.jp +kaneyama.fukushima.jp +kawamata.fukushima.jp +kitakata.fukushima.jp +kitashiobara.fukushima.jp +koori.fukushima.jp +koriyama.fukushima.jp +kunimi.fukushima.jp +miharu.fukushima.jp +mishima.fukushima.jp +namie.fukushima.jp +nango.fukushima.jp +nishiaizu.fukushima.jp +nishigo.fukushima.jp +okuma.fukushima.jp +omotego.fukushima.jp +ono.fukushima.jp +otama.fukushima.jp +samegawa.fukushima.jp +shimogo.fukushima.jp +shirakawa.fukushima.jp +showa.fukushima.jp +soma.fukushima.jp +sukagawa.fukushima.jp +taishin.fukushima.jp +tamakawa.fukushima.jp +tanagura.fukushima.jp +tenei.fukushima.jp +yabuki.fukushima.jp +yamato.fukushima.jp +yamatsuri.fukushima.jp +yanaizu.fukushima.jp +yugawa.fukushima.jp +anpachi.gifu.jp +ena.gifu.jp +gifu.gifu.jp +ginan.gifu.jp +godo.gifu.jp +gujo.gifu.jp +hashima.gifu.jp +hichiso.gifu.jp +hida.gifu.jp +higashishirakawa.gifu.jp +ibigawa.gifu.jp +ikeda.gifu.jp +kakamigahara.gifu.jp +kani.gifu.jp +kasahara.gifu.jp +kasamatsu.gifu.jp 
+kawaue.gifu.jp +kitagata.gifu.jp +mino.gifu.jp +minokamo.gifu.jp +mitake.gifu.jp +mizunami.gifu.jp +motosu.gifu.jp +nakatsugawa.gifu.jp +ogaki.gifu.jp +sakahogi.gifu.jp +seki.gifu.jp +sekigahara.gifu.jp +shirakawa.gifu.jp +tajimi.gifu.jp +takayama.gifu.jp +tarui.gifu.jp +toki.gifu.jp +tomika.gifu.jp +wanouchi.gifu.jp +yamagata.gifu.jp +yaotsu.gifu.jp +yoro.gifu.jp +annaka.gunma.jp +chiyoda.gunma.jp +fujioka.gunma.jp +higashiagatsuma.gunma.jp +isesaki.gunma.jp +itakura.gunma.jp +kanna.gunma.jp +kanra.gunma.jp +katashina.gunma.jp +kawaba.gunma.jp +kiryu.gunma.jp +kusatsu.gunma.jp +maebashi.gunma.jp +meiwa.gunma.jp +midori.gunma.jp +minakami.gunma.jp +naganohara.gunma.jp +nakanojo.gunma.jp +nanmoku.gunma.jp +numata.gunma.jp +oizumi.gunma.jp +ora.gunma.jp +ota.gunma.jp +shibukawa.gunma.jp +shimonita.gunma.jp +shinto.gunma.jp +showa.gunma.jp +takasaki.gunma.jp +takayama.gunma.jp +tamamura.gunma.jp +tatebayashi.gunma.jp +tomioka.gunma.jp +tsukiyono.gunma.jp +tsumagoi.gunma.jp +ueno.gunma.jp +yoshioka.gunma.jp +asaminami.hiroshima.jp +daiwa.hiroshima.jp +etajima.hiroshima.jp +fuchu.hiroshima.jp +fukuyama.hiroshima.jp +hatsukaichi.hiroshima.jp +higashihiroshima.hiroshima.jp +hongo.hiroshima.jp +jinsekikogen.hiroshima.jp +kaita.hiroshima.jp +kui.hiroshima.jp +kumano.hiroshima.jp +kure.hiroshima.jp +mihara.hiroshima.jp +miyoshi.hiroshima.jp +naka.hiroshima.jp +onomichi.hiroshima.jp +osakikamijima.hiroshima.jp +otake.hiroshima.jp +saka.hiroshima.jp +sera.hiroshima.jp +seranishi.hiroshima.jp +shinichi.hiroshima.jp +shobara.hiroshima.jp +takehara.hiroshima.jp +abashiri.hokkaido.jp +abira.hokkaido.jp +aibetsu.hokkaido.jp +akabira.hokkaido.jp +akkeshi.hokkaido.jp +asahikawa.hokkaido.jp +ashibetsu.hokkaido.jp +ashoro.hokkaido.jp +assabu.hokkaido.jp +atsuma.hokkaido.jp +bibai.hokkaido.jp +biei.hokkaido.jp +bifuka.hokkaido.jp +bihoro.hokkaido.jp +biratori.hokkaido.jp +chippubetsu.hokkaido.jp +chitose.hokkaido.jp +date.hokkaido.jp +ebetsu.hokkaido.jp +embetsu.hokkaido.jp 
+eniwa.hokkaido.jp +erimo.hokkaido.jp +esan.hokkaido.jp +esashi.hokkaido.jp +fukagawa.hokkaido.jp +fukushima.hokkaido.jp +furano.hokkaido.jp +furubira.hokkaido.jp +haboro.hokkaido.jp +hakodate.hokkaido.jp +hamatonbetsu.hokkaido.jp +hidaka.hokkaido.jp +higashikagura.hokkaido.jp +higashikawa.hokkaido.jp +hiroo.hokkaido.jp +hokuryu.hokkaido.jp +hokuto.hokkaido.jp +honbetsu.hokkaido.jp +horokanai.hokkaido.jp +horonobe.hokkaido.jp +ikeda.hokkaido.jp +imakane.hokkaido.jp +ishikari.hokkaido.jp +iwamizawa.hokkaido.jp +iwanai.hokkaido.jp +kamifurano.hokkaido.jp +kamikawa.hokkaido.jp +kamishihoro.hokkaido.jp +kamisunagawa.hokkaido.jp +kamoenai.hokkaido.jp +kayabe.hokkaido.jp +kembuchi.hokkaido.jp +kikonai.hokkaido.jp +kimobetsu.hokkaido.jp +kitahiroshima.hokkaido.jp +kitami.hokkaido.jp +kiyosato.hokkaido.jp +koshimizu.hokkaido.jp +kunneppu.hokkaido.jp +kuriyama.hokkaido.jp +kuromatsunai.hokkaido.jp +kushiro.hokkaido.jp +kutchan.hokkaido.jp +kyowa.hokkaido.jp +mashike.hokkaido.jp +matsumae.hokkaido.jp +mikasa.hokkaido.jp +minamifurano.hokkaido.jp +mombetsu.hokkaido.jp +moseushi.hokkaido.jp +mukawa.hokkaido.jp +muroran.hokkaido.jp +naie.hokkaido.jp +nakagawa.hokkaido.jp +nakasatsunai.hokkaido.jp +nakatombetsu.hokkaido.jp +nanae.hokkaido.jp +nanporo.hokkaido.jp +nayoro.hokkaido.jp +nemuro.hokkaido.jp +niikappu.hokkaido.jp +niki.hokkaido.jp +nishiokoppe.hokkaido.jp +noboribetsu.hokkaido.jp +numata.hokkaido.jp +obihiro.hokkaido.jp +obira.hokkaido.jp +oketo.hokkaido.jp +okoppe.hokkaido.jp +otaru.hokkaido.jp +otobe.hokkaido.jp +otofuke.hokkaido.jp +otoineppu.hokkaido.jp +oumu.hokkaido.jp +ozora.hokkaido.jp +pippu.hokkaido.jp +rankoshi.hokkaido.jp +rebun.hokkaido.jp +rikubetsu.hokkaido.jp +rishiri.hokkaido.jp +rishirifuji.hokkaido.jp +saroma.hokkaido.jp +sarufutsu.hokkaido.jp +shakotan.hokkaido.jp +shari.hokkaido.jp +shibecha.hokkaido.jp +shibetsu.hokkaido.jp +shikabe.hokkaido.jp +shikaoi.hokkaido.jp +shimamaki.hokkaido.jp +shimizu.hokkaido.jp +shimokawa.hokkaido.jp 
+shinshinotsu.hokkaido.jp +shintoku.hokkaido.jp +shiranuka.hokkaido.jp +shiraoi.hokkaido.jp +shiriuchi.hokkaido.jp +sobetsu.hokkaido.jp +sunagawa.hokkaido.jp +taiki.hokkaido.jp +takasu.hokkaido.jp +takikawa.hokkaido.jp +takinoue.hokkaido.jp +teshikaga.hokkaido.jp +tobetsu.hokkaido.jp +tohma.hokkaido.jp +tomakomai.hokkaido.jp +tomari.hokkaido.jp +toya.hokkaido.jp +toyako.hokkaido.jp +toyotomi.hokkaido.jp +toyoura.hokkaido.jp +tsubetsu.hokkaido.jp +tsukigata.hokkaido.jp +urakawa.hokkaido.jp +urausu.hokkaido.jp +uryu.hokkaido.jp +utashinai.hokkaido.jp +wakkanai.hokkaido.jp +wassamu.hokkaido.jp +yakumo.hokkaido.jp +yoichi.hokkaido.jp +aioi.hyogo.jp +akashi.hyogo.jp +ako.hyogo.jp +amagasaki.hyogo.jp +aogaki.hyogo.jp +asago.hyogo.jp +ashiya.hyogo.jp +awaji.hyogo.jp +fukusaki.hyogo.jp +goshiki.hyogo.jp +harima.hyogo.jp +himeji.hyogo.jp +ichikawa.hyogo.jp +inagawa.hyogo.jp +itami.hyogo.jp +kakogawa.hyogo.jp +kamigori.hyogo.jp +kamikawa.hyogo.jp +kasai.hyogo.jp +kasuga.hyogo.jp +kawanishi.hyogo.jp +miki.hyogo.jp +minamiawaji.hyogo.jp +nishinomiya.hyogo.jp +nishiwaki.hyogo.jp +ono.hyogo.jp +sanda.hyogo.jp +sannan.hyogo.jp +sasayama.hyogo.jp +sayo.hyogo.jp +shingu.hyogo.jp +shinonsen.hyogo.jp +shiso.hyogo.jp +sumoto.hyogo.jp +taishi.hyogo.jp +taka.hyogo.jp +takarazuka.hyogo.jp +takasago.hyogo.jp +takino.hyogo.jp +tamba.hyogo.jp +tatsuno.hyogo.jp +toyooka.hyogo.jp +yabu.hyogo.jp +yashiro.hyogo.jp +yoka.hyogo.jp +yokawa.hyogo.jp +ami.ibaraki.jp +asahi.ibaraki.jp +bando.ibaraki.jp +chikusei.ibaraki.jp +daigo.ibaraki.jp +fujishiro.ibaraki.jp +hitachi.ibaraki.jp +hitachinaka.ibaraki.jp +hitachiomiya.ibaraki.jp +hitachiota.ibaraki.jp +ibaraki.ibaraki.jp +ina.ibaraki.jp +inashiki.ibaraki.jp +itako.ibaraki.jp +iwama.ibaraki.jp +joso.ibaraki.jp +kamisu.ibaraki.jp +kasama.ibaraki.jp +kashima.ibaraki.jp +kasumigaura.ibaraki.jp +koga.ibaraki.jp +miho.ibaraki.jp +mito.ibaraki.jp +moriya.ibaraki.jp +naka.ibaraki.jp +namegata.ibaraki.jp +oarai.ibaraki.jp +ogawa.ibaraki.jp 
+omitama.ibaraki.jp +ryugasaki.ibaraki.jp +sakai.ibaraki.jp +sakuragawa.ibaraki.jp +shimodate.ibaraki.jp +shimotsuma.ibaraki.jp +shirosato.ibaraki.jp +sowa.ibaraki.jp +suifu.ibaraki.jp +takahagi.ibaraki.jp +tamatsukuri.ibaraki.jp +tokai.ibaraki.jp +tomobe.ibaraki.jp +tone.ibaraki.jp +toride.ibaraki.jp +tsuchiura.ibaraki.jp +tsukuba.ibaraki.jp +uchihara.ibaraki.jp +ushiku.ibaraki.jp +yachiyo.ibaraki.jp +yamagata.ibaraki.jp +yawara.ibaraki.jp +yuki.ibaraki.jp +anamizu.ishikawa.jp +hakui.ishikawa.jp +hakusan.ishikawa.jp +kaga.ishikawa.jp +kahoku.ishikawa.jp +kanazawa.ishikawa.jp +kawakita.ishikawa.jp +komatsu.ishikawa.jp +nakanoto.ishikawa.jp +nanao.ishikawa.jp +nomi.ishikawa.jp +nonoichi.ishikawa.jp +noto.ishikawa.jp +shika.ishikawa.jp +suzu.ishikawa.jp +tsubata.ishikawa.jp +tsurugi.ishikawa.jp +uchinada.ishikawa.jp +wajima.ishikawa.jp +fudai.iwate.jp +fujisawa.iwate.jp +hanamaki.iwate.jp +hiraizumi.iwate.jp +hirono.iwate.jp +ichinohe.iwate.jp +ichinoseki.iwate.jp +iwaizumi.iwate.jp +iwate.iwate.jp +joboji.iwate.jp +kamaishi.iwate.jp +kanegasaki.iwate.jp +karumai.iwate.jp +kawai.iwate.jp +kitakami.iwate.jp +kuji.iwate.jp +kunohe.iwate.jp +kuzumaki.iwate.jp +miyako.iwate.jp +mizusawa.iwate.jp +morioka.iwate.jp +ninohe.iwate.jp +noda.iwate.jp +ofunato.iwate.jp +oshu.iwate.jp +otsuchi.iwate.jp +rikuzentakata.iwate.jp +shiwa.iwate.jp +shizukuishi.iwate.jp +sumita.iwate.jp +tanohata.iwate.jp +tono.iwate.jp +yahaba.iwate.jp +yamada.iwate.jp +ayagawa.kagawa.jp +higashikagawa.kagawa.jp +kanonji.kagawa.jp +kotohira.kagawa.jp +manno.kagawa.jp +marugame.kagawa.jp +mitoyo.kagawa.jp +naoshima.kagawa.jp +sanuki.kagawa.jp +tadotsu.kagawa.jp +takamatsu.kagawa.jp +tonosho.kagawa.jp +uchinomi.kagawa.jp +utazu.kagawa.jp +zentsuji.kagawa.jp +akune.kagoshima.jp +amami.kagoshima.jp +hioki.kagoshima.jp +isa.kagoshima.jp +isen.kagoshima.jp +izumi.kagoshima.jp +kagoshima.kagoshima.jp +kanoya.kagoshima.jp +kawanabe.kagoshima.jp +kinko.kagoshima.jp +kouyama.kagoshima.jp 
+makurazaki.kagoshima.jp +matsumoto.kagoshima.jp +minamitane.kagoshima.jp +nakatane.kagoshima.jp +nishinoomote.kagoshima.jp +satsumasendai.kagoshima.jp +soo.kagoshima.jp +tarumizu.kagoshima.jp +yusui.kagoshima.jp +aikawa.kanagawa.jp +atsugi.kanagawa.jp +ayase.kanagawa.jp +chigasaki.kanagawa.jp +ebina.kanagawa.jp +fujisawa.kanagawa.jp +hadano.kanagawa.jp +hakone.kanagawa.jp +hiratsuka.kanagawa.jp +isehara.kanagawa.jp +kaisei.kanagawa.jp +kamakura.kanagawa.jp +kiyokawa.kanagawa.jp +matsuda.kanagawa.jp +minamiashigara.kanagawa.jp +miura.kanagawa.jp +nakai.kanagawa.jp +ninomiya.kanagawa.jp +odawara.kanagawa.jp +oi.kanagawa.jp +oiso.kanagawa.jp +sagamihara.kanagawa.jp +samukawa.kanagawa.jp +tsukui.kanagawa.jp +yamakita.kanagawa.jp +yamato.kanagawa.jp +yokosuka.kanagawa.jp +yugawara.kanagawa.jp +zama.kanagawa.jp +zushi.kanagawa.jp +aki.kochi.jp +geisei.kochi.jp +hidaka.kochi.jp +higashitsuno.kochi.jp +ino.kochi.jp +kagami.kochi.jp +kami.kochi.jp +kitagawa.kochi.jp +kochi.kochi.jp +mihara.kochi.jp +motoyama.kochi.jp +muroto.kochi.jp +nahari.kochi.jp +nakamura.kochi.jp +nankoku.kochi.jp +nishitosa.kochi.jp +niyodogawa.kochi.jp +ochi.kochi.jp +okawa.kochi.jp +otoyo.kochi.jp +otsuki.kochi.jp +sakawa.kochi.jp +sukumo.kochi.jp +susaki.kochi.jp +tosa.kochi.jp +tosashimizu.kochi.jp +toyo.kochi.jp +tsuno.kochi.jp +umaji.kochi.jp +yasuda.kochi.jp +yusuhara.kochi.jp +amakusa.kumamoto.jp +arao.kumamoto.jp +aso.kumamoto.jp +choyo.kumamoto.jp +gyokuto.kumamoto.jp +kamiamakusa.kumamoto.jp +kikuchi.kumamoto.jp +kumamoto.kumamoto.jp +mashiki.kumamoto.jp +mifune.kumamoto.jp +minamata.kumamoto.jp +minamioguni.kumamoto.jp +nagasu.kumamoto.jp +nishihara.kumamoto.jp +oguni.kumamoto.jp +ozu.kumamoto.jp +sumoto.kumamoto.jp +takamori.kumamoto.jp +uki.kumamoto.jp +uto.kumamoto.jp +yamaga.kumamoto.jp +yamato.kumamoto.jp +yatsushiro.kumamoto.jp +ayabe.kyoto.jp +fukuchiyama.kyoto.jp +higashiyama.kyoto.jp +ide.kyoto.jp +ine.kyoto.jp +joyo.kyoto.jp +kameoka.kyoto.jp +kamo.kyoto.jp +kita.kyoto.jp 
+kizu.kyoto.jp +kumiyama.kyoto.jp +kyotamba.kyoto.jp +kyotanabe.kyoto.jp +kyotango.kyoto.jp +maizuru.kyoto.jp +minami.kyoto.jp +minamiyamashiro.kyoto.jp +miyazu.kyoto.jp +muko.kyoto.jp +nagaokakyo.kyoto.jp +nakagyo.kyoto.jp +nantan.kyoto.jp +oyamazaki.kyoto.jp +sakyo.kyoto.jp +seika.kyoto.jp +tanabe.kyoto.jp +uji.kyoto.jp +ujitawara.kyoto.jp +wazuka.kyoto.jp +yamashina.kyoto.jp +yawata.kyoto.jp +asahi.mie.jp +inabe.mie.jp +ise.mie.jp +kameyama.mie.jp +kawagoe.mie.jp +kiho.mie.jp +kisosaki.mie.jp +kiwa.mie.jp +komono.mie.jp +kumano.mie.jp +kuwana.mie.jp +matsusaka.mie.jp +meiwa.mie.jp +mihama.mie.jp +minamiise.mie.jp +misugi.mie.jp +miyama.mie.jp +nabari.mie.jp +shima.mie.jp +suzuka.mie.jp +tado.mie.jp +taiki.mie.jp +taki.mie.jp +tamaki.mie.jp +toba.mie.jp +tsu.mie.jp +udono.mie.jp +ureshino.mie.jp +watarai.mie.jp +yokkaichi.mie.jp +furukawa.miyagi.jp +higashimatsushima.miyagi.jp +ishinomaki.miyagi.jp +iwanuma.miyagi.jp +kakuda.miyagi.jp +kami.miyagi.jp +kawasaki.miyagi.jp +marumori.miyagi.jp +matsushima.miyagi.jp +minamisanriku.miyagi.jp +misato.miyagi.jp +murata.miyagi.jp +natori.miyagi.jp +ogawara.miyagi.jp +ohira.miyagi.jp +onagawa.miyagi.jp +osaki.miyagi.jp +rifu.miyagi.jp +semine.miyagi.jp +shibata.miyagi.jp +shichikashuku.miyagi.jp +shikama.miyagi.jp +shiogama.miyagi.jp +shiroishi.miyagi.jp +tagajo.miyagi.jp +taiwa.miyagi.jp +tome.miyagi.jp +tomiya.miyagi.jp +wakuya.miyagi.jp +watari.miyagi.jp +yamamoto.miyagi.jp +zao.miyagi.jp +aya.miyazaki.jp +ebino.miyazaki.jp +gokase.miyazaki.jp +hyuga.miyazaki.jp +kadogawa.miyazaki.jp +kawaminami.miyazaki.jp +kijo.miyazaki.jp +kitagawa.miyazaki.jp +kitakata.miyazaki.jp +kitaura.miyazaki.jp +kobayashi.miyazaki.jp +kunitomi.miyazaki.jp +kushima.miyazaki.jp +mimata.miyazaki.jp +miyakonojo.miyazaki.jp +miyazaki.miyazaki.jp +morotsuka.miyazaki.jp +nichinan.miyazaki.jp +nishimera.miyazaki.jp +nobeoka.miyazaki.jp +saito.miyazaki.jp +shiiba.miyazaki.jp +shintomi.miyazaki.jp +takaharu.miyazaki.jp +takanabe.miyazaki.jp 
+takazaki.miyazaki.jp +tsuno.miyazaki.jp +achi.nagano.jp +agematsu.nagano.jp +anan.nagano.jp +aoki.nagano.jp +asahi.nagano.jp +azumino.nagano.jp +chikuhoku.nagano.jp +chikuma.nagano.jp +chino.nagano.jp +fujimi.nagano.jp +hakuba.nagano.jp +hara.nagano.jp +hiraya.nagano.jp +iida.nagano.jp +iijima.nagano.jp +iiyama.nagano.jp +iizuna.nagano.jp +ikeda.nagano.jp +ikusaka.nagano.jp +ina.nagano.jp +karuizawa.nagano.jp +kawakami.nagano.jp +kiso.nagano.jp +kisofukushima.nagano.jp +kitaaiki.nagano.jp +komagane.nagano.jp +komoro.nagano.jp +matsukawa.nagano.jp +matsumoto.nagano.jp +miasa.nagano.jp +minamiaiki.nagano.jp +minamimaki.nagano.jp +minamiminowa.nagano.jp +minowa.nagano.jp +miyada.nagano.jp +miyota.nagano.jp +mochizuki.nagano.jp +nagano.nagano.jp +nagawa.nagano.jp +nagiso.nagano.jp +nakagawa.nagano.jp +nakano.nagano.jp +nozawaonsen.nagano.jp +obuse.nagano.jp +ogawa.nagano.jp +okaya.nagano.jp +omachi.nagano.jp +omi.nagano.jp +ookuwa.nagano.jp +ooshika.nagano.jp +otaki.nagano.jp +otari.nagano.jp +sakae.nagano.jp +sakaki.nagano.jp +saku.nagano.jp +sakuho.nagano.jp +shimosuwa.nagano.jp +shinanomachi.nagano.jp +shiojiri.nagano.jp +suwa.nagano.jp +suzaka.nagano.jp +takagi.nagano.jp +takamori.nagano.jp +takayama.nagano.jp +tateshina.nagano.jp +tatsuno.nagano.jp +togakushi.nagano.jp +togura.nagano.jp +tomi.nagano.jp +ueda.nagano.jp +wada.nagano.jp +yamagata.nagano.jp +yamanouchi.nagano.jp +yasaka.nagano.jp +yasuoka.nagano.jp +chijiwa.nagasaki.jp +futsu.nagasaki.jp +goto.nagasaki.jp +hasami.nagasaki.jp +hirado.nagasaki.jp +iki.nagasaki.jp +isahaya.nagasaki.jp +kawatana.nagasaki.jp +kuchinotsu.nagasaki.jp +matsuura.nagasaki.jp +nagasaki.nagasaki.jp +obama.nagasaki.jp +omura.nagasaki.jp +oseto.nagasaki.jp +saikai.nagasaki.jp +sasebo.nagasaki.jp +seihi.nagasaki.jp +shimabara.nagasaki.jp +shinkamigoto.nagasaki.jp +togitsu.nagasaki.jp +tsushima.nagasaki.jp +unzen.nagasaki.jp +ando.nara.jp +gose.nara.jp +heguri.nara.jp +higashiyoshino.nara.jp +ikaruga.nara.jp +ikoma.nara.jp 
+kamikitayama.nara.jp +kanmaki.nara.jp +kashiba.nara.jp +kashihara.nara.jp +katsuragi.nara.jp +kawai.nara.jp +kawakami.nara.jp +kawanishi.nara.jp +koryo.nara.jp +kurotaki.nara.jp +mitsue.nara.jp +miyake.nara.jp +nara.nara.jp +nosegawa.nara.jp +oji.nara.jp +ouda.nara.jp +oyodo.nara.jp +sakurai.nara.jp +sango.nara.jp +shimoichi.nara.jp +shimokitayama.nara.jp +shinjo.nara.jp +soni.nara.jp +takatori.nara.jp +tawaramoto.nara.jp +tenkawa.nara.jp +tenri.nara.jp +uda.nara.jp +yamatokoriyama.nara.jp +yamatotakada.nara.jp +yamazoe.nara.jp +yoshino.nara.jp +aga.niigata.jp +agano.niigata.jp +gosen.niigata.jp +itoigawa.niigata.jp +izumozaki.niigata.jp +joetsu.niigata.jp +kamo.niigata.jp +kariwa.niigata.jp +kashiwazaki.niigata.jp +minamiuonuma.niigata.jp +mitsuke.niigata.jp +muika.niigata.jp +murakami.niigata.jp +myoko.niigata.jp +nagaoka.niigata.jp +niigata.niigata.jp +ojiya.niigata.jp +omi.niigata.jp +sado.niigata.jp +sanjo.niigata.jp +seiro.niigata.jp +seirou.niigata.jp +sekikawa.niigata.jp +shibata.niigata.jp +tagami.niigata.jp +tainai.niigata.jp +tochio.niigata.jp +tokamachi.niigata.jp +tsubame.niigata.jp +tsunan.niigata.jp +uonuma.niigata.jp +yahiko.niigata.jp +yoita.niigata.jp +yuzawa.niigata.jp +beppu.oita.jp +bungoono.oita.jp +bungotakada.oita.jp +hasama.oita.jp +hiji.oita.jp +himeshima.oita.jp +hita.oita.jp +kamitsue.oita.jp +kokonoe.oita.jp +kuju.oita.jp +kunisaki.oita.jp +kusu.oita.jp +oita.oita.jp +saiki.oita.jp +taketa.oita.jp +tsukumi.oita.jp +usa.oita.jp +usuki.oita.jp +yufu.oita.jp +akaiwa.okayama.jp +asakuchi.okayama.jp +bizen.okayama.jp +hayashima.okayama.jp +ibara.okayama.jp +kagamino.okayama.jp +kasaoka.okayama.jp +kibichuo.okayama.jp +kumenan.okayama.jp +kurashiki.okayama.jp +maniwa.okayama.jp +misaki.okayama.jp +nagi.okayama.jp +niimi.okayama.jp +nishiawakura.okayama.jp +okayama.okayama.jp +satosho.okayama.jp +setouchi.okayama.jp +shinjo.okayama.jp +shoo.okayama.jp +soja.okayama.jp +takahashi.okayama.jp +tamano.okayama.jp +tsuyama.okayama.jp 
+wake.okayama.jp +yakage.okayama.jp +aguni.okinawa.jp +ginowan.okinawa.jp +ginoza.okinawa.jp +gushikami.okinawa.jp +haebaru.okinawa.jp +higashi.okinawa.jp +hirara.okinawa.jp +iheya.okinawa.jp +ishigaki.okinawa.jp +ishikawa.okinawa.jp +itoman.okinawa.jp +izena.okinawa.jp +kadena.okinawa.jp +kin.okinawa.jp +kitadaito.okinawa.jp +kitanakagusuku.okinawa.jp +kumejima.okinawa.jp +kunigami.okinawa.jp +minamidaito.okinawa.jp +motobu.okinawa.jp +nago.okinawa.jp +naha.okinawa.jp +nakagusuku.okinawa.jp +nakijin.okinawa.jp +nanjo.okinawa.jp +nishihara.okinawa.jp +ogimi.okinawa.jp +okinawa.okinawa.jp +onna.okinawa.jp +shimoji.okinawa.jp +taketomi.okinawa.jp +tarama.okinawa.jp +tokashiki.okinawa.jp +tomigusuku.okinawa.jp +tonaki.okinawa.jp +urasoe.okinawa.jp +uruma.okinawa.jp +yaese.okinawa.jp +yomitan.okinawa.jp +yonabaru.okinawa.jp +yonaguni.okinawa.jp +zamami.okinawa.jp +abeno.osaka.jp +chihayaakasaka.osaka.jp +chuo.osaka.jp +daito.osaka.jp +fujiidera.osaka.jp +habikino.osaka.jp +hannan.osaka.jp +higashiosaka.osaka.jp +higashisumiyoshi.osaka.jp +higashiyodogawa.osaka.jp +hirakata.osaka.jp +ibaraki.osaka.jp +ikeda.osaka.jp +izumi.osaka.jp +izumiotsu.osaka.jp +izumisano.osaka.jp +kadoma.osaka.jp +kaizuka.osaka.jp +kanan.osaka.jp +kashiwara.osaka.jp +katano.osaka.jp +kawachinagano.osaka.jp +kishiwada.osaka.jp +kita.osaka.jp +kumatori.osaka.jp +matsubara.osaka.jp +minato.osaka.jp +minoh.osaka.jp +misaki.osaka.jp +moriguchi.osaka.jp +neyagawa.osaka.jp +nishi.osaka.jp +nose.osaka.jp +osakasayama.osaka.jp +sakai.osaka.jp +sayama.osaka.jp +sennan.osaka.jp +settsu.osaka.jp +shijonawate.osaka.jp +shimamoto.osaka.jp +suita.osaka.jp +tadaoka.osaka.jp +taishi.osaka.jp +tajiri.osaka.jp +takaishi.osaka.jp +takatsuki.osaka.jp +tondabayashi.osaka.jp +toyonaka.osaka.jp +toyono.osaka.jp +yao.osaka.jp +ariake.saga.jp +arita.saga.jp +fukudomi.saga.jp +genkai.saga.jp +hamatama.saga.jp +hizen.saga.jp +imari.saga.jp +kamimine.saga.jp +kanzaki.saga.jp +karatsu.saga.jp +kashima.saga.jp 
+kitagata.saga.jp +kitahata.saga.jp +kiyama.saga.jp +kouhoku.saga.jp +kyuragi.saga.jp +nishiarita.saga.jp +ogi.saga.jp +omachi.saga.jp +ouchi.saga.jp +saga.saga.jp +shiroishi.saga.jp +taku.saga.jp +tara.saga.jp +tosu.saga.jp +yoshinogari.saga.jp +arakawa.saitama.jp +asaka.saitama.jp +chichibu.saitama.jp +fujimi.saitama.jp +fujimino.saitama.jp +fukaya.saitama.jp +hanno.saitama.jp +hanyu.saitama.jp +hasuda.saitama.jp +hatogaya.saitama.jp +hatoyama.saitama.jp +hidaka.saitama.jp +higashichichibu.saitama.jp +higashimatsuyama.saitama.jp +honjo.saitama.jp +ina.saitama.jp +iruma.saitama.jp +iwatsuki.saitama.jp +kamiizumi.saitama.jp +kamikawa.saitama.jp +kamisato.saitama.jp +kasukabe.saitama.jp +kawagoe.saitama.jp +kawaguchi.saitama.jp +kawajima.saitama.jp +kazo.saitama.jp +kitamoto.saitama.jp +koshigaya.saitama.jp +kounosu.saitama.jp +kuki.saitama.jp +kumagaya.saitama.jp +matsubushi.saitama.jp +minano.saitama.jp +misato.saitama.jp +miyashiro.saitama.jp +miyoshi.saitama.jp +moroyama.saitama.jp +nagatoro.saitama.jp +namegawa.saitama.jp +niiza.saitama.jp +ogano.saitama.jp +ogawa.saitama.jp +ogose.saitama.jp +okegawa.saitama.jp +omiya.saitama.jp +otaki.saitama.jp +ranzan.saitama.jp +ryokami.saitama.jp +saitama.saitama.jp +sakado.saitama.jp +satte.saitama.jp +sayama.saitama.jp +shiki.saitama.jp +shiraoka.saitama.jp +soka.saitama.jp +sugito.saitama.jp +toda.saitama.jp +tokigawa.saitama.jp +tokorozawa.saitama.jp +tsurugashima.saitama.jp +urawa.saitama.jp +warabi.saitama.jp +yashio.saitama.jp +yokoze.saitama.jp +yono.saitama.jp +yorii.saitama.jp +yoshida.saitama.jp +yoshikawa.saitama.jp +yoshimi.saitama.jp +aisho.shiga.jp +gamo.shiga.jp +higashiomi.shiga.jp +hikone.shiga.jp +koka.shiga.jp +konan.shiga.jp +kosei.shiga.jp +koto.shiga.jp +kusatsu.shiga.jp +maibara.shiga.jp +moriyama.shiga.jp +nagahama.shiga.jp +nishiazai.shiga.jp +notogawa.shiga.jp +omihachiman.shiga.jp +otsu.shiga.jp +ritto.shiga.jp +ryuoh.shiga.jp +takashima.shiga.jp +takatsuki.shiga.jp +torahime.shiga.jp 
+toyosato.shiga.jp +yasu.shiga.jp +akagi.shimane.jp +ama.shimane.jp +gotsu.shimane.jp +hamada.shimane.jp +higashiizumo.shimane.jp +hikawa.shimane.jp +hikimi.shimane.jp +izumo.shimane.jp +kakinoki.shimane.jp +masuda.shimane.jp +matsue.shimane.jp +misato.shimane.jp +nishinoshima.shimane.jp +ohda.shimane.jp +okinoshima.shimane.jp +okuizumo.shimane.jp +shimane.shimane.jp +tamayu.shimane.jp +tsuwano.shimane.jp +unnan.shimane.jp +yakumo.shimane.jp +yasugi.shimane.jp +yatsuka.shimane.jp +arai.shizuoka.jp +atami.shizuoka.jp +fuji.shizuoka.jp +fujieda.shizuoka.jp +fujikawa.shizuoka.jp +fujinomiya.shizuoka.jp +fukuroi.shizuoka.jp +gotemba.shizuoka.jp +haibara.shizuoka.jp +hamamatsu.shizuoka.jp +higashiizu.shizuoka.jp +ito.shizuoka.jp +iwata.shizuoka.jp +izu.shizuoka.jp +izunokuni.shizuoka.jp +kakegawa.shizuoka.jp +kannami.shizuoka.jp +kawanehon.shizuoka.jp +kawazu.shizuoka.jp +kikugawa.shizuoka.jp +kosai.shizuoka.jp +makinohara.shizuoka.jp +matsuzaki.shizuoka.jp +minamiizu.shizuoka.jp +mishima.shizuoka.jp +morimachi.shizuoka.jp +nishiizu.shizuoka.jp +numazu.shizuoka.jp +omaezaki.shizuoka.jp +shimada.shizuoka.jp +shimizu.shizuoka.jp +shimoda.shizuoka.jp +shizuoka.shizuoka.jp +susono.shizuoka.jp +yaizu.shizuoka.jp +yoshida.shizuoka.jp +ashikaga.tochigi.jp +bato.tochigi.jp +haga.tochigi.jp +ichikai.tochigi.jp +iwafune.tochigi.jp +kaminokawa.tochigi.jp +kanuma.tochigi.jp +karasuyama.tochigi.jp +kuroiso.tochigi.jp +mashiko.tochigi.jp +mibu.tochigi.jp +moka.tochigi.jp +motegi.tochigi.jp +nasu.tochigi.jp +nasushiobara.tochigi.jp +nikko.tochigi.jp +nishikata.tochigi.jp +nogi.tochigi.jp +ohira.tochigi.jp +ohtawara.tochigi.jp +oyama.tochigi.jp +sakura.tochigi.jp +sano.tochigi.jp +shimotsuke.tochigi.jp +shioya.tochigi.jp +takanezawa.tochigi.jp +tochigi.tochigi.jp +tsuga.tochigi.jp +ujiie.tochigi.jp +utsunomiya.tochigi.jp +yaita.tochigi.jp +aizumi.tokushima.jp +anan.tokushima.jp +ichiba.tokushima.jp +itano.tokushima.jp +kainan.tokushima.jp +komatsushima.tokushima.jp 
+matsushige.tokushima.jp +mima.tokushima.jp +minami.tokushima.jp +miyoshi.tokushima.jp +mugi.tokushima.jp +nakagawa.tokushima.jp +naruto.tokushima.jp +sanagochi.tokushima.jp +shishikui.tokushima.jp +tokushima.tokushima.jp +wajiki.tokushima.jp +adachi.tokyo.jp +akiruno.tokyo.jp +akishima.tokyo.jp +aogashima.tokyo.jp +arakawa.tokyo.jp +bunkyo.tokyo.jp +chiyoda.tokyo.jp +chofu.tokyo.jp +chuo.tokyo.jp +edogawa.tokyo.jp +fuchu.tokyo.jp +fussa.tokyo.jp +hachijo.tokyo.jp +hachioji.tokyo.jp +hamura.tokyo.jp +higashikurume.tokyo.jp +higashimurayama.tokyo.jp +higashiyamato.tokyo.jp +hino.tokyo.jp +hinode.tokyo.jp +hinohara.tokyo.jp +inagi.tokyo.jp +itabashi.tokyo.jp +katsushika.tokyo.jp +kita.tokyo.jp +kiyose.tokyo.jp +kodaira.tokyo.jp +koganei.tokyo.jp +kokubunji.tokyo.jp +komae.tokyo.jp +koto.tokyo.jp +kouzushima.tokyo.jp +kunitachi.tokyo.jp +machida.tokyo.jp +meguro.tokyo.jp +minato.tokyo.jp +mitaka.tokyo.jp +mizuho.tokyo.jp +musashimurayama.tokyo.jp +musashino.tokyo.jp +nakano.tokyo.jp +nerima.tokyo.jp +ogasawara.tokyo.jp +okutama.tokyo.jp +ome.tokyo.jp +oshima.tokyo.jp +ota.tokyo.jp +setagaya.tokyo.jp +shibuya.tokyo.jp +shinagawa.tokyo.jp +shinjuku.tokyo.jp +suginami.tokyo.jp +sumida.tokyo.jp +tachikawa.tokyo.jp +taito.tokyo.jp +tama.tokyo.jp +toshima.tokyo.jp +chizu.tottori.jp +hino.tottori.jp +kawahara.tottori.jp +koge.tottori.jp +kotoura.tottori.jp +misasa.tottori.jp +nanbu.tottori.jp +nichinan.tottori.jp +sakaiminato.tottori.jp +tottori.tottori.jp +wakasa.tottori.jp +yazu.tottori.jp +yonago.tottori.jp +asahi.toyama.jp +fuchu.toyama.jp +fukumitsu.toyama.jp +funahashi.toyama.jp +himi.toyama.jp +imizu.toyama.jp +inami.toyama.jp +johana.toyama.jp +kamiichi.toyama.jp +kurobe.toyama.jp +nakaniikawa.toyama.jp +namerikawa.toyama.jp +nanto.toyama.jp +nyuzen.toyama.jp +oyabe.toyama.jp +taira.toyama.jp +takaoka.toyama.jp +tateyama.toyama.jp +toga.toyama.jp +tonami.toyama.jp +toyama.toyama.jp +unazuki.toyama.jp +uozu.toyama.jp +yamada.toyama.jp +arida.wakayama.jp 
+aridagawa.wakayama.jp +gobo.wakayama.jp +hashimoto.wakayama.jp +hidaka.wakayama.jp +hirogawa.wakayama.jp +inami.wakayama.jp +iwade.wakayama.jp +kainan.wakayama.jp +kamitonda.wakayama.jp +katsuragi.wakayama.jp +kimino.wakayama.jp +kinokawa.wakayama.jp +kitayama.wakayama.jp +koya.wakayama.jp +koza.wakayama.jp +kozagawa.wakayama.jp +kudoyama.wakayama.jp +kushimoto.wakayama.jp +mihama.wakayama.jp +misato.wakayama.jp +nachikatsuura.wakayama.jp +shingu.wakayama.jp +shirahama.wakayama.jp +taiji.wakayama.jp +tanabe.wakayama.jp +wakayama.wakayama.jp +yuasa.wakayama.jp +yura.wakayama.jp +asahi.yamagata.jp +funagata.yamagata.jp +higashine.yamagata.jp +iide.yamagata.jp +kahoku.yamagata.jp +kaminoyama.yamagata.jp +kaneyama.yamagata.jp +kawanishi.yamagata.jp +mamurogawa.yamagata.jp +mikawa.yamagata.jp +murayama.yamagata.jp +nagai.yamagata.jp +nakayama.yamagata.jp +nanyo.yamagata.jp +nishikawa.yamagata.jp +obanazawa.yamagata.jp +oe.yamagata.jp +oguni.yamagata.jp +ohkura.yamagata.jp +oishida.yamagata.jp +sagae.yamagata.jp +sakata.yamagata.jp +sakegawa.yamagata.jp +shinjo.yamagata.jp +shirataka.yamagata.jp +shonai.yamagata.jp +takahata.yamagata.jp +tendo.yamagata.jp +tozawa.yamagata.jp +tsuruoka.yamagata.jp +yamagata.yamagata.jp +yamanobe.yamagata.jp +yonezawa.yamagata.jp +yuza.yamagata.jp +abu.yamaguchi.jp +hagi.yamaguchi.jp +hikari.yamaguchi.jp +hofu.yamaguchi.jp +iwakuni.yamaguchi.jp +kudamatsu.yamaguchi.jp +mitou.yamaguchi.jp +nagato.yamaguchi.jp +oshima.yamaguchi.jp +shimonoseki.yamaguchi.jp +shunan.yamaguchi.jp +tabuse.yamaguchi.jp +tokuyama.yamaguchi.jp +toyota.yamaguchi.jp +ube.yamaguchi.jp +yuu.yamaguchi.jp +chuo.yamanashi.jp +doshi.yamanashi.jp +fuefuki.yamanashi.jp +fujikawa.yamanashi.jp +fujikawaguchiko.yamanashi.jp +fujiyoshida.yamanashi.jp +hayakawa.yamanashi.jp +hokuto.yamanashi.jp +ichikawamisato.yamanashi.jp +kai.yamanashi.jp +kofu.yamanashi.jp +koshu.yamanashi.jp +kosuge.yamanashi.jp +minami-alps.yamanashi.jp +minobu.yamanashi.jp +nakamichi.yamanashi.jp 
+nanbu.yamanashi.jp +narusawa.yamanashi.jp +nirasaki.yamanashi.jp +nishikatsura.yamanashi.jp +oshino.yamanashi.jp +otsuki.yamanashi.jp +showa.yamanashi.jp +tabayama.yamanashi.jp +tsuru.yamanashi.jp +uenohara.yamanashi.jp +yamanakako.yamanashi.jp +yamanashi.yamanashi.jp + +// ke : http://www.kenic.or.ke/index.php/en/ke-domains/ke-domains +ke +ac.ke +co.ke +go.ke +info.ke +me.ke +mobi.ke +ne.ke +or.ke +sc.ke + +// kg : http://www.domain.kg/dmn_n.html +kg +org.kg +net.kg +com.kg +edu.kg +gov.kg +mil.kg + +// kh : http://www.mptc.gov.kh/dns_registration.htm +*.kh + +// ki : http://www.ki/dns/index.html +ki +edu.ki +biz.ki +net.ki +org.ki +gov.ki +info.ki +com.ki + +// km : https://en.wikipedia.org/wiki/.km +// http://www.domaine.km/documents/charte.doc +km +org.km +nom.km +gov.km +prd.km +tm.km +edu.km +mil.km +ass.km +com.km +// These are only mentioned as proposed suggestions at domaine.km, but +// https://en.wikipedia.org/wiki/.km says they're available for registration: +coop.km +asso.km +presse.km +medecin.km +notaires.km +pharmaciens.km +veterinaire.km +gouv.km + +// kn : https://en.wikipedia.org/wiki/.kn +// http://www.dot.kn/domainRules.html +kn +net.kn +org.kn +edu.kn +gov.kn + +// kp : http://www.kcce.kp/en_index.php +kp +com.kp +edu.kp +gov.kp +org.kp +rep.kp +tra.kp + +// kr : https://en.wikipedia.org/wiki/.kr +// see also: http://domain.nida.or.kr/eng/registration.jsp +kr +ac.kr +co.kr +es.kr +go.kr +hs.kr +kg.kr +mil.kr +ms.kr +ne.kr +or.kr +pe.kr +re.kr +sc.kr +// kr geographical names +busan.kr +chungbuk.kr +chungnam.kr +daegu.kr +daejeon.kr +gangwon.kr +gwangju.kr +gyeongbuk.kr +gyeonggi.kr +gyeongnam.kr +incheon.kr +jeju.kr +jeonbuk.kr +jeonnam.kr +seoul.kr +ulsan.kr + +// kw : https://www.nic.kw/policies/ +// Confirmed by registry +kw +com.kw +edu.kw +emb.kw +gov.kw +ind.kw +net.kw +org.kw + +// ky : http://www.icta.ky/da_ky_reg_dom.php +// Confirmed by registry 2008-06-17 +ky +edu.ky +gov.ky +com.ky +org.ky +net.ky + +// kz : 
https://en.wikipedia.org/wiki/.kz +// see also: http://www.nic.kz/rules/index.jsp +kz +org.kz +edu.kz +net.kz +gov.kz +mil.kz +com.kz + +// la : https://en.wikipedia.org/wiki/.la +// Submitted by registry +la +int.la +net.la +info.la +edu.la +gov.la +per.la +com.la +org.la + +// lb : https://en.wikipedia.org/wiki/.lb +// Submitted by registry +lb +com.lb +edu.lb +gov.lb +net.lb +org.lb + +// lc : https://en.wikipedia.org/wiki/.lc +// see also: http://www.nic.lc/rules.htm +lc +com.lc +net.lc +co.lc +org.lc +edu.lc +gov.lc + +// li : https://en.wikipedia.org/wiki/.li +li + +// lk : https://www.nic.lk/index.php/domain-registration/lk-domain-naming-structure +lk +gov.lk +sch.lk +net.lk +int.lk +com.lk +org.lk +edu.lk +ngo.lk +soc.lk +web.lk +ltd.lk +assn.lk +grp.lk +hotel.lk +ac.lk + +// lr : http://psg.com/dns/lr/lr.txt +// Submitted by registry +lr +com.lr +edu.lr +gov.lr +org.lr +net.lr + +// ls : http://www.nic.ls/ +// Confirmed by registry +ls +ac.ls +biz.ls +co.ls +edu.ls +gov.ls +info.ls +net.ls +org.ls +sc.ls + +// lt : https://en.wikipedia.org/wiki/.lt +lt +// gov.lt : http://www.gov.lt/index_en.php +gov.lt + +// lu : http://www.dns.lu/en/ +lu + +// lv : http://www.nic.lv/DNS/En/generic.php +lv +com.lv +edu.lv +gov.lv +org.lv +mil.lv +id.lv +net.lv +asn.lv +conf.lv + +// ly : http://www.nic.ly/regulations.php +ly +com.ly +net.ly +gov.ly +plc.ly +edu.ly +sch.ly +med.ly +org.ly +id.ly + +// ma : https://en.wikipedia.org/wiki/.ma +// http://www.anrt.ma/fr/admin/download/upload/file_fr782.pdf +ma +co.ma +net.ma +gov.ma +org.ma +ac.ma +press.ma + +// mc : http://www.nic.mc/ +mc +tm.mc +asso.mc + +// md : https://en.wikipedia.org/wiki/.md +md + +// me : https://en.wikipedia.org/wiki/.me +me +co.me +net.me +org.me +edu.me +ac.me +gov.me +its.me +priv.me + +// mg : http://nic.mg/nicmg/?page_id=39 +mg +org.mg +nom.mg +gov.mg +prd.mg +tm.mg +edu.mg +mil.mg +com.mg +co.mg + +// mh : https://en.wikipedia.org/wiki/.mh +mh + +// mil : https://en.wikipedia.org/wiki/.mil +mil 
+ +// mk : https://en.wikipedia.org/wiki/.mk +// see also: http://dns.marnet.net.mk/postapka.php +mk +com.mk +org.mk +net.mk +edu.mk +gov.mk +inf.mk +name.mk + +// ml : http://www.gobin.info/domainname/ml-template.doc +// see also: https://en.wikipedia.org/wiki/.ml +ml +com.ml +edu.ml +gouv.ml +gov.ml +net.ml +org.ml +presse.ml + +// mm : https://en.wikipedia.org/wiki/.mm +*.mm + +// mn : https://en.wikipedia.org/wiki/.mn +mn +gov.mn +edu.mn +org.mn + +// mo : http://www.monic.net.mo/ +mo +com.mo +net.mo +org.mo +edu.mo +gov.mo + +// mobi : https://en.wikipedia.org/wiki/.mobi +mobi + +// mp : http://www.dot.mp/ +// Confirmed by registry 2008-06-17 +mp + +// mq : https://en.wikipedia.org/wiki/.mq +mq + +// mr : https://en.wikipedia.org/wiki/.mr +mr +gov.mr + +// ms : http://www.nic.ms/pdf/MS_Domain_Name_Rules.pdf +ms +com.ms +edu.ms +gov.ms +net.ms +org.ms + +// mt : https://www.nic.org.mt/go/policy +// Submitted by registry +mt +com.mt +edu.mt +net.mt +org.mt + +// mu : https://en.wikipedia.org/wiki/.mu +mu +com.mu +net.mu +org.mu +gov.mu +ac.mu +co.mu +or.mu + +// museum : http://about.museum/naming/ +// http://index.museum/ +museum +academy.museum +agriculture.museum +air.museum +airguard.museum +alabama.museum +alaska.museum +amber.museum +ambulance.museum +american.museum +americana.museum +americanantiques.museum +americanart.museum +amsterdam.museum +and.museum +annefrank.museum +anthro.museum +anthropology.museum +antiques.museum +aquarium.museum +arboretum.museum +archaeological.museum +archaeology.museum +architecture.museum +art.museum +artanddesign.museum +artcenter.museum +artdeco.museum +arteducation.museum +artgallery.museum +arts.museum +artsandcrafts.museum +asmatart.museum +assassination.museum +assisi.museum +association.museum +astronomy.museum +atlanta.museum +austin.museum +australia.museum +automotive.museum +aviation.museum +axis.museum +badajoz.museum +baghdad.museum +bahn.museum +bale.museum +baltimore.museum +barcelona.museum 
+baseball.museum +basel.museum +baths.museum +bauern.museum +beauxarts.museum +beeldengeluid.museum +bellevue.museum +bergbau.museum +berkeley.museum +berlin.museum +bern.museum +bible.museum +bilbao.museum +bill.museum +birdart.museum +birthplace.museum +bonn.museum +boston.museum +botanical.museum +botanicalgarden.museum +botanicgarden.museum +botany.museum +brandywinevalley.museum +brasil.museum +bristol.museum +british.museum +britishcolumbia.museum +broadcast.museum +brunel.museum +brussel.museum +brussels.museum +bruxelles.museum +building.museum +burghof.museum +bus.museum +bushey.museum +cadaques.museum +california.museum +cambridge.museum +can.museum +canada.museum +capebreton.museum +carrier.museum +cartoonart.museum +casadelamoneda.museum +castle.museum +castres.museum +celtic.museum +center.museum +chattanooga.museum +cheltenham.museum +chesapeakebay.museum +chicago.museum +children.museum +childrens.museum +childrensgarden.museum +chiropractic.museum +chocolate.museum +christiansburg.museum +cincinnati.museum +cinema.museum +circus.museum +civilisation.museum +civilization.museum +civilwar.museum +clinton.museum +clock.museum +coal.museum +coastaldefence.museum +cody.museum +coldwar.museum +collection.museum +colonialwilliamsburg.museum +coloradoplateau.museum +columbia.museum +columbus.museum +communication.museum +communications.museum +community.museum +computer.museum +computerhistory.museum +comunicações.museum +contemporary.museum +contemporaryart.museum +convent.museum +copenhagen.museum +corporation.museum +correios-e-telecomunicações.museum +corvette.museum +costume.museum +countryestate.museum +county.museum +crafts.museum +cranbrook.museum +creation.museum +cultural.museum +culturalcenter.museum +culture.museum +cyber.museum +cymru.museum +dali.museum +dallas.museum +database.museum +ddr.museum +decorativearts.museum +delaware.museum +delmenhorst.museum +denmark.museum +depot.museum +design.museum +detroit.museum +dinosaur.museum 
+discovery.museum +dolls.museum +donostia.museum +durham.museum +eastafrica.museum +eastcoast.museum +education.museum +educational.museum +egyptian.museum +eisenbahn.museum +elburg.museum +elvendrell.museum +embroidery.museum +encyclopedic.museum +england.museum +entomology.museum +environment.museum +environmentalconservation.museum +epilepsy.museum +essex.museum +estate.museum +ethnology.museum +exeter.museum +exhibition.museum +family.museum +farm.museum +farmequipment.museum +farmers.museum +farmstead.museum +field.museum +figueres.museum +filatelia.museum +film.museum +fineart.museum +finearts.museum +finland.museum +flanders.museum +florida.museum +force.museum +fortmissoula.museum +fortworth.museum +foundation.museum +francaise.museum +frankfurt.museum +franziskaner.museum +freemasonry.museum +freiburg.museum +fribourg.museum +frog.museum +fundacio.museum +furniture.museum +gallery.museum +garden.museum +gateway.museum +geelvinck.museum +gemological.museum +geology.museum +georgia.museum +giessen.museum +glas.museum +glass.museum +gorge.museum +grandrapids.museum +graz.museum +guernsey.museum +halloffame.museum +hamburg.museum +handson.museum +harvestcelebration.museum +hawaii.museum +health.museum +heimatunduhren.museum +hellas.museum +helsinki.museum +hembygdsforbund.museum +heritage.museum +histoire.museum +historical.museum +historicalsociety.museum +historichouses.museum +historisch.museum +historisches.museum +history.museum +historyofscience.museum +horology.museum +house.museum +humanities.museum +illustration.museum +imageandsound.museum +indian.museum +indiana.museum +indianapolis.museum +indianmarket.museum +intelligence.museum +interactive.museum +iraq.museum +iron.museum +isleofman.museum +jamison.museum +jefferson.museum +jerusalem.museum +jewelry.museum +jewish.museum +jewishart.museum +jfk.museum +journalism.museum +judaica.museum +judygarland.museum +juedisches.museum +juif.museum +karate.museum +karikatur.museum +kids.museum 
+koebenhavn.museum +koeln.museum +kunst.museum +kunstsammlung.museum +kunstunddesign.museum +labor.museum +labour.museum +lajolla.museum +lancashire.museum +landes.museum +lans.museum +läns.museum +larsson.museum +lewismiller.museum +lincoln.museum +linz.museum +living.museum +livinghistory.museum +localhistory.museum +london.museum +losangeles.museum +louvre.museum +loyalist.museum +lucerne.museum +luxembourg.museum +luzern.museum +mad.museum +madrid.museum +mallorca.museum +manchester.museum +mansion.museum +mansions.museum +manx.museum +marburg.museum +maritime.museum +maritimo.museum +maryland.museum +marylhurst.museum +media.museum +medical.museum +medizinhistorisches.museum +meeres.museum +memorial.museum +mesaverde.museum +michigan.museum +midatlantic.museum +military.museum +mill.museum +miners.museum +mining.museum +minnesota.museum +missile.museum +missoula.museum +modern.museum +moma.museum +money.museum +monmouth.museum +monticello.museum +montreal.museum +moscow.museum +motorcycle.museum +muenchen.museum +muenster.museum +mulhouse.museum +muncie.museum +museet.museum +museumcenter.museum +museumvereniging.museum +music.museum +national.museum +nationalfirearms.museum +nationalheritage.museum +nativeamerican.museum +naturalhistory.museum +naturalhistorymuseum.museum +naturalsciences.museum +nature.museum +naturhistorisches.museum +natuurwetenschappen.museum +naumburg.museum +naval.museum +nebraska.museum +neues.museum +newhampshire.museum +newjersey.museum +newmexico.museum +newport.museum +newspaper.museum +newyork.museum +niepce.museum +norfolk.museum +north.museum +nrw.museum +nyc.museum +nyny.museum +oceanographic.museum +oceanographique.museum +omaha.museum +online.museum +ontario.museum +openair.museum +oregon.museum +oregontrail.museum +otago.museum +oxford.museum +pacific.museum +paderborn.museum +palace.museum +paleo.museum +palmsprings.museum +panama.museum +paris.museum +pasadena.museum +pharmacy.museum +philadelphia.museum 
+philadelphiaarea.museum +philately.museum +phoenix.museum +photography.museum +pilots.museum +pittsburgh.museum +planetarium.museum +plantation.museum +plants.museum +plaza.museum +portal.museum +portland.museum +portlligat.museum +posts-and-telecommunications.museum +preservation.museum +presidio.museum +press.museum +project.museum +public.museum +pubol.museum +quebec.museum +railroad.museum +railway.museum +research.museum +resistance.museum +riodejaneiro.museum +rochester.museum +rockart.museum +roma.museum +russia.museum +saintlouis.museum +salem.museum +salvadordali.museum +salzburg.museum +sandiego.museum +sanfrancisco.museum +santabarbara.museum +santacruz.museum +santafe.museum +saskatchewan.museum +satx.museum +savannahga.museum +schlesisches.museum +schoenbrunn.museum +schokoladen.museum +school.museum +schweiz.museum +science.museum +scienceandhistory.museum +scienceandindustry.museum +sciencecenter.museum +sciencecenters.museum +science-fiction.museum +sciencehistory.museum +sciences.museum +sciencesnaturelles.museum +scotland.museum +seaport.museum +settlement.museum +settlers.museum +shell.museum +sherbrooke.museum +sibenik.museum +silk.museum +ski.museum +skole.museum +society.museum +sologne.museum +soundandvision.museum +southcarolina.museum +southwest.museum +space.museum +spy.museum +square.museum +stadt.museum +stalbans.museum +starnberg.museum +state.museum +stateofdelaware.museum +station.museum +steam.museum +steiermark.museum +stjohn.museum +stockholm.museum +stpetersburg.museum +stuttgart.museum +suisse.museum +surgeonshall.museum +surrey.museum +svizzera.museum +sweden.museum +sydney.museum +tank.museum +tcm.museum +technology.museum +telekommunikation.museum +television.museum +texas.museum +textile.museum +theater.museum +time.museum +timekeeping.museum +topology.museum +torino.museum +touch.museum +town.museum +transport.museum +tree.museum +trolley.museum +trust.museum +trustee.museum +uhren.museum +ulm.museum +undersea.museum 
+university.museum +usa.museum +usantiques.museum +usarts.museum +uscountryestate.museum +usculture.museum +usdecorativearts.museum +usgarden.museum +ushistory.museum +ushuaia.museum +uslivinghistory.museum +utah.museum +uvic.museum +valley.museum +vantaa.museum +versailles.museum +viking.museum +village.museum +virginia.museum +virtual.museum +virtuel.museum +vlaanderen.museum +volkenkunde.museum +wales.museum +wallonie.museum +war.museum +washingtondc.museum +watchandclock.museum +watch-and-clock.museum +western.museum +westfalen.museum +whaling.museum +wildlife.museum +williamsburg.museum +windmill.museum +workshop.museum +york.museum +yorkshire.museum +yosemite.museum +youth.museum +zoological.museum +zoology.museum +ירושלים.museum +иком.museum + +// mv : https://en.wikipedia.org/wiki/.mv +// "mv" included because, contra Wikipedia, google.mv exists. +mv +aero.mv +biz.mv +com.mv +coop.mv +edu.mv +gov.mv +info.mv +int.mv +mil.mv +museum.mv +name.mv +net.mv +org.mv +pro.mv + +// mw : http://www.registrar.mw/ +mw +ac.mw +biz.mw +co.mw +com.mw +coop.mw +edu.mw +gov.mw +int.mw +museum.mw +net.mw +org.mw + +// mx : http://www.nic.mx/ +// Submitted by registry +mx +com.mx +org.mx +gob.mx +edu.mx +net.mx + +// my : http://www.mynic.my/ +// Available strings: https://mynic.my/resources/domains/buying-a-domain/ +my +biz.my +com.my +edu.my +gov.my +mil.my +name.my +net.my +org.my + +// mz : http://www.uem.mz/ +// Submitted by registry +mz +ac.mz +adv.mz +co.mz +edu.mz +gov.mz +mil.mz +net.mz +org.mz + +// na : http://www.na-nic.com.na/ +// http://www.info.na/domain/ +na +info.na +pro.na +name.na +school.na +or.na +dr.na +us.na +mx.na +ca.na +in.na +cc.na +tv.na +ws.na +mobi.na +co.na +com.na +org.na + +// name : has 2nd-level tlds, but there's no list of them +name + +// nc : http://www.cctld.nc/ +nc +asso.nc +nom.nc + +// ne : https://en.wikipedia.org/wiki/.ne +ne + +// net : https://en.wikipedia.org/wiki/.net +net + +// nf : https://en.wikipedia.org/wiki/.nf +nf +com.nf 
+net.nf +per.nf +rec.nf +web.nf +arts.nf +firm.nf +info.nf +other.nf +store.nf + +// ng : http://www.nira.org.ng/index.php/join-us/register-ng-domain/189-nira-slds +ng +com.ng +edu.ng +gov.ng +i.ng +mil.ng +mobi.ng +name.ng +net.ng +org.ng +sch.ng + +// ni : http://www.nic.ni/ +ni +ac.ni +biz.ni +co.ni +com.ni +edu.ni +gob.ni +in.ni +info.ni +int.ni +mil.ni +net.ni +nom.ni +org.ni +web.ni + +// nl : https://en.wikipedia.org/wiki/.nl +// https://www.sidn.nl/ +// ccTLD for the Netherlands +nl + +// no : https://www.norid.no/en/om-domenenavn/regelverk-for-no/ +// Norid geographical second level domains : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-b/ +// Norid category second level domains : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-c/ +// Norid category second-level domains managed by parties other than Norid : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-d/ +// RSS feed: https://teknisk.norid.no/en/feed/ +no +// Norid category second level domains : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-c/ +fhs.no +vgs.no +fylkesbibl.no +folkebibl.no +museum.no +idrett.no +priv.no +// Norid category second-level domains managed by parties other than Norid : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-d/ +mil.no +stat.no +dep.no +kommune.no +herad.no +// Norid geographical second level domains : https://www.norid.no/en/om-domenenavn/regelverk-for-no/vedlegg-b/ +// counties +aa.no +ah.no +bu.no +fm.no +hl.no +hm.no +jan-mayen.no +mr.no +nl.no +nt.no +of.no +ol.no +oslo.no +rl.no +sf.no +st.no +svalbard.no +tm.no +tr.no +va.no +vf.no +// primary and lower secondary schools per county +gs.aa.no +gs.ah.no +gs.bu.no +gs.fm.no +gs.hl.no +gs.hm.no +gs.jan-mayen.no +gs.mr.no +gs.nl.no +gs.nt.no +gs.of.no +gs.ol.no +gs.oslo.no +gs.rl.no +gs.sf.no +gs.st.no +gs.svalbard.no +gs.tm.no +gs.tr.no +gs.va.no +gs.vf.no +// cities +akrehamn.no +åkrehamn.no +algard.no +ålgård.no +arna.no +brumunddal.no 
+bryne.no +bronnoysund.no +brønnøysund.no +drobak.no +drøbak.no +egersund.no +fetsund.no +floro.no +florø.no +fredrikstad.no +hokksund.no +honefoss.no +hønefoss.no +jessheim.no +jorpeland.no +jørpeland.no +kirkenes.no +kopervik.no +krokstadelva.no +langevag.no +langevåg.no +leirvik.no +mjondalen.no +mjøndalen.no +mo-i-rana.no +mosjoen.no +mosjøen.no +nesoddtangen.no +orkanger.no +osoyro.no +osøyro.no +raholt.no +råholt.no +sandnessjoen.no +sandnessjøen.no +skedsmokorset.no +slattum.no +spjelkavik.no +stathelle.no +stavern.no +stjordalshalsen.no +stjørdalshalsen.no +tananger.no +tranby.no +vossevangen.no +// communities +afjord.no +åfjord.no +agdenes.no +al.no +ål.no +alesund.no +ålesund.no +alstahaug.no +alta.no +áltá.no +alaheadju.no +álaheadju.no +alvdal.no +amli.no +åmli.no +amot.no +åmot.no +andebu.no +andoy.no +andøy.no +andasuolo.no +ardal.no +årdal.no +aremark.no +arendal.no +ås.no +aseral.no +åseral.no +asker.no +askim.no +askvoll.no +askoy.no +askøy.no +asnes.no +åsnes.no +audnedaln.no +aukra.no +aure.no +aurland.no +aurskog-holand.no +aurskog-høland.no +austevoll.no +austrheim.no +averoy.no +averøy.no +balestrand.no +ballangen.no +balat.no +bálát.no +balsfjord.no +bahccavuotna.no +báhccavuotna.no +bamble.no +bardu.no +beardu.no +beiarn.no +bajddar.no +bájddar.no +baidar.no +báidár.no +berg.no +bergen.no +berlevag.no +berlevåg.no +bearalvahki.no +bearalváhki.no +bindal.no +birkenes.no +bjarkoy.no +bjarkøy.no +bjerkreim.no +bjugn.no +bodo.no +bodø.no +badaddja.no +bådåddjå.no +budejju.no +bokn.no +bremanger.no +bronnoy.no +brønnøy.no +bygland.no +bykle.no +barum.no +bærum.no +bo.telemark.no +bø.telemark.no +bo.nordland.no +bø.nordland.no +bievat.no +bievát.no +bomlo.no +bømlo.no +batsfjord.no +båtsfjord.no +bahcavuotna.no +báhcavuotna.no +dovre.no +drammen.no +drangedal.no +dyroy.no +dyrøy.no +donna.no +dønna.no +eid.no +eidfjord.no +eidsberg.no +eidskog.no +eidsvoll.no +eigersund.no +elverum.no +enebakk.no +engerdal.no +etne.no +etnedal.no +evenes.no 
+evenassi.no +evenášši.no +evje-og-hornnes.no +farsund.no +fauske.no +fuossko.no +fuoisku.no +fedje.no +fet.no +finnoy.no +finnøy.no +fitjar.no +fjaler.no +fjell.no +flakstad.no +flatanger.no +flekkefjord.no +flesberg.no +flora.no +fla.no +flå.no +folldal.no +forsand.no +fosnes.no +frei.no +frogn.no +froland.no +frosta.no +frana.no +fræna.no +froya.no +frøya.no +fusa.no +fyresdal.no +forde.no +førde.no +gamvik.no +gangaviika.no +gáŋgaviika.no +gaular.no +gausdal.no +gildeskal.no +gildeskål.no +giske.no +gjemnes.no +gjerdrum.no +gjerstad.no +gjesdal.no +gjovik.no +gjøvik.no +gloppen.no +gol.no +gran.no +grane.no +granvin.no +gratangen.no +grimstad.no +grong.no +kraanghke.no +kråanghke.no +grue.no +gulen.no +hadsel.no +halden.no +halsa.no +hamar.no +hamaroy.no +habmer.no +hábmer.no +hapmir.no +hápmir.no +hammerfest.no +hammarfeasta.no +hámmárfeasta.no +haram.no +hareid.no +harstad.no +hasvik.no +aknoluokta.no +ákŋoluokta.no +hattfjelldal.no +aarborte.no +haugesund.no +hemne.no +hemnes.no +hemsedal.no +heroy.more-og-romsdal.no +herøy.møre-og-romsdal.no +heroy.nordland.no +herøy.nordland.no +hitra.no +hjartdal.no +hjelmeland.no +hobol.no +hobøl.no +hof.no +hol.no +hole.no +holmestrand.no +holtalen.no +holtålen.no +hornindal.no +horten.no +hurdal.no +hurum.no +hvaler.no +hyllestad.no +hagebostad.no +hægebostad.no +hoyanger.no +høyanger.no +hoylandet.no +høylandet.no +ha.no +hå.no +ibestad.no +inderoy.no +inderøy.no +iveland.no +jevnaker.no +jondal.no +jolster.no +jølster.no +karasjok.no +karasjohka.no +kárášjohka.no +karlsoy.no +galsa.no +gálsá.no +karmoy.no +karmøy.no +kautokeino.no +guovdageaidnu.no +klepp.no +klabu.no +klæbu.no +kongsberg.no +kongsvinger.no +kragero.no +kragerø.no +kristiansand.no +kristiansund.no +krodsherad.no +krødsherad.no +kvalsund.no +rahkkeravju.no +ráhkkerávju.no +kvam.no +kvinesdal.no +kvinnherad.no +kviteseid.no +kvitsoy.no +kvitsøy.no +kvafjord.no +kvæfjord.no +giehtavuoatna.no +kvanangen.no +kvænangen.no +navuotna.no +návuotna.no 
+kafjord.no +kåfjord.no +gaivuotna.no +gáivuotna.no +larvik.no +lavangen.no +lavagis.no +loabat.no +loabát.no +lebesby.no +davvesiida.no +leikanger.no +leirfjord.no +leka.no +leksvik.no +lenvik.no +leangaviika.no +leaŋgaviika.no +lesja.no +levanger.no +lier.no +lierne.no +lillehammer.no +lillesand.no +lindesnes.no +lindas.no +lindås.no +lom.no +loppa.no +lahppi.no +láhppi.no +lund.no +lunner.no +luroy.no +lurøy.no +luster.no +lyngdal.no +lyngen.no +ivgu.no +lardal.no +lerdal.no +lærdal.no +lodingen.no +lødingen.no +lorenskog.no +lørenskog.no +loten.no +løten.no +malvik.no +masoy.no +måsøy.no +muosat.no +muosát.no +mandal.no +marker.no +marnardal.no +masfjorden.no +meland.no +meldal.no +melhus.no +meloy.no +meløy.no +meraker.no +meråker.no +moareke.no +moåreke.no +midsund.no +midtre-gauldal.no +modalen.no +modum.no +molde.no +moskenes.no +moss.no +mosvik.no +malselv.no +målselv.no +malatvuopmi.no +málatvuopmi.no +namdalseid.no +aejrie.no +namsos.no +namsskogan.no +naamesjevuemie.no +nååmesjevuemie.no +laakesvuemie.no +nannestad.no +narvik.no +narviika.no +naustdal.no +nedre-eiker.no +nes.akershus.no +nes.buskerud.no +nesna.no +nesodden.no +nesseby.no +unjarga.no +unjárga.no +nesset.no +nissedal.no +nittedal.no +nord-aurdal.no +nord-fron.no +nord-odal.no +norddal.no +nordkapp.no +davvenjarga.no +davvenjárga.no +nordre-land.no +nordreisa.no +raisa.no +ráisa.no +nore-og-uvdal.no +notodden.no +naroy.no +nærøy.no +notteroy.no +nøtterøy.no +odda.no +oksnes.no +øksnes.no +oppdal.no +oppegard.no +oppegård.no +orkdal.no +orland.no +ørland.no +orskog.no +ørskog.no +orsta.no +ørsta.no +os.hedmark.no +os.hordaland.no +osen.no +osteroy.no +osterøy.no +ostre-toten.no +østre-toten.no +overhalla.no +ovre-eiker.no +øvre-eiker.no +oyer.no +øyer.no +oygarden.no +øygarden.no +oystre-slidre.no +øystre-slidre.no +porsanger.no +porsangu.no +porsáŋgu.no +porsgrunn.no +radoy.no +radøy.no +rakkestad.no +rana.no +ruovat.no +randaberg.no +rauma.no +rendalen.no +rennebu.no +rennesoy.no 
+rennesøy.no +rindal.no +ringebu.no +ringerike.no +ringsaker.no +rissa.no +risor.no +risør.no +roan.no +rollag.no +rygge.no +ralingen.no +rælingen.no +rodoy.no +rødøy.no +romskog.no +rømskog.no +roros.no +røros.no +rost.no +røst.no +royken.no +røyken.no +royrvik.no +røyrvik.no +rade.no +råde.no +salangen.no +siellak.no +saltdal.no +salat.no +sálát.no +sálat.no +samnanger.no +sande.more-og-romsdal.no +sande.møre-og-romsdal.no +sande.vestfold.no +sandefjord.no +sandnes.no +sandoy.no +sandøy.no +sarpsborg.no +sauda.no +sauherad.no +sel.no +selbu.no +selje.no +seljord.no +sigdal.no +siljan.no +sirdal.no +skaun.no +skedsmo.no +ski.no +skien.no +skiptvet.no +skjervoy.no +skjervøy.no +skierva.no +skiervá.no +skjak.no +skjåk.no +skodje.no +skanland.no +skånland.no +skanit.no +skánit.no +smola.no +smøla.no +snillfjord.no +snasa.no +snåsa.no +snoasa.no +snaase.no +snåase.no +sogndal.no +sokndal.no +sola.no +solund.no +songdalen.no +sortland.no +spydeberg.no +stange.no +stavanger.no +steigen.no +steinkjer.no +stjordal.no +stjørdal.no +stokke.no +stor-elvdal.no +stord.no +stordal.no +storfjord.no +omasvuotna.no +strand.no +stranda.no +stryn.no +sula.no +suldal.no +sund.no +sunndal.no +surnadal.no +sveio.no +svelvik.no +sykkylven.no +sogne.no +søgne.no +somna.no +sømna.no +sondre-land.no +søndre-land.no +sor-aurdal.no +sør-aurdal.no +sor-fron.no +sør-fron.no +sor-odal.no +sør-odal.no +sor-varanger.no +sør-varanger.no +matta-varjjat.no +mátta-várjjat.no +sorfold.no +sørfold.no +sorreisa.no +sørreisa.no +sorum.no +sørum.no +tana.no +deatnu.no +time.no +tingvoll.no +tinn.no +tjeldsund.no +dielddanuorri.no +tjome.no +tjøme.no +tokke.no +tolga.no +torsken.no +tranoy.no +tranøy.no +tromso.no +tromsø.no +tromsa.no +romsa.no +trondheim.no +troandin.no +trysil.no +trana.no +træna.no +trogstad.no +trøgstad.no +tvedestrand.no +tydal.no +tynset.no +tysfjord.no +divtasvuodna.no +divttasvuotna.no +tysnes.no +tysvar.no +tysvær.no +tonsberg.no +tønsberg.no +ullensaker.no +ullensvang.no 
+ulvik.no +utsira.no +vadso.no +vadsø.no +cahcesuolo.no +čáhcesuolo.no +vaksdal.no +valle.no +vang.no +vanylven.no +vardo.no +vardø.no +varggat.no +várggát.no +vefsn.no +vaapste.no +vega.no +vegarshei.no +vegårshei.no +vennesla.no +verdal.no +verran.no +vestby.no +vestnes.no +vestre-slidre.no +vestre-toten.no +vestvagoy.no +vestvågøy.no +vevelstad.no +vik.no +vikna.no +vindafjord.no +volda.no +voss.no +varoy.no +værøy.no +vagan.no +vågan.no +voagat.no +vagsoy.no +vågsøy.no +vaga.no +vågå.no +valer.ostfold.no +våler.østfold.no +valer.hedmark.no +våler.hedmark.no + +// np : http://www.mos.com.np/register.html +*.np + +// nr : http://cenpac.net.nr/dns/index.html +// Submitted by registry +nr +biz.nr +info.nr +gov.nr +edu.nr +org.nr +net.nr +com.nr + +// nu : https://en.wikipedia.org/wiki/.nu +nu + +// nz : https://en.wikipedia.org/wiki/.nz +// Submitted by registry +nz +ac.nz +co.nz +cri.nz +geek.nz +gen.nz +govt.nz +health.nz +iwi.nz +kiwi.nz +maori.nz +mil.nz +māori.nz +net.nz +org.nz +parliament.nz +school.nz + +// om : https://en.wikipedia.org/wiki/.om +om +co.om +com.om +edu.om +gov.om +med.om +museum.om +net.om +org.om +pro.om + +// onion : https://tools.ietf.org/html/rfc7686 +onion + +// org : https://en.wikipedia.org/wiki/.org +org + +// pa : http://www.nic.pa/ +// Some additional second level "domains" resolve directly as hostnames, such as +// pannet.pa, so we add a rule for "pa". 
+pa +ac.pa +gob.pa +com.pa +org.pa +sld.pa +edu.pa +net.pa +ing.pa +abo.pa +med.pa +nom.pa + +// pe : https://www.nic.pe/InformeFinalComision.pdf +pe +edu.pe +gob.pe +nom.pe +mil.pe +org.pe +com.pe +net.pe + +// pf : http://www.gobin.info/domainname/formulaire-pf.pdf +pf +com.pf +org.pf +edu.pf + +// pg : https://en.wikipedia.org/wiki/.pg +*.pg + +// ph : http://www.domains.ph/FAQ2.asp +// Submitted by registry +ph +com.ph +net.ph +org.ph +gov.ph +edu.ph +ngo.ph +mil.ph +i.ph + +// pk : http://pk5.pknic.net.pk/pk5/msgNamepk.PK +pk +com.pk +net.pk +edu.pk +org.pk +fam.pk +biz.pk +web.pk +gov.pk +gob.pk +gok.pk +gon.pk +gop.pk +gos.pk +info.pk + +// pl http://www.dns.pl/english/index.html +// Submitted by registry +pl +com.pl +net.pl +org.pl +// pl functional domains (http://www.dns.pl/english/index.html) +aid.pl +agro.pl +atm.pl +auto.pl +biz.pl +edu.pl +gmina.pl +gsm.pl +info.pl +mail.pl +miasta.pl +media.pl +mil.pl +nieruchomosci.pl +nom.pl +pc.pl +powiat.pl +priv.pl +realestate.pl +rel.pl +sex.pl +shop.pl +sklep.pl +sos.pl +szkola.pl +targi.pl +tm.pl +tourism.pl +travel.pl +turystyka.pl +// Government domains +gov.pl +ap.gov.pl +ic.gov.pl +is.gov.pl +us.gov.pl +kmpsp.gov.pl +kppsp.gov.pl +kwpsp.gov.pl +psp.gov.pl +wskr.gov.pl +kwp.gov.pl +mw.gov.pl +ug.gov.pl +um.gov.pl +umig.gov.pl +ugim.gov.pl +upow.gov.pl +uw.gov.pl +starostwo.gov.pl +pa.gov.pl +po.gov.pl +psse.gov.pl +pup.gov.pl +rzgw.gov.pl +sa.gov.pl +so.gov.pl +sr.gov.pl +wsa.gov.pl +sko.gov.pl +uzs.gov.pl +wiih.gov.pl +winb.gov.pl +pinb.gov.pl +wios.gov.pl +witd.gov.pl +wzmiuw.gov.pl +piw.gov.pl +wiw.gov.pl +griw.gov.pl +wif.gov.pl +oum.gov.pl +sdn.gov.pl +zp.gov.pl +uppo.gov.pl +mup.gov.pl +wuoz.gov.pl +konsulat.gov.pl +oirm.gov.pl +// pl regional domains (http://www.dns.pl/english/index.html) +augustow.pl +babia-gora.pl +bedzin.pl +beskidy.pl +bialowieza.pl +bialystok.pl +bielawa.pl +bieszczady.pl +boleslawiec.pl +bydgoszcz.pl +bytom.pl +cieszyn.pl +czeladz.pl +czest.pl +dlugoleka.pl +elblag.pl +elk.pl 
+glogow.pl +gniezno.pl +gorlice.pl +grajewo.pl +ilawa.pl +jaworzno.pl +jelenia-gora.pl +jgora.pl +kalisz.pl +kazimierz-dolny.pl +karpacz.pl +kartuzy.pl +kaszuby.pl +katowice.pl +kepno.pl +ketrzyn.pl +klodzko.pl +kobierzyce.pl +kolobrzeg.pl +konin.pl +konskowola.pl +kutno.pl +lapy.pl +lebork.pl +legnica.pl +lezajsk.pl +limanowa.pl +lomza.pl +lowicz.pl +lubin.pl +lukow.pl +malbork.pl +malopolska.pl +mazowsze.pl +mazury.pl +mielec.pl +mielno.pl +mragowo.pl +naklo.pl +nowaruda.pl +nysa.pl +olawa.pl +olecko.pl +olkusz.pl +olsztyn.pl +opoczno.pl +opole.pl +ostroda.pl +ostroleka.pl +ostrowiec.pl +ostrowwlkp.pl +pila.pl +pisz.pl +podhale.pl +podlasie.pl +polkowice.pl +pomorze.pl +pomorskie.pl +prochowice.pl +pruszkow.pl +przeworsk.pl +pulawy.pl +radom.pl +rawa-maz.pl +rybnik.pl +rzeszow.pl +sanok.pl +sejny.pl +slask.pl +slupsk.pl +sosnowiec.pl +stalowa-wola.pl +skoczow.pl +starachowice.pl +stargard.pl +suwalki.pl +swidnica.pl +swiebodzin.pl +swinoujscie.pl +szczecin.pl +szczytno.pl +tarnobrzeg.pl +tgory.pl +turek.pl +tychy.pl +ustka.pl +walbrzych.pl +warmia.pl +warszawa.pl +waw.pl +wegrow.pl +wielun.pl +wlocl.pl +wloclawek.pl +wodzislaw.pl +wolomin.pl +wroclaw.pl +zachpomor.pl +zagan.pl +zarow.pl +zgora.pl +zgorzelec.pl + +// pm : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +pm + +// pn : http://www.government.pn/PnRegistry/policies.htm +pn +gov.pn +co.pn +org.pn +edu.pn +net.pn + +// post : https://en.wikipedia.org/wiki/.post +post + +// pr : http://www.nic.pr/index.asp?f=1 +pr +com.pr +net.pr +org.pr +gov.pr +edu.pr +isla.pr +pro.pr +biz.pr +info.pr +name.pr +// these aren't mentioned on nic.pr, but on https://en.wikipedia.org/wiki/.pr +est.pr +prof.pr +ac.pr + +// pro : http://registry.pro/get-pro +pro +aaa.pro +aca.pro +acct.pro +avocat.pro +bar.pro +cpa.pro +eng.pro +jur.pro +law.pro +med.pro +recht.pro + +// ps : https://en.wikipedia.org/wiki/.ps +// http://www.nic.ps/registration/policy.html#reg +ps +edu.ps +gov.ps +sec.ps +plo.ps +com.ps 
+org.ps +net.ps + +// pt : https://www.dns.pt/en/domain/pt-terms-and-conditions-registration-rules/ +pt +net.pt +gov.pt +org.pt +edu.pt +int.pt +publ.pt +com.pt +nome.pt + +// pw : https://en.wikipedia.org/wiki/.pw +pw +co.pw +ne.pw +or.pw +ed.pw +go.pw +belau.pw + +// py : http://www.nic.py/pautas.html#seccion_9 +// Submitted by registry +py +com.py +coop.py +edu.py +gov.py +mil.py +net.py +org.py + +// qa : http://domains.qa/en/ +qa +com.qa +edu.qa +gov.qa +mil.qa +name.qa +net.qa +org.qa +sch.qa + +// re : http://www.afnic.re/obtenir/chartes/nommage-re/annexe-descriptifs +re +asso.re +com.re +nom.re + +// ro : http://www.rotld.ro/ +ro +arts.ro +com.ro +firm.ro +info.ro +nom.ro +nt.ro +org.ro +rec.ro +store.ro +tm.ro +www.ro + +// rs : https://www.rnids.rs/en/domains/national-domains +rs +ac.rs +co.rs +edu.rs +gov.rs +in.rs +org.rs + +// ru : https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +ru + +// rw : https://www.ricta.org.rw/sites/default/files/resources/registry_registrar_contract_0.pdf +rw +ac.rw +co.rw +coop.rw +gov.rw +mil.rw +net.rw +org.rw + +// sa : http://www.nic.net.sa/ +sa +com.sa +net.sa +org.sa +gov.sa +med.sa +pub.sa +edu.sa +sch.sa + +// sb : http://www.sbnic.net.sb/ +// Submitted by registry +sb +com.sb +edu.sb +gov.sb +net.sb +org.sb + +// sc : http://www.nic.sc/ +sc +com.sc +gov.sc +net.sc +org.sc +edu.sc + +// sd : http://www.isoc.sd/sudanic.isoc.sd/billing_pricing.htm +// Submitted by registry +sd +com.sd +net.sd +org.sd +edu.sd +med.sd +tv.sd +gov.sd +info.sd + +// se : https://en.wikipedia.org/wiki/.se +// Submitted by registry +se +a.se +ac.se +b.se +bd.se +brand.se +c.se +d.se +e.se +f.se +fh.se +fhsk.se +fhv.se +g.se +h.se +i.se +k.se +komforb.se +kommunalforbund.se +komvux.se +l.se +lanbib.se +m.se +n.se +naturbruksgymn.se +o.se +org.se +p.se +parti.se +pp.se +press.se +r.se +s.se +t.se +tm.se +u.se +w.se +x.se +y.se +z.se + +// sg : 
http://www.nic.net.sg/page/registration-policies-procedures-and-guidelines +sg +com.sg +net.sg +org.sg +gov.sg +edu.sg +per.sg + +// sh : http://www.nic.sh/registrar.html +sh +com.sh +net.sh +gov.sh +org.sh +mil.sh + +// si : https://en.wikipedia.org/wiki/.si +si + +// sj : No registrations at this time. +// Submitted by registry +sj + +// sk : https://en.wikipedia.org/wiki/.sk +// list of 2nd level domains ? +sk + +// sl : http://www.nic.sl +// Submitted by registry +sl +com.sl +net.sl +edu.sl +gov.sl +org.sl + +// sm : https://en.wikipedia.org/wiki/.sm +sm + +// sn : https://en.wikipedia.org/wiki/.sn +sn +art.sn +com.sn +edu.sn +gouv.sn +org.sn +perso.sn +univ.sn + +// so : http://sonic.so/policies/ +so +com.so +edu.so +gov.so +me.so +net.so +org.so + +// sr : https://en.wikipedia.org/wiki/.sr +sr + +// ss : https://registry.nic.ss/ +// Submitted by registry +ss +biz.ss +com.ss +edu.ss +gov.ss +me.ss +net.ss +org.ss +sch.ss + +// st : http://www.nic.st/html/policyrules/ +st +co.st +com.st +consulado.st +edu.st +embaixada.st +mil.st +net.st +org.st +principe.st +saotome.st +store.st + +// su : https://en.wikipedia.org/wiki/.su +su + +// sv : http://www.svnet.org.sv/niveldos.pdf +sv +com.sv +edu.sv +gob.sv +org.sv +red.sv + +// sx : https://en.wikipedia.org/wiki/.sx +// Submitted by registry +sx +gov.sx + +// sy : https://en.wikipedia.org/wiki/.sy +// see also: http://www.gobin.info/domainname/sy.doc +sy +edu.sy +gov.sy +net.sy +mil.sy +com.sy +org.sy + +// sz : https://en.wikipedia.org/wiki/.sz +// http://www.sispa.org.sz/ +sz +co.sz +ac.sz +org.sz + +// tc : https://en.wikipedia.org/wiki/.tc +tc + +// td : https://en.wikipedia.org/wiki/.td +td + +// tel: https://en.wikipedia.org/wiki/.tel +// http://www.telnic.org/ +tel + +// tf : https://en.wikipedia.org/wiki/.tf +tf + +// tg : https://en.wikipedia.org/wiki/.tg +// http://www.nic.tg/ +tg + +// th : https://en.wikipedia.org/wiki/.th +// Submitted by registry +th +ac.th +co.th +go.th +in.th +mi.th +net.th +or.th + 
+// tj : http://www.nic.tj/policy.html +tj +ac.tj +biz.tj +co.tj +com.tj +edu.tj +go.tj +gov.tj +int.tj +mil.tj +name.tj +net.tj +nic.tj +org.tj +test.tj +web.tj + +// tk : https://en.wikipedia.org/wiki/.tk +tk + +// tl : https://en.wikipedia.org/wiki/.tl +tl +gov.tl + +// tm : http://www.nic.tm/local.html +tm +com.tm +co.tm +org.tm +net.tm +nom.tm +gov.tm +mil.tm +edu.tm + +// tn : http://www.registre.tn/fr/ +// https://whois.ati.tn/ +tn +com.tn +ens.tn +fin.tn +gov.tn +ind.tn +info.tn +intl.tn +mincom.tn +nat.tn +net.tn +org.tn +perso.tn +tourism.tn + +// to : https://en.wikipedia.org/wiki/.to +// Submitted by registry +to +com.to +gov.to +net.to +org.to +edu.to +mil.to + +// tr : https://nic.tr/ +// https://nic.tr/forms/eng/policies.pdf +// https://nic.tr/index.php?USRACTN=PRICELST +tr +av.tr +bbs.tr +bel.tr +biz.tr +com.tr +dr.tr +edu.tr +gen.tr +gov.tr +info.tr +mil.tr +k12.tr +kep.tr +name.tr +net.tr +org.tr +pol.tr +tel.tr +tsk.tr +tv.tr +web.tr +// Used by Northern Cyprus +nc.tr +// Used by government agencies of Northern Cyprus +gov.nc.tr + +// tt : http://www.nic.tt/ +tt +co.tt +com.tt +org.tt +net.tt +biz.tt +info.tt +pro.tt +int.tt +coop.tt +jobs.tt +mobi.tt +travel.tt +museum.tt +aero.tt +name.tt +gov.tt +edu.tt + +// tv : https://en.wikipedia.org/wiki/.tv +// Not listing any 2LDs as reserved since none seem to exist in practice, +// Wikipedia notwithstanding. 
+tv + +// tw : https://en.wikipedia.org/wiki/.tw +tw +edu.tw +gov.tw +mil.tw +com.tw +net.tw +org.tw +idv.tw +game.tw +ebiz.tw +club.tw +網路.tw +組織.tw +商業.tw + +// tz : http://www.tznic.or.tz/index.php/domains +// Submitted by registry +tz +ac.tz +co.tz +go.tz +hotel.tz +info.tz +me.tz +mil.tz +mobi.tz +ne.tz +or.tz +sc.tz +tv.tz + +// ua : https://hostmaster.ua/policy/?ua +// Submitted by registry +ua +// ua 2LD +com.ua +edu.ua +gov.ua +in.ua +net.ua +org.ua +// ua geographic names +// https://hostmaster.ua/2ld/ +cherkassy.ua +cherkasy.ua +chernigov.ua +chernihiv.ua +chernivtsi.ua +chernovtsy.ua +ck.ua +cn.ua +cr.ua +crimea.ua +cv.ua +dn.ua +dnepropetrovsk.ua +dnipropetrovsk.ua +donetsk.ua +dp.ua +if.ua +ivano-frankivsk.ua +kh.ua +kharkiv.ua +kharkov.ua +kherson.ua +khmelnitskiy.ua +khmelnytskyi.ua +kiev.ua +kirovograd.ua +km.ua +kr.ua +krym.ua +ks.ua +kv.ua +kyiv.ua +lg.ua +lt.ua +lugansk.ua +lutsk.ua +lv.ua +lviv.ua +mk.ua +mykolaiv.ua +nikolaev.ua +od.ua +odesa.ua +odessa.ua +pl.ua +poltava.ua +rivne.ua +rovno.ua +rv.ua +sb.ua +sebastopol.ua +sevastopol.ua +sm.ua +sumy.ua +te.ua +ternopil.ua +uz.ua +uzhgorod.ua +vinnica.ua +vinnytsia.ua +vn.ua +volyn.ua +yalta.ua +zaporizhzhe.ua +zaporizhzhia.ua +zhitomir.ua +zhytomyr.ua +zp.ua +zt.ua + +// ug : https://www.registry.co.ug/ +ug +co.ug +or.ug +ac.ug +sc.ug +go.ug +ne.ug +com.ug +org.ug + +// uk : https://en.wikipedia.org/wiki/.uk +// Submitted by registry +uk +ac.uk +co.uk +gov.uk +ltd.uk +me.uk +net.uk +nhs.uk +org.uk +plc.uk +police.uk +*.sch.uk + +// us : https://en.wikipedia.org/wiki/.us +us +dni.us +fed.us +isa.us +kids.us +nsn.us +// us geographic names +ak.us +al.us +ar.us +as.us +az.us +ca.us +co.us +ct.us +dc.us +de.us +fl.us +ga.us +gu.us +hi.us +ia.us +id.us +il.us +in.us +ks.us +ky.us +la.us +ma.us +md.us +me.us +mi.us +mn.us +mo.us +ms.us +mt.us +nc.us +nd.us +ne.us +nh.us +nj.us +nm.us +nv.us +ny.us +oh.us +ok.us +or.us +pa.us +pr.us +ri.us +sc.us +sd.us +tn.us +tx.us +ut.us +vi.us +vt.us +va.us 
+wa.us +wi.us +wv.us +wy.us +// The registrar notes several more specific domains available in each state, +// such as state.*.us, dst.*.us, etc., but resolution of these is somewhat +// haphazard; in some states these domains resolve as addresses, while in others +// only subdomains are available, or even nothing at all. We include the +// most common ones where it's clear that different sites are different +// entities. +k12.ak.us +k12.al.us +k12.ar.us +k12.as.us +k12.az.us +k12.ca.us +k12.co.us +k12.ct.us +k12.dc.us +k12.de.us +k12.fl.us +k12.ga.us +k12.gu.us +// k12.hi.us Bug 614565 - Hawaii has a state-wide DOE login +k12.ia.us +k12.id.us +k12.il.us +k12.in.us +k12.ks.us +k12.ky.us +k12.la.us +k12.ma.us +k12.md.us +k12.me.us +k12.mi.us +k12.mn.us +k12.mo.us +k12.ms.us +k12.mt.us +k12.nc.us +// k12.nd.us Bug 1028347 - Removed at request of Travis Rosso +k12.ne.us +k12.nh.us +k12.nj.us +k12.nm.us +k12.nv.us +k12.ny.us +k12.oh.us +k12.ok.us +k12.or.us +k12.pa.us +k12.pr.us +// k12.ri.us Removed at request of Kim Cournoyer +k12.sc.us +// k12.sd.us Bug 934131 - Removed at request of James Booze +k12.tn.us +k12.tx.us +k12.ut.us +k12.vi.us +k12.vt.us +k12.va.us +k12.wa.us +k12.wi.us +// k12.wv.us Bug 947705 - Removed at request of Verne Britton +k12.wy.us +cc.ak.us +cc.al.us +cc.ar.us +cc.as.us +cc.az.us +cc.ca.us +cc.co.us +cc.ct.us +cc.dc.us +cc.de.us +cc.fl.us +cc.ga.us +cc.gu.us +cc.hi.us +cc.ia.us +cc.id.us +cc.il.us +cc.in.us +cc.ks.us +cc.ky.us +cc.la.us +cc.ma.us +cc.md.us +cc.me.us +cc.mi.us +cc.mn.us +cc.mo.us +cc.ms.us +cc.mt.us +cc.nc.us +cc.nd.us +cc.ne.us +cc.nh.us +cc.nj.us +cc.nm.us +cc.nv.us +cc.ny.us +cc.oh.us +cc.ok.us +cc.or.us +cc.pa.us +cc.pr.us +cc.ri.us +cc.sc.us +cc.sd.us +cc.tn.us +cc.tx.us +cc.ut.us +cc.vi.us +cc.vt.us +cc.va.us +cc.wa.us +cc.wi.us +cc.wv.us +cc.wy.us +lib.ak.us +lib.al.us +lib.ar.us +lib.as.us +lib.az.us +lib.ca.us +lib.co.us +lib.ct.us +lib.dc.us +// lib.de.us Issue #243 - Moved to Private section at request of Ed Moore 
+lib.fl.us +lib.ga.us +lib.gu.us +lib.hi.us +lib.ia.us +lib.id.us +lib.il.us +lib.in.us +lib.ks.us +lib.ky.us +lib.la.us +lib.ma.us +lib.md.us +lib.me.us +lib.mi.us +lib.mn.us +lib.mo.us +lib.ms.us +lib.mt.us +lib.nc.us +lib.nd.us +lib.ne.us +lib.nh.us +lib.nj.us +lib.nm.us +lib.nv.us +lib.ny.us +lib.oh.us +lib.ok.us +lib.or.us +lib.pa.us +lib.pr.us +lib.ri.us +lib.sc.us +lib.sd.us +lib.tn.us +lib.tx.us +lib.ut.us +lib.vi.us +lib.vt.us +lib.va.us +lib.wa.us +lib.wi.us +// lib.wv.us Bug 941670 - Removed at request of Larry W Arnold +lib.wy.us +// k12.ma.us contains school districts in Massachusetts. The 4LDs are +// managed independently except for private (PVT), charter (CHTR) and +// parochial (PAROCH) schools. Those are delegated directly to the +// 5LD operators. +pvt.k12.ma.us +chtr.k12.ma.us +paroch.k12.ma.us +// Merit Network, Inc. maintains the registry for =~ /(k12|cc|lib).mi.us/ and the following +// see also: http://domreg.merit.edu +// see also: whois -h whois.domreg.merit.edu help +ann-arbor.mi.us +cog.mi.us +dst.mi.us +eaton.mi.us +gen.mi.us +mus.mi.us +tec.mi.us +washtenaw.mi.us + +// uy : http://www.nic.org.uy/ +uy +com.uy +edu.uy +gub.uy +mil.uy +net.uy +org.uy + +// uz : http://www.reg.uz/ +uz +co.uz +com.uz +net.uz +org.uz + +// va : https://en.wikipedia.org/wiki/.va +va + +// vc : https://en.wikipedia.org/wiki/.vc +// Submitted by registry +vc +com.vc +net.vc +org.vc +gov.vc +mil.vc +edu.vc + +// ve : https://registro.nic.ve/ +// Submitted by registry nic@nic.ve and nicve@conatel.gob.ve +ve +arts.ve +bib.ve +co.ve +com.ve +e12.ve +edu.ve +firm.ve +gob.ve +gov.ve +info.ve +int.ve +mil.ve +net.ve +nom.ve +org.ve +rar.ve +rec.ve +store.ve +tec.ve +web.ve + +// vg : https://en.wikipedia.org/wiki/.vg +vg + +// vi : http://www.nic.vi/newdomainform.htm +// http://www.nic.vi/Domain_Rules/body_domain_rules.html indicates some other +// TLDs are "reserved", such as edu.vi and gov.vi, but doesn't actually say they +// are available for registration (which 
they do not seem to be). +vi +co.vi +com.vi +k12.vi +net.vi +org.vi + +// vn : https://www.dot.vn/vnnic/vnnic/domainregistration.jsp +vn +com.vn +net.vn +org.vn +edu.vn +gov.vn +int.vn +ac.vn +biz.vn +info.vn +name.vn +pro.vn +health.vn + +// vu : https://en.wikipedia.org/wiki/.vu +// http://www.vunic.vu/ +vu +com.vu +edu.vu +net.vu +org.vu + +// wf : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +wf + +// ws : https://en.wikipedia.org/wiki/.ws +// http://samoanic.ws/index.dhtml +ws +com.ws +net.ws +org.ws +gov.ws +edu.ws + +// yt : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +yt + +// IDN ccTLDs +// When submitting patches, please maintain a sort by ISO 3166 ccTLD, then +// U-label, and follow this format: +// // A-Label ("", [, variant info]) : +// // [sponsoring org] +// U-Label + +// xn--mgbaam7a8h ("Emerat", Arabic) : AE +// http://nic.ae/english/arabicdomain/rules.jsp +امارات + +// xn--y9a3aq ("hye", Armenian) : AM +// ISOC AM (operated by .am Registry) +հայ + +// xn--54b7fta0cc ("Bangla", Bangla) : BD +বাংলা + +// xn--90ae ("bg", Bulgarian) : BG +бг + +// xn--mgbcpq6gpa1a ("albahrain", Arabic) : BH +البحرين + +// xn--90ais ("bel", Belarusian/Russian Cyrillic) : BY +// Operated by .by registry +бел + +// xn--fiqs8s ("Zhongguo/China", Chinese, Simplified) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中国 + +// xn--fiqz9s ("Zhongguo/China", Chinese, Traditional) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中國 + +// xn--lgbbat1ad8j ("Algeria/Al Jazair", Arabic) : DZ +الجزائر + +// xn--wgbh1c ("Egypt/Masr", Arabic) : EG +// http://www.dotmasr.eg/ +مصر + +// xn--e1a4c ("eu", Cyrillic) : EU +// https://eurid.eu +ею + +// xn--qxa6a ("eu", Greek) : EU +// https://eurid.eu +ευ + +// xn--mgbah1a3hjkrd ("Mauritania", Arabic) : MR +موريتانيا + +// xn--node ("ge", Georgian Mkhedruli) : GE +გე + +// xn--qxam ("el", Greek) : GR +// Hellenic Ministry of Infrastructure, Transport, and Networks +ελ + 
+// xn--j6w193g ("Hong Kong", Chinese) : HK +// https://www.hkirc.hk +// Submitted by registry +// https://www.hkirc.hk/content.jsp?id=30#!/34 +香港 +公司.香港 +教育.香港 +政府.香港 +個人.香港 +網絡.香港 +組織.香港 + +// xn--2scrj9c ("Bharat", Kannada) : IN +// India +ಭಾರತ + +// xn--3hcrj9c ("Bharat", Oriya) : IN +// India +ଭାରତ + +// xn--45br5cyl ("Bharatam", Assamese) : IN +// India +ভাৰত + +// xn--h2breg3eve ("Bharatam", Sanskrit) : IN +// India +भारतम् + +// xn--h2brj9c8c ("Bharot", Santali) : IN +// India +भारोत + +// xn--mgbgu82a ("Bharat", Sindhi) : IN +// India +ڀارت + +// xn--rvc1e0am3e ("Bharatam", Malayalam) : IN +// India +ഭാരതം + +// xn--h2brj9c ("Bharat", Devanagari) : IN +// India +भारत + +// xn--mgbbh1a ("Bharat", Kashmiri) : IN +// India +بارت + +// xn--mgbbh1a71e ("Bharat", Arabic) : IN +// India +بھارت + +// xn--fpcrj9c3d ("Bharat", Telugu) : IN +// India +భారత్ + +// xn--gecrj9c ("Bharat", Gujarati) : IN +// India +ભારત + +// xn--s9brj9c ("Bharat", Gurmukhi) : IN +// India +ਭਾਰਤ + +// xn--45brj9c ("Bharat", Bengali) : IN +// India +ভারত + +// xn--xkc2dl3a5ee0h ("India", Tamil) : IN +// India +இந்தியா + +// xn--mgba3a4f16a ("Iran", Persian) : IR +ایران + +// xn--mgba3a4fra ("Iran", Arabic) : IR +ايران + +// xn--mgbtx2b ("Iraq", Arabic) : IQ +// Communications and Media Commission +عراق + +// xn--mgbayh7gpa ("al-Ordon", Arabic) : JO +// National Information Technology Center (NITC) +// Royal Scientific Society, Al-Jubeiha +الاردن + +// xn--3e0b707e ("Republic of Korea", Hangul) : KR +한국 + +// xn--80ao21a ("Kaz", Kazakh) : KZ +қаз + +// xn--q7ce6a ("Lao", Lao) : LA +ລາວ + +// xn--fzc2c9e2c ("Lanka", Sinhalese-Sinhala) : LK +// https://nic.lk +ලංකා + +// xn--xkc2al3hye2a ("Ilangai", Tamil) : LK +// https://nic.lk +இலங்கை + +// xn--mgbc0a9azcg ("Morocco/al-Maghrib", Arabic) : MA +المغرب + +// xn--d1alf ("mkd", Macedonian) : MK +// MARnet +мкд + +// xn--l1acc ("mon", Mongolian) : MN +мон + +// xn--mix891f ("Macao", Chinese, Traditional) : MO +// MONIC / HNET Asia (Registry 
Operator for .mo) +澳門 + +// xn--mix082f ("Macao", Chinese, Simplified) : MO +澳门 + +// xn--mgbx4cd0ab ("Malaysia", Malay) : MY +مليسيا + +// xn--mgb9awbf ("Oman", Arabic) : OM +عمان + +// xn--mgbai9azgqp6j ("Pakistan", Urdu/Arabic) : PK +پاکستان + +// xn--mgbai9a5eva00b ("Pakistan", Urdu/Arabic, variant) : PK +پاكستان + +// xn--ygbi2ammx ("Falasteen", Arabic) : PS +// The Palestinian National Internet Naming Authority (PNINA) +// http://www.pnina.ps +فلسطين + +// xn--90a3ac ("srb", Cyrillic) : RS +// https://www.rnids.rs/en/domains/national-domains +срб +пр.срб +орг.срб +обр.срб +од.срб +упр.срб +ак.срб + +// xn--p1ai ("rf", Russian-Cyrillic) : RU +// https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +рф + +// xn--wgbl6a ("Qatar", Arabic) : QA +// http://www.ict.gov.qa/ +قطر + +// xn--mgberp4a5d4ar ("AlSaudiah", Arabic) : SA +// http://www.nic.net.sa/ +السعودية + +// xn--mgberp4a5d4a87g ("AlSaudiah", Arabic, variant) : SA +السعودیة + +// xn--mgbqly7c0a67fbc ("AlSaudiah", Arabic, variant) : SA +السعودیۃ + +// xn--mgbqly7cvafr ("AlSaudiah", Arabic, variant) : SA +السعوديه + +// xn--mgbpl2fh ("sudan", Arabic) : SD +// Operated by .sd registry +سودان + +// xn--yfro4i67o Singapore ("Singapore", Chinese) : SG +新加坡 + +// xn--clchc0ea0b2g2a9gcd ("Singapore", Tamil) : SG +சிங்கப்பூர் + +// xn--ogbpf8fl ("Syria", Arabic) : SY +سورية + +// xn--mgbtf8fl ("Syria", Arabic, variant) : SY +سوريا + +// xn--o3cw4h ("Thai", Thai) : TH +// http://www.thnic.co.th +ไทย +ศึกษา.ไทย +ธุรกิจ.ไทย +รัฐบาล.ไทย +ทหาร.ไทย +เน็ต.ไทย +องค์กร.ไทย + +// xn--pgbs0dh ("Tunisia", Arabic) : TN +// http://nic.tn +تونس + +// xn--kpry57d ("Taiwan", Chinese, Traditional) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台灣 + +// xn--kprw13d ("Taiwan", Chinese, Simplified) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台湾 + +// xn--nnx388a ("Taiwan", Chinese, variant) : TW +臺灣 + +// xn--j1amh ("ukr", Cyrillic) : UA +укр + +// xn--mgb2ddes ("AlYemen", Arabic) : YE 
+اليمن + +// xxx : http://icmregistry.com +xxx + +// ye : http://www.y.net.ye/services/domain_name.htm +ye +com.ye +edu.ye +gov.ye +net.ye +mil.ye +org.ye + +// za : https://www.zadna.org.za/content/page/domain-information/ +ac.za +agric.za +alt.za +co.za +edu.za +gov.za +grondar.za +law.za +mil.za +net.za +ngo.za +nic.za +nis.za +nom.za +org.za +school.za +tm.za +web.za + +// zm : https://zicta.zm/ +// Submitted by registry +zm +ac.zm +biz.zm +co.zm +com.zm +edu.zm +gov.zm +info.zm +mil.zm +net.zm +org.zm +sch.zm + +// zw : https://www.potraz.gov.zw/ +// Confirmed by registry 2017-01-25 +zw +ac.zw +co.zw +gov.zw +mil.zw +org.zw + + +// newGTLDs + +// List of new gTLDs imported from https://www.icann.org/resources/registries/gtlds/v2/gtlds.json on 2021-12-22T15:14:14Z +// This list is auto-generated, don't edit it manually. +// aaa : 2015-02-26 American Automobile Association, Inc. +aaa + +// aarp : 2015-05-21 AARP +aarp + +// abarth : 2015-07-30 Fiat Chrysler Automobiles N.V. +abarth + +// abb : 2014-10-24 ABB Ltd +abb + +// abbott : 2014-07-24 Abbott Laboratories, Inc. +abbott + +// abbvie : 2015-07-30 AbbVie Inc. +abbvie + +// abc : 2015-07-30 Disney Enterprises, Inc. +abc + +// able : 2015-06-25 Able Inc. +able + +// abogado : 2014-04-24 Registry Services, LLC +abogado + +// abudhabi : 2015-07-30 Abu Dhabi Systems and Information Centre +abudhabi + +// academy : 2013-11-07 Binky Moon, LLC +academy + +// accenture : 2014-08-15 Accenture plc +accenture + +// accountant : 2014-11-20 dot Accountant Limited +accountant + +// accountants : 2014-03-20 Binky Moon, LLC +accountants + +// aco : 2015-01-08 ACO Severin Ahlmann GmbH & Co. KG +aco + +// actor : 2013-12-12 Dog Beach, LLC +actor + +// adac : 2015-07-16 Allgemeiner Deutscher Automobil-Club e.V. (ADAC) +adac + +// ads : 2014-12-04 Charleston Road Registry Inc. 
+ads + +// adult : 2014-10-16 ICM Registry AD LLC +adult + +// aeg : 2015-03-19 Aktiebolaget Electrolux +aeg + +// aetna : 2015-05-21 Aetna Life Insurance Company +aetna + +// afl : 2014-10-02 Australian Football League +afl + +// africa : 2014-03-24 ZA Central Registry NPC trading as Registry.Africa +africa + +// agakhan : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +agakhan + +// agency : 2013-11-14 Binky Moon, LLC +agency + +// aig : 2014-12-18 American International Group, Inc. +aig + +// airbus : 2015-07-30 Airbus S.A.S. +airbus + +// airforce : 2014-03-06 Dog Beach, LLC +airforce + +// airtel : 2014-10-24 Bharti Airtel Limited +airtel + +// akdn : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +akdn + +// alfaromeo : 2015-07-31 Fiat Chrysler Automobiles N.V. +alfaromeo + +// alibaba : 2015-01-15 Alibaba Group Holding Limited +alibaba + +// alipay : 2015-01-15 Alibaba Group Holding Limited +alipay + +// allfinanz : 2014-07-03 Allfinanz Deutsche Vermögensberatung Aktiengesellschaft +allfinanz + +// allstate : 2015-07-31 Allstate Fire and Casualty Insurance Company +allstate + +// ally : 2015-06-18 Ally Financial Inc. +ally + +// alsace : 2014-07-02 Region Grand Est +alsace + +// alstom : 2015-07-30 ALSTOM +alstom + +// amazon : 2019-12-19 Amazon Registry Services, Inc. +amazon + +// americanexpress : 2015-07-31 American Express Travel Related Services Company, Inc. +americanexpress + +// americanfamily : 2015-07-23 AmFam, Inc. +americanfamily + +// amex : 2015-07-31 American Express Travel Related Services Company, Inc. +amex + +// amfam : 2015-07-23 AmFam, Inc. +amfam + +// amica : 2015-05-28 Amica Mutual Insurance Company +amica + +// amsterdam : 2014-07-24 Gemeente Amsterdam +amsterdam + +// analytics : 2014-12-18 Campus IP LLC +analytics + +// android : 2014-08-07 Charleston Road Registry Inc. +android + +// anquan : 2015-01-08 Beijing Qihu Keji Co., Ltd. 
+anquan + +// anz : 2015-07-31 Australia and New Zealand Banking Group Limited +anz + +// aol : 2015-09-17 Oath Inc. +aol + +// apartments : 2014-12-11 Binky Moon, LLC +apartments + +// app : 2015-05-14 Charleston Road Registry Inc. +app + +// apple : 2015-05-14 Apple Inc. +apple + +// aquarelle : 2014-07-24 Aquarelle.com +aquarelle + +// arab : 2015-11-12 League of Arab States +arab + +// aramco : 2014-11-20 Aramco Services Company +aramco + +// archi : 2014-02-06 Afilias Limited +archi + +// army : 2014-03-06 Dog Beach, LLC +army + +// art : 2016-03-24 UK Creative Ideas Limited +art + +// arte : 2014-12-11 Association Relative à la Télévision Européenne G.E.I.E. +arte + +// asda : 2015-07-31 Wal-Mart Stores, Inc. +asda + +// associates : 2014-03-06 Binky Moon, LLC +associates + +// athleta : 2015-07-30 The Gap, Inc. +athleta + +// attorney : 2014-03-20 Dog Beach, LLC +attorney + +// auction : 2014-03-20 Dog Beach, LLC +auction + +// audi : 2015-05-21 AUDI Aktiengesellschaft +audi + +// audible : 2015-06-25 Amazon Registry Services, Inc. +audible + +// audio : 2014-03-20 UNR Corp. +audio + +// auspost : 2015-08-13 Australian Postal Corporation +auspost + +// author : 2014-12-18 Amazon Registry Services, Inc. +author + +// auto : 2014-11-13 XYZ.COM LLC +auto + +// autos : 2014-01-09 XYZ.COM LLC +autos + +// avianca : 2015-01-08 Avianca Holdings S.A. +avianca + +// aws : 2015-06-25 AWS Registry LLC +aws + +// axa : 2013-12-19 AXA Group Operations SAS +axa + +// azure : 2014-12-18 Microsoft Corporation +azure + +// baby : 2015-04-09 XYZ.COM LLC +baby + +// baidu : 2015-01-08 Baidu, Inc. +baidu + +// banamex : 2015-07-30 Citigroup Inc. +banamex + +// bananarepublic : 2015-07-31 The Gap, Inc. 
+bananarepublic + +// band : 2014-06-12 Dog Beach, LLC +band + +// bank : 2014-09-25 fTLD Registry Services LLC +bank + +// bar : 2013-12-12 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +bar + +// barcelona : 2014-07-24 Municipi de Barcelona +barcelona + +// barclaycard : 2014-11-20 Barclays Bank PLC +barclaycard + +// barclays : 2014-11-20 Barclays Bank PLC +barclays + +// barefoot : 2015-06-11 Gallo Vineyards, Inc. +barefoot + +// bargains : 2013-11-14 Binky Moon, LLC +bargains + +// baseball : 2015-10-29 MLB Advanced Media DH, LLC +baseball + +// basketball : 2015-08-20 Fédération Internationale de Basketball (FIBA) +basketball + +// bauhaus : 2014-04-17 Werkhaus GmbH +bauhaus + +// bayern : 2014-01-23 Bayern Connect GmbH +bayern + +// bbc : 2014-12-18 British Broadcasting Corporation +bbc + +// bbt : 2015-07-23 BB&T Corporation +bbt + +// bbva : 2014-10-02 BANCO BILBAO VIZCAYA ARGENTARIA, S.A. +bbva + +// bcg : 2015-04-02 The Boston Consulting Group, Inc. +bcg + +// bcn : 2014-07-24 Municipi de Barcelona +bcn + +// beats : 2015-05-14 Beats Electronics, LLC +beats + +// beauty : 2015-12-03 XYZ.COM LLC +beauty + +// beer : 2014-01-09 Registry Services, LLC +beer + +// bentley : 2014-12-18 Bentley Motors Limited +bentley + +// berlin : 2013-10-31 dotBERLIN GmbH & Co. KG +berlin + +// best : 2013-12-19 BestTLD Pty Ltd +best + +// bestbuy : 2015-07-31 BBY Solutions, Inc. +bestbuy + +// bet : 2015-05-07 Afilias Limited +bet + +// bharti : 2014-01-09 Bharti Enterprises (Holding) Private Limited +bharti + +// bible : 2014-06-19 American Bible Society +bible + +// bid : 2013-12-19 dot Bid Limited +bid + +// bike : 2013-08-27 Binky Moon, LLC +bike + +// bing : 2014-12-18 Microsoft Corporation +bing + +// bingo : 2014-12-04 Binky Moon, LLC +bingo + +// bio : 2014-03-06 Afilias Limited +bio + +// black : 2014-01-16 Afilias Limited +black + +// blackfriday : 2014-01-16 UNR Corp. 
+blackfriday + +// blockbuster : 2015-07-30 Dish DBS Corporation +blockbuster + +// blog : 2015-05-14 Knock Knock WHOIS There, LLC +blog + +// bloomberg : 2014-07-17 Bloomberg IP Holdings LLC +bloomberg + +// blue : 2013-11-07 Afilias Limited +blue + +// bms : 2014-10-30 Bristol-Myers Squibb Company +bms + +// bmw : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +bmw + +// bnpparibas : 2014-05-29 BNP Paribas +bnpparibas + +// boats : 2014-12-04 XYZ.COM LLC +boats + +// boehringer : 2015-07-09 Boehringer Ingelheim International GmbH +boehringer + +// bofa : 2015-07-31 Bank of America Corporation +bofa + +// bom : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +bom + +// bond : 2014-06-05 ShortDot SA +bond + +// boo : 2014-01-30 Charleston Road Registry Inc. +boo + +// book : 2015-08-27 Amazon Registry Services, Inc. +book + +// booking : 2015-07-16 Booking.com B.V. +booking + +// bosch : 2015-06-18 Robert Bosch GMBH +bosch + +// bostik : 2015-05-28 Bostik SA +bostik + +// boston : 2015-12-10 Registry Services, LLC +boston + +// bot : 2014-12-18 Amazon Registry Services, Inc. +bot + +// boutique : 2013-11-14 Binky Moon, LLC +boutique + +// box : 2015-11-12 Intercap Registry Inc. +box + +// bradesco : 2014-12-18 Banco Bradesco S.A. +bradesco + +// bridgestone : 2014-12-18 Bridgestone Corporation +bridgestone + +// broadway : 2014-12-22 Celebrate Broadway, Inc. +broadway + +// broker : 2014-12-11 Dog Beach, LLC +broker + +// brother : 2015-01-29 Brother Industries, Ltd. +brother + +// brussels : 2014-02-06 DNS.be vzw +brussels + +// budapest : 2013-11-21 Minds + Machines Group Limited +budapest + +// bugatti : 2015-07-23 Bugatti International SA +bugatti + +// build : 2013-11-07 Plan Bee LLC +build + +// builders : 2013-11-07 Binky Moon, LLC +builders + +// business : 2013-11-07 Binky Moon, LLC +business + +// buy : 2014-12-18 Amazon Registry Services, Inc. +buy + +// buzz : 2013-10-02 DOTSTRATEGY CO. 
+buzz + +// bzh : 2014-02-27 Association www.bzh +bzh + +// cab : 2013-10-24 Binky Moon, LLC +cab + +// cafe : 2015-02-11 Binky Moon, LLC +cafe + +// cal : 2014-07-24 Charleston Road Registry Inc. +cal + +// call : 2014-12-18 Amazon Registry Services, Inc. +call + +// calvinklein : 2015-07-30 PVH gTLD Holdings LLC +calvinklein + +// cam : 2016-04-21 AC Webconnecting Holding B.V. +cam + +// camera : 2013-08-27 Binky Moon, LLC +camera + +// camp : 2013-11-07 Binky Moon, LLC +camp + +// cancerresearch : 2014-05-15 Australian Cancer Research Foundation +cancerresearch + +// canon : 2014-09-12 Canon Inc. +canon + +// capetown : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +capetown + +// capital : 2014-03-06 Binky Moon, LLC +capital + +// capitalone : 2015-08-06 Capital One Financial Corporation +capitalone + +// car : 2015-01-22 XYZ.COM LLC +car + +// caravan : 2013-12-12 Caravan International, Inc. +caravan + +// cards : 2013-12-05 Binky Moon, LLC +cards + +// care : 2014-03-06 Binky Moon, LLC +care + +// career : 2013-10-09 dotCareer LLC +career + +// careers : 2013-10-02 Binky Moon, LLC +careers + +// cars : 2014-11-13 XYZ.COM LLC +cars + +// casa : 2013-11-21 Registry Services, LLC +casa + +// case : 2015-09-03 Helium TLDs Ltd +case + +// cash : 2014-03-06 Binky Moon, LLC +cash + +// casino : 2014-12-18 Binky Moon, LLC +casino + +// catering : 2013-12-05 Binky Moon, LLC +catering + +// catholic : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +catholic + +// cba : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +cba + +// cbn : 2014-08-22 The Christian Broadcasting Network, Inc. +cbn + +// cbre : 2015-07-02 CBRE, Inc. +cbre + +// cbs : 2015-08-06 CBS Domains Inc. 
+cbs + +// center : 2013-11-07 Binky Moon, LLC +center + +// ceo : 2013-11-07 CEOTLD Pty Ltd +ceo + +// cern : 2014-06-05 European Organization for Nuclear Research ("CERN") +cern + +// cfa : 2014-08-28 CFA Institute +cfa + +// cfd : 2014-12-11 ShortDot SA +cfd + +// chanel : 2015-04-09 Chanel International B.V. +chanel + +// channel : 2014-05-08 Charleston Road Registry Inc. +channel + +// charity : 2018-04-11 Binky Moon, LLC +charity + +// chase : 2015-04-30 JPMorgan Chase Bank, National Association +chase + +// chat : 2014-12-04 Binky Moon, LLC +chat + +// cheap : 2013-11-14 Binky Moon, LLC +cheap + +// chintai : 2015-06-11 CHINTAI Corporation +chintai + +// christmas : 2013-11-21 UNR Corp. +christmas + +// chrome : 2014-07-24 Charleston Road Registry Inc. +chrome + +// church : 2014-02-06 Binky Moon, LLC +church + +// cipriani : 2015-02-19 Hotel Cipriani Srl +cipriani + +// circle : 2014-12-18 Amazon Registry Services, Inc. +circle + +// cisco : 2014-12-22 Cisco Technology, Inc. +cisco + +// citadel : 2015-07-23 Citadel Domain LLC +citadel + +// citi : 2015-07-30 Citigroup Inc. +citi + +// citic : 2014-01-09 CITIC Group Corporation +citic + +// city : 2014-05-29 Binky Moon, LLC +city + +// cityeats : 2014-12-11 Lifestyle Domain Holdings, Inc. +cityeats + +// claims : 2014-03-20 Binky Moon, LLC +claims + +// cleaning : 2013-12-05 Binky Moon, LLC +cleaning + +// click : 2014-06-05 UNR Corp. +click + +// clinic : 2014-03-20 Binky Moon, LLC +clinic + +// clinique : 2015-10-01 The Estée Lauder Companies Inc. +clinique + +// clothing : 2013-08-27 Binky Moon, LLC +clothing + +// cloud : 2015-04-16 Aruba PEC S.p.A. +cloud + +// club : 2013-11-08 Registry Services, LLC +club + +// clubmed : 2015-06-25 Club Méditerranée S.A. 
+clubmed + +// coach : 2014-10-09 Binky Moon, LLC +coach + +// codes : 2013-10-31 Binky Moon, LLC +codes + +// coffee : 2013-10-17 Binky Moon, LLC +coffee + +// college : 2014-01-16 XYZ.COM LLC +college + +// cologne : 2014-02-05 dotKoeln GmbH +cologne + +// comcast : 2015-07-23 Comcast IP Holdings I, LLC +comcast + +// commbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +commbank + +// community : 2013-12-05 Binky Moon, LLC +community + +// company : 2013-11-07 Binky Moon, LLC +company + +// compare : 2015-10-08 Registry Services, LLC +compare + +// computer : 2013-10-24 Binky Moon, LLC +computer + +// comsec : 2015-01-08 VeriSign, Inc. +comsec + +// condos : 2013-12-05 Binky Moon, LLC +condos + +// construction : 2013-09-16 Binky Moon, LLC +construction + +// consulting : 2013-12-05 Dog Beach, LLC +consulting + +// contact : 2015-01-08 Dog Beach, LLC +contact + +// contractors : 2013-09-10 Binky Moon, LLC +contractors + +// cooking : 2013-11-21 Registry Services, LLC +cooking + +// cookingchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. +cookingchannel + +// cool : 2013-11-14 Binky Moon, LLC +cool + +// corsica : 2014-09-25 Collectivité de Corse +corsica + +// country : 2013-12-19 DotCountry LLC +country + +// coupon : 2015-02-26 Amazon Registry Services, Inc. +coupon + +// coupons : 2015-03-26 Binky Moon, LLC +coupons + +// courses : 2014-12-04 OPEN UNIVERSITIES AUSTRALIA PTY LTD +courses + +// cpa : 2019-06-10 American Institute of Certified Public Accountants +cpa + +// credit : 2014-03-20 Binky Moon, LLC +credit + +// creditcard : 2014-03-20 Binky Moon, LLC +creditcard + +// creditunion : 2015-01-22 DotCooperation LLC +creditunion + +// cricket : 2014-10-09 dot Cricket Limited +cricket + +// crown : 2014-10-24 Crown Equipment Corporation +crown + +// crs : 2014-04-03 Federated Co-operatives Limited +crs + +// cruise : 2015-12-10 Viking River Cruises (Bermuda) Ltd. 
+cruise + +// cruises : 2013-12-05 Binky Moon, LLC +cruises + +// csc : 2014-09-25 Alliance-One Services, Inc. +csc + +// cuisinella : 2014-04-03 SCHMIDT GROUPE S.A.S. +cuisinella + +// cymru : 2014-05-08 Nominet UK +cymru + +// cyou : 2015-01-22 ShortDot SA +cyou + +// dabur : 2014-02-06 Dabur India Limited +dabur + +// dad : 2014-01-23 Charleston Road Registry Inc. +dad + +// dance : 2013-10-24 Dog Beach, LLC +dance + +// data : 2016-06-02 Dish DBS Corporation +data + +// date : 2014-11-20 dot Date Limited +date + +// dating : 2013-12-05 Binky Moon, LLC +dating + +// datsun : 2014-03-27 NISSAN MOTOR CO., LTD. +datsun + +// day : 2014-01-30 Charleston Road Registry Inc. +day + +// dclk : 2014-11-20 Charleston Road Registry Inc. +dclk + +// dds : 2015-05-07 Registry Services, LLC +dds + +// deal : 2015-06-25 Amazon Registry Services, Inc. +deal + +// dealer : 2014-12-22 Intercap Registry Inc. +dealer + +// deals : 2014-05-22 Binky Moon, LLC +deals + +// degree : 2014-03-06 Dog Beach, LLC +degree + +// delivery : 2014-09-11 Binky Moon, LLC +delivery + +// dell : 2014-10-24 Dell Inc. +dell + +// deloitte : 2015-07-31 Deloitte Touche Tohmatsu +deloitte + +// delta : 2015-02-19 Delta Air Lines, Inc. +delta + +// democrat : 2013-10-24 Dog Beach, LLC +democrat + +// dental : 2014-03-20 Binky Moon, LLC +dental + +// dentist : 2014-03-20 Dog Beach, LLC +dentist + +// desi : 2013-11-14 Desi Networks LLC +desi + +// design : 2014-11-07 Registry Services, LLC +design + +// dev : 2014-10-16 Charleston Road Registry Inc. +dev + +// dhl : 2015-07-23 Deutsche Post AG +dhl + +// diamonds : 2013-09-22 Binky Moon, LLC +diamonds + +// diet : 2014-06-26 UNR Corp. 
+diet + +// digital : 2014-03-06 Binky Moon, LLC +digital + +// direct : 2014-04-10 Binky Moon, LLC +direct + +// directory : 2013-09-20 Binky Moon, LLC +directory + +// discount : 2014-03-06 Binky Moon, LLC +discount + +// discover : 2015-07-23 Discover Financial Services +discover + +// dish : 2015-07-30 Dish DBS Corporation +dish + +// diy : 2015-11-05 Lifestyle Domain Holdings, Inc. +diy + +// dnp : 2013-12-13 Dai Nippon Printing Co., Ltd. +dnp + +// docs : 2014-10-16 Charleston Road Registry Inc. +docs + +// doctor : 2016-06-02 Binky Moon, LLC +doctor + +// dog : 2014-12-04 Binky Moon, LLC +dog + +// domains : 2013-10-17 Binky Moon, LLC +domains + +// dot : 2015-05-21 Dish DBS Corporation +dot + +// download : 2014-11-20 dot Support Limited +download + +// drive : 2015-03-05 Charleston Road Registry Inc. +drive + +// dtv : 2015-06-04 Dish DBS Corporation +dtv + +// dubai : 2015-01-01 Dubai Smart Government Department +dubai + +// dunlop : 2015-07-02 The Goodyear Tire & Rubber Company +dunlop + +// dupont : 2015-06-25 DuPont Specialty Products USA, LLC +dupont + +// durban : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +durban + +// dvag : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +dvag + +// dvr : 2016-05-26 DISH Technologies L.L.C. +dvr + +// earth : 2014-12-04 Interlink Co., Ltd. +earth + +// eat : 2014-01-23 Charleston Road Registry Inc. +eat + +// eco : 2016-07-08 Big Room Inc. +eco + +// edeka : 2014-12-18 EDEKA Verband kaufmännischer Genossenschaften e.V. 
+edeka + +// education : 2013-11-07 Binky Moon, LLC +education + +// email : 2013-10-31 Binky Moon, LLC +email + +// emerck : 2014-04-03 Merck KGaA +emerck + +// energy : 2014-09-11 Binky Moon, LLC +energy + +// engineer : 2014-03-06 Dog Beach, LLC +engineer + +// engineering : 2014-03-06 Binky Moon, LLC +engineering + +// enterprises : 2013-09-20 Binky Moon, LLC +enterprises + +// epson : 2014-12-04 Seiko Epson Corporation +epson + +// equipment : 2013-08-27 Binky Moon, LLC +equipment + +// ericsson : 2015-07-09 Telefonaktiebolaget L M Ericsson +ericsson + +// erni : 2014-04-03 ERNI Group Holding AG +erni + +// esq : 2014-05-08 Charleston Road Registry Inc. +esq + +// estate : 2013-08-27 Binky Moon, LLC +estate + +// etisalat : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +etisalat + +// eurovision : 2014-04-24 European Broadcasting Union (EBU) +eurovision + +// eus : 2013-12-12 Puntueus Fundazioa +eus + +// events : 2013-12-05 Binky Moon, LLC +events + +// exchange : 2014-03-06 Binky Moon, LLC +exchange + +// expert : 2013-11-21 Binky Moon, LLC +expert + +// exposed : 2013-12-05 Binky Moon, LLC +exposed + +// express : 2015-02-11 Binky Moon, LLC +express + +// extraspace : 2015-05-14 Extra Space Storage LLC +extraspace + +// fage : 2014-12-18 Fage International S.A. +fage + +// fail : 2014-03-06 Binky Moon, LLC +fail + +// fairwinds : 2014-11-13 FairWinds Partners, LLC +fairwinds + +// faith : 2014-11-20 dot Faith Limited +faith + +// family : 2015-04-02 Dog Beach, LLC +family + +// fan : 2014-03-06 Dog Beach, LLC +fan + +// fans : 2014-11-07 ZDNS International Limited +fans + +// farm : 2013-11-07 Binky Moon, LLC +farm + +// farmers : 2015-07-09 Farmers Insurance Exchange +farmers + +// fashion : 2014-07-03 Registry Services, LLC +fashion + +// fast : 2014-12-18 Amazon Registry Services, Inc. +fast + +// fedex : 2015-08-06 Federal Express Corporation +fedex + +// feedback : 2013-12-19 Top Level Spectrum, Inc. 
+feedback + +// ferrari : 2015-07-31 Fiat Chrysler Automobiles N.V. +ferrari + +// ferrero : 2014-12-18 Ferrero Trading Lux S.A. +ferrero + +// fiat : 2015-07-31 Fiat Chrysler Automobiles N.V. +fiat + +// fidelity : 2015-07-30 Fidelity Brokerage Services LLC +fidelity + +// fido : 2015-08-06 Rogers Communications Canada Inc. +fido + +// film : 2015-01-08 Motion Picture Domain Registry Pty Ltd +film + +// final : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +final + +// finance : 2014-03-20 Binky Moon, LLC +finance + +// financial : 2014-03-06 Binky Moon, LLC +financial + +// fire : 2015-06-25 Amazon Registry Services, Inc. +fire + +// firestone : 2014-12-18 Bridgestone Licensing Services, Inc +firestone + +// firmdale : 2014-03-27 Firmdale Holdings Limited +firmdale + +// fish : 2013-12-12 Binky Moon, LLC +fish + +// fishing : 2013-11-21 Registry Services, LLC +fishing + +// fit : 2014-11-07 Registry Services, LLC +fit + +// fitness : 2014-03-06 Binky Moon, LLC +fitness + +// flickr : 2015-04-02 Flickr, Inc. +flickr + +// flights : 2013-12-05 Binky Moon, LLC +flights + +// flir : 2015-07-23 FLIR Systems, Inc. +flir + +// florist : 2013-11-07 Binky Moon, LLC +florist + +// flowers : 2014-10-09 UNR Corp. +flowers + +// fly : 2014-05-08 Charleston Road Registry Inc. +fly + +// foo : 2014-01-23 Charleston Road Registry Inc. +foo + +// food : 2016-04-21 Lifestyle Domain Holdings, Inc. +food + +// foodnetwork : 2015-07-02 Lifestyle Domain Holdings, Inc. +foodnetwork + +// football : 2014-12-18 Binky Moon, LLC +football + +// ford : 2014-11-13 Ford Motor Company +ford + +// forex : 2014-12-11 Dog Beach, LLC +forex + +// forsale : 2014-05-22 Dog Beach, LLC +forsale + +// forum : 2015-04-02 Fegistry, LLC +forum + +// foundation : 2013-12-05 Binky Moon, LLC +foundation + +// fox : 2015-09-11 FOX Registry, LLC +fox + +// free : 2015-12-10 Amazon Registry Services, Inc. 
+free + +// fresenius : 2015-07-30 Fresenius Immobilien-Verwaltungs-GmbH +fresenius + +// frl : 2014-05-15 FRLregistry B.V. +frl + +// frogans : 2013-12-19 OP3FT +frogans + +// frontdoor : 2015-07-02 Lifestyle Domain Holdings, Inc. +frontdoor + +// frontier : 2015-02-05 Frontier Communications Corporation +frontier + +// ftr : 2015-07-16 Frontier Communications Corporation +ftr + +// fujitsu : 2015-07-30 Fujitsu Limited +fujitsu + +// fun : 2016-01-14 Radix FZC +fun + +// fund : 2014-03-20 Binky Moon, LLC +fund + +// furniture : 2014-03-20 Binky Moon, LLC +furniture + +// futbol : 2013-09-20 Dog Beach, LLC +futbol + +// fyi : 2015-04-02 Binky Moon, LLC +fyi + +// gal : 2013-11-07 Asociación puntoGAL +gal + +// gallery : 2013-09-13 Binky Moon, LLC +gallery + +// gallo : 2015-06-11 Gallo Vineyards, Inc. +gallo + +// gallup : 2015-02-19 Gallup, Inc. +gallup + +// game : 2015-05-28 UNR Corp. +game + +// games : 2015-05-28 Dog Beach, LLC +games + +// gap : 2015-07-31 The Gap, Inc. +gap + +// garden : 2014-06-26 Registry Services, LLC +garden + +// gay : 2019-05-23 Top Level Design, LLC +gay + +// gbiz : 2014-07-17 Charleston Road Registry Inc. +gbiz + +// gdn : 2014-07-31 Joint Stock Company "Navigation-information systems" +gdn + +// gea : 2014-12-04 GEA Group Aktiengesellschaft +gea + +// gent : 2014-01-23 COMBELL NV +gent + +// genting : 2015-03-12 Resorts World Inc Pte. Ltd. +genting + +// george : 2015-07-31 Wal-Mart Stores, Inc. +george + +// ggee : 2014-01-09 GMO Internet, Inc. +ggee + +// gift : 2013-10-17 DotGift, LLC +gift + +// gifts : 2014-07-03 Binky Moon, LLC +gifts + +// gives : 2014-03-06 Dog Beach, LLC +gives + +// giving : 2014-11-13 Giving Limited +giving + +// glass : 2013-11-07 Binky Moon, LLC +glass + +// gle : 2014-07-24 Charleston Road Registry Inc. +gle + +// global : 2014-04-17 Dot Global Domain Registry Limited +global + +// globo : 2013-12-19 Globo Comunicação e Participações S.A +globo + +// gmail : 2014-05-01 Charleston Road Registry Inc. 
+gmail + +// gmbh : 2016-01-29 Binky Moon, LLC +gmbh + +// gmo : 2014-01-09 GMO Internet, Inc. +gmo + +// gmx : 2014-04-24 1&1 Mail & Media GmbH +gmx + +// godaddy : 2015-07-23 Go Daddy East, LLC +godaddy + +// gold : 2015-01-22 Binky Moon, LLC +gold + +// goldpoint : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +goldpoint + +// golf : 2014-12-18 Binky Moon, LLC +golf + +// goo : 2014-12-18 NTT Resonant Inc. +goo + +// goodyear : 2015-07-02 The Goodyear Tire & Rubber Company +goodyear + +// goog : 2014-11-20 Charleston Road Registry Inc. +goog + +// google : 2014-07-24 Charleston Road Registry Inc. +google + +// gop : 2014-01-16 Republican State Leadership Committee, Inc. +gop + +// got : 2014-12-18 Amazon Registry Services, Inc. +got + +// grainger : 2015-05-07 Grainger Registry Services, LLC +grainger + +// graphics : 2013-09-13 Binky Moon, LLC +graphics + +// gratis : 2014-03-20 Binky Moon, LLC +gratis + +// green : 2014-05-08 Afilias Limited +green + +// gripe : 2014-03-06 Binky Moon, LLC +gripe + +// grocery : 2016-06-16 Wal-Mart Stores, Inc. +grocery + +// group : 2014-08-15 Binky Moon, LLC +group + +// guardian : 2015-07-30 The Guardian Life Insurance Company of America +guardian + +// gucci : 2014-11-13 Guccio Gucci S.p.a. +gucci + +// guge : 2014-08-28 Charleston Road Registry Inc. +guge + +// guide : 2013-09-13 Binky Moon, LLC +guide + +// guitars : 2013-11-14 UNR Corp. +guitars + +// guru : 2013-08-27 Binky Moon, LLC +guru + +// hair : 2015-12-03 XYZ.COM LLC +hair + +// hamburg : 2014-02-20 Hamburg Top-Level-Domain GmbH +hamburg + +// hangout : 2014-11-13 Charleston Road Registry Inc. +hangout + +// haus : 2013-12-05 Dog Beach, LLC +haus + +// hbo : 2015-07-30 HBO Registry Services, Inc. +hbo + +// hdfc : 2015-07-30 HOUSING DEVELOPMENT FINANCE CORPORATION LIMITED +hdfc + +// hdfcbank : 2015-02-12 HDFC Bank Limited +hdfcbank + +// health : 2015-02-11 DotHealth, LLC +health + +// healthcare : 2014-06-12 Binky Moon, LLC +healthcare + +// help : 2014-06-26 UNR Corp. 
+help + +// helsinki : 2015-02-05 City of Helsinki +helsinki + +// here : 2014-02-06 Charleston Road Registry Inc. +here + +// hermes : 2014-07-10 HERMES INTERNATIONAL +hermes + +// hgtv : 2015-07-02 Lifestyle Domain Holdings, Inc. +hgtv + +// hiphop : 2014-03-06 UNR Corp. +hiphop + +// hisamitsu : 2015-07-16 Hisamitsu Pharmaceutical Co.,Inc. +hisamitsu + +// hitachi : 2014-10-31 Hitachi, Ltd. +hitachi + +// hiv : 2014-03-13 UNR Corp. +hiv + +// hkt : 2015-05-14 PCCW-HKT DataCom Services Limited +hkt + +// hockey : 2015-03-19 Binky Moon, LLC +hockey + +// holdings : 2013-08-27 Binky Moon, LLC +holdings + +// holiday : 2013-11-07 Binky Moon, LLC +holiday + +// homedepot : 2015-04-02 Home Depot Product Authority, LLC +homedepot + +// homegoods : 2015-07-16 The TJX Companies, Inc. +homegoods + +// homes : 2014-01-09 XYZ.COM LLC +homes + +// homesense : 2015-07-16 The TJX Companies, Inc. +homesense + +// honda : 2014-12-18 Honda Motor Co., Ltd. +honda + +// horse : 2013-11-21 Registry Services, LLC +horse + +// hospital : 2016-10-20 Binky Moon, LLC +hospital + +// host : 2014-04-17 Radix FZC +host + +// hosting : 2014-05-29 UNR Corp. +hosting + +// hot : 2015-08-27 Amazon Registry Services, Inc. +hot + +// hoteles : 2015-03-05 Travel Reservations SRL +hoteles + +// hotels : 2016-04-07 Booking.com B.V. +hotels + +// hotmail : 2014-12-18 Microsoft Corporation +hotmail + +// house : 2013-11-07 Binky Moon, LLC +house + +// how : 2014-01-23 Charleston Road Registry Inc. +how + +// hsbc : 2014-10-24 HSBC Global Services (UK) Limited +hsbc + +// hughes : 2015-07-30 Hughes Satellite Systems Corporation +hughes + +// hyatt : 2015-07-30 Hyatt GTLD, L.L.C. +hyatt + +// hyundai : 2015-07-09 Hyundai Motor Company +hyundai + +// ibm : 2014-07-31 International Business Machines Corporation +ibm + +// icbc : 2015-02-19 Industrial and Commercial Bank of China Limited +icbc + +// ice : 2014-10-30 IntercontinentalExchange, Inc. 
+ice + +// icu : 2015-01-08 ShortDot SA +icu + +// ieee : 2015-07-23 IEEE Global LLC +ieee + +// ifm : 2014-01-30 ifm electronic gmbh +ifm + +// ikano : 2015-07-09 Ikano S.A. +ikano + +// imamat : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +imamat + +// imdb : 2015-06-25 Amazon Registry Services, Inc. +imdb + +// immo : 2014-07-10 Binky Moon, LLC +immo + +// immobilien : 2013-11-07 Dog Beach, LLC +immobilien + +// inc : 2018-03-10 Intercap Registry Inc. +inc + +// industries : 2013-12-05 Binky Moon, LLC +industries + +// infiniti : 2014-03-27 NISSAN MOTOR CO., LTD. +infiniti + +// ing : 2014-01-23 Charleston Road Registry Inc. +ing + +// ink : 2013-12-05 Top Level Design, LLC +ink + +// institute : 2013-11-07 Binky Moon, LLC +institute + +// insurance : 2015-02-19 fTLD Registry Services LLC +insurance + +// insure : 2014-03-20 Binky Moon, LLC +insure + +// international : 2013-11-07 Binky Moon, LLC +international + +// intuit : 2015-07-30 Intuit Administrative Services, Inc. +intuit + +// investments : 2014-03-20 Binky Moon, LLC +investments + +// ipiranga : 2014-08-28 Ipiranga Produtos de Petroleo S.A. +ipiranga + +// irish : 2014-08-07 Binky Moon, LLC +irish + +// ismaili : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +ismaili + +// ist : 2014-08-28 Istanbul Metropolitan Municipality +ist + +// istanbul : 2014-08-28 Istanbul Metropolitan Municipality +istanbul + +// itau : 2014-10-02 Itau Unibanco Holding S.A. +itau + +// itv : 2015-07-09 ITV Services Limited +itv + +// jaguar : 2014-11-13 Jaguar Land Rover Ltd +jaguar + +// java : 2014-06-19 Oracle Corporation +java + +// jcb : 2014-11-20 JCB Co., Ltd. +jcb + +// jeep : 2015-07-30 FCA US LLC. 
+jeep + +// jetzt : 2014-01-09 Binky Moon, LLC +jetzt + +// jewelry : 2015-03-05 Binky Moon, LLC +jewelry + +// jio : 2015-04-02 Reliance Industries Limited +jio + +// jll : 2015-04-02 Jones Lang LaSalle Incorporated +jll + +// jmp : 2015-03-26 Matrix IP LLC +jmp + +// jnj : 2015-06-18 Johnson & Johnson Services, Inc. +jnj + +// joburg : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +joburg + +// jot : 2014-12-18 Amazon Registry Services, Inc. +jot + +// joy : 2014-12-18 Amazon Registry Services, Inc. +joy + +// jpmorgan : 2015-04-30 JPMorgan Chase Bank, National Association +jpmorgan + +// jprs : 2014-09-18 Japan Registry Services Co., Ltd. +jprs + +// juegos : 2014-03-20 UNR Corp. +juegos + +// juniper : 2015-07-30 JUNIPER NETWORKS, INC. +juniper + +// kaufen : 2013-11-07 Dog Beach, LLC +kaufen + +// kddi : 2014-09-12 KDDI CORPORATION +kddi + +// kerryhotels : 2015-04-30 Kerry Trading Co. Limited +kerryhotels + +// kerrylogistics : 2015-04-09 Kerry Trading Co. Limited +kerrylogistics + +// kerryproperties : 2015-04-09 Kerry Trading Co. Limited +kerryproperties + +// kfh : 2014-12-04 Kuwait Finance House +kfh + +// kia : 2015-07-09 KIA MOTORS CORPORATION +kia + +// kids : 2021-08-13 DotKids Foundation Limited +kids + +// kim : 2013-09-23 Afilias Limited +kim + +// kinder : 2014-11-07 Ferrero Trading Lux S.A. +kinder + +// kindle : 2015-06-25 Amazon Registry Services, Inc. +kindle + +// kitchen : 2013-09-20 Binky Moon, LLC +kitchen + +// kiwi : 2013-09-20 DOT KIWI LIMITED +kiwi + +// koeln : 2014-01-09 dotKoeln GmbH +koeln + +// komatsu : 2015-01-08 Komatsu Ltd. +komatsu + +// kosher : 2015-08-20 Kosher Marketing Assets LLC +kosher + +// kpmg : 2015-04-23 KPMG International Cooperative (KPMG International Genossenschaft) +kpmg + +// kpn : 2015-01-08 Koninklijke KPN N.V. +kpn + +// krd : 2013-12-05 KRG Department of Information Technology +krd + +// kred : 2013-12-19 KredTLD Pty Ltd +kred + +// kuokgroup : 2015-04-09 Kerry Trading Co. 
Limited +kuokgroup + +// kyoto : 2014-11-07 Academic Institution: Kyoto Jyoho Gakuen +kyoto + +// lacaixa : 2014-01-09 Fundación Bancaria Caixa d’Estalvis i Pensions de Barcelona, “la Caixa” +lacaixa + +// lamborghini : 2015-06-04 Automobili Lamborghini S.p.A. +lamborghini + +// lamer : 2015-10-01 The Estée Lauder Companies Inc. +lamer + +// lancaster : 2015-02-12 LANCASTER +lancaster + +// lancia : 2015-07-31 Fiat Chrysler Automobiles N.V. +lancia + +// land : 2013-09-10 Binky Moon, LLC +land + +// landrover : 2014-11-13 Jaguar Land Rover Ltd +landrover + +// lanxess : 2015-07-30 LANXESS Corporation +lanxess + +// lasalle : 2015-04-02 Jones Lang LaSalle Incorporated +lasalle + +// lat : 2014-10-16 ECOM-LAC Federaciòn de Latinoamèrica y el Caribe para Internet y el Comercio Electrònico +lat + +// latino : 2015-07-30 Dish DBS Corporation +latino + +// latrobe : 2014-06-16 La Trobe University +latrobe + +// law : 2015-01-22 Registry Services, LLC +law + +// lawyer : 2014-03-20 Dog Beach, LLC +lawyer + +// lds : 2014-03-20 IRI Domain Management, LLC +lds + +// lease : 2014-03-06 Binky Moon, LLC +lease + +// leclerc : 2014-08-07 A.C.D. LEC Association des Centres Distributeurs Edouard Leclerc +leclerc + +// lefrak : 2015-07-16 LeFrak Organization, Inc. +lefrak + +// legal : 2014-10-16 Binky Moon, LLC +legal + +// lego : 2015-07-16 LEGO Juris A/S +lego + +// lexus : 2015-04-23 TOYOTA MOTOR CORPORATION +lexus + +// lgbt : 2014-05-08 Afilias Limited +lgbt + +// lidl : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +lidl + +// life : 2014-02-06 Binky Moon, LLC +life + +// lifeinsurance : 2015-01-15 American Council of Life Insurers +lifeinsurance + +// lifestyle : 2014-12-11 Lifestyle Domain Holdings, Inc. +lifestyle + +// lighting : 2013-08-27 Binky Moon, LLC +lighting + +// like : 2014-12-18 Amazon Registry Services, Inc. 
+like + +// lilly : 2015-07-31 Eli Lilly and Company +lilly + +// limited : 2014-03-06 Binky Moon, LLC +limited + +// limo : 2013-10-17 Binky Moon, LLC +limo + +// lincoln : 2014-11-13 Ford Motor Company +lincoln + +// linde : 2014-12-04 Linde Aktiengesellschaft +linde + +// link : 2013-11-14 UNR Corp. +link + +// lipsy : 2015-06-25 Lipsy Ltd +lipsy + +// live : 2014-12-04 Dog Beach, LLC +live + +// living : 2015-07-30 Lifestyle Domain Holdings, Inc. +living + +// lixil : 2015-03-19 LIXIL Group Corporation +lixil + +// llc : 2017-12-14 Afilias Limited +llc + +// llp : 2019-08-26 UNR Corp. +llp + +// loan : 2014-11-20 dot Loan Limited +loan + +// loans : 2014-03-20 Binky Moon, LLC +loans + +// locker : 2015-06-04 Dish DBS Corporation +locker + +// locus : 2015-06-25 Locus Analytics LLC +locus + +// loft : 2015-07-30 Annco, Inc. +loft + +// lol : 2015-01-30 UNR Corp. +lol + +// london : 2013-11-14 Dot London Domains Limited +london + +// lotte : 2014-11-07 Lotte Holdings Co., Ltd. +lotte + +// lotto : 2014-04-10 Afilias Limited +lotto + +// love : 2014-12-22 Merchant Law Group LLP +love + +// lpl : 2015-07-30 LPL Holdings, Inc. +lpl + +// lplfinancial : 2015-07-30 LPL Holdings, Inc. +lplfinancial + +// ltd : 2014-09-25 Binky Moon, LLC +ltd + +// ltda : 2014-04-17 InterNetX, Corp +ltda + +// lundbeck : 2015-08-06 H. Lundbeck A/S +lundbeck + +// luxe : 2014-01-09 Registry Services, LLC +luxe + +// luxury : 2013-10-17 Luxury Partners, LLC +luxury + +// macys : 2015-07-31 Macys, Inc. +macys + +// madrid : 2014-05-01 Comunidad de Madrid +madrid + +// maif : 2014-10-02 Mutuelle Assurance Instituteur France (MAIF) +maif + +// maison : 2013-12-05 Binky Moon, LLC +maison + +// makeup : 2015-01-15 XYZ.COM LLC +makeup + +// man : 2014-12-04 MAN SE +man + +// management : 2013-11-07 Binky Moon, LLC +management + +// mango : 2013-10-24 PUNTO FA S.L. +mango + +// map : 2016-06-09 Charleston Road Registry Inc. 
+map + +// market : 2014-03-06 Dog Beach, LLC +market + +// marketing : 2013-11-07 Binky Moon, LLC +marketing + +// markets : 2014-12-11 Dog Beach, LLC +markets + +// marriott : 2014-10-09 Marriott Worldwide Corporation +marriott + +// marshalls : 2015-07-16 The TJX Companies, Inc. +marshalls + +// maserati : 2015-07-31 Fiat Chrysler Automobiles N.V. +maserati + +// mattel : 2015-08-06 Mattel Sites, Inc. +mattel + +// mba : 2015-04-02 Binky Moon, LLC +mba + +// mckinsey : 2015-07-31 McKinsey Holdings, Inc. +mckinsey + +// med : 2015-08-06 Medistry LLC +med + +// media : 2014-03-06 Binky Moon, LLC +media + +// meet : 2014-01-16 Charleston Road Registry Inc. +meet + +// melbourne : 2014-05-29 The Crown in right of the State of Victoria, represented by its Department of State Development, Business and Innovation +melbourne + +// meme : 2014-01-30 Charleston Road Registry Inc. +meme + +// memorial : 2014-10-16 Dog Beach, LLC +memorial + +// men : 2015-02-26 Exclusive Registry Limited +men + +// menu : 2013-09-11 Dot Menu Registry, LLC +menu + +// merckmsd : 2016-07-14 MSD Registry Holdings, Inc. +merckmsd + +// miami : 2013-12-19 Registry Services, LLC +miami + +// microsoft : 2014-12-18 Microsoft Corporation +microsoft + +// mini : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +mini + +// mint : 2015-07-30 Intuit Administrative Services, Inc. +mint + +// mit : 2015-07-02 Massachusetts Institute of Technology +mit + +// mitsubishi : 2015-07-23 Mitsubishi Corporation +mitsubishi + +// mlb : 2015-05-21 MLB Advanced Media DH, LLC +mlb + +// mls : 2015-04-23 The Canadian Real Estate Association +mls + +// mma : 2014-11-07 MMA IARD +mma + +// mobile : 2016-06-02 Dish DBS Corporation +mobile + +// moda : 2013-11-07 Dog Beach, LLC +moda + +// moe : 2013-11-13 Interlink Co., Ltd. +moe + +// moi : 2014-12-18 Amazon Registry Services, Inc. +moi + +// mom : 2015-04-16 UNR Corp. 
+mom + +// monash : 2013-09-30 Monash University +monash + +// money : 2014-10-16 Binky Moon, LLC +money + +// monster : 2015-09-11 XYZ.COM LLC +monster + +// mormon : 2013-12-05 IRI Domain Management, LLC +mormon + +// mortgage : 2014-03-20 Dog Beach, LLC +mortgage + +// moscow : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +moscow + +// moto : 2015-06-04 Motorola Trademark Holdings, LLC +moto + +// motorcycles : 2014-01-09 XYZ.COM LLC +motorcycles + +// mov : 2014-01-30 Charleston Road Registry Inc. +mov + +// movie : 2015-02-05 Binky Moon, LLC +movie + +// msd : 2015-07-23 MSD Registry Holdings, Inc. +msd + +// mtn : 2014-12-04 MTN Dubai Limited +mtn + +// mtr : 2015-03-12 MTR Corporation Limited +mtr + +// music : 2021-05-04 DotMusic Limited +music + +// mutual : 2015-04-02 Northwestern Mutual MU TLD Registry, LLC +mutual + +// nab : 2015-08-20 National Australia Bank Limited +nab + +// nagoya : 2013-10-24 GMO Registry, Inc. +nagoya + +// natura : 2015-03-12 NATURA COSMÉTICOS S.A. +natura + +// navy : 2014-03-06 Dog Beach, LLC +navy + +// nba : 2015-07-31 NBA REGISTRY, LLC +nba + +// nec : 2015-01-08 NEC Corporation +nec + +// netbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +netbank + +// netflix : 2015-06-18 Netflix, Inc. +netflix + +// network : 2013-11-14 Binky Moon, LLC +network + +// neustar : 2013-12-05 NeuStar, Inc. +neustar + +// new : 2014-01-30 Charleston Road Registry Inc. +new + +// news : 2014-12-18 Dog Beach, LLC +news + +// next : 2015-06-18 Next plc +next + +// nextdirect : 2015-06-18 Next plc +nextdirect + +// nexus : 2014-07-24 Charleston Road Registry Inc. +nexus + +// nfl : 2015-07-23 NFL Reg Ops LLC +nfl + +// ngo : 2014-03-06 Public Interest Registry +ngo + +// nhk : 2014-02-13 Japan Broadcasting Corporation (NHK) +nhk + +// nico : 2014-12-04 DWANGO Co., Ltd. +nico + +// nike : 2015-07-23 NIKE, Inc. 
+nike + +// nikon : 2015-05-21 NIKON CORPORATION +nikon + +// ninja : 2013-11-07 Dog Beach, LLC +ninja + +// nissan : 2014-03-27 NISSAN MOTOR CO., LTD. +nissan + +// nissay : 2015-10-29 Nippon Life Insurance Company +nissay + +// nokia : 2015-01-08 Nokia Corporation +nokia + +// northwesternmutual : 2015-06-18 Northwestern Mutual Registry, LLC +northwesternmutual + +// norton : 2014-12-04 NortonLifeLock Inc. +norton + +// now : 2015-06-25 Amazon Registry Services, Inc. +now + +// nowruz : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +nowruz + +// nowtv : 2015-05-14 Starbucks (HK) Limited +nowtv + +// nra : 2014-05-22 NRA Holdings Company, INC. +nra + +// nrw : 2013-11-21 Minds + Machines GmbH +nrw + +// ntt : 2014-10-31 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +ntt + +// nyc : 2014-01-23 The City of New York by and through the New York City Department of Information Technology & Telecommunications +nyc + +// obi : 2014-09-25 OBI Group Holding SE & Co. KGaA +obi + +// observer : 2015-04-30 Dog Beach, LLC +observer + +// office : 2015-03-12 Microsoft Corporation +office + +// okinawa : 2013-12-05 BRregistry, Inc. +okinawa + +// olayan : 2015-05-14 Crescent Holding GmbH +olayan + +// olayangroup : 2015-05-14 Crescent Holding GmbH +olayangroup + +// oldnavy : 2015-07-31 The Gap, Inc. +oldnavy + +// ollo : 2015-06-04 Dish DBS Corporation +ollo + +// omega : 2015-01-08 The Swatch Group Ltd +omega + +// one : 2014-11-07 One.com A/S +one + +// ong : 2014-03-06 Public Interest Registry +ong + +// onl : 2013-09-16 iRegistry GmbH +onl + +// online : 2015-01-15 Radix FZC +online + +// ooo : 2014-01-09 INFIBEAM AVENUES LIMITED +ooo + +// open : 2015-07-31 American Express Travel Related Services Company, Inc. +open + +// oracle : 2014-06-19 Oracle Corporation +oracle + +// orange : 2015-03-12 Orange Brand Services Limited +orange + +// organic : 2014-03-27 Afilias Limited +organic + +// origins : 2015-10-01 The Estée Lauder Companies Inc. 
+origins + +// osaka : 2014-09-04 Osaka Registry Co., Ltd. +osaka + +// otsuka : 2013-10-11 Otsuka Holdings Co., Ltd. +otsuka + +// ott : 2015-06-04 Dish DBS Corporation +ott + +// ovh : 2014-01-16 MédiaBC +ovh + +// page : 2014-12-04 Charleston Road Registry Inc. +page + +// panasonic : 2015-07-30 Panasonic Corporation +panasonic + +// paris : 2014-01-30 City of Paris +paris + +// pars : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +pars + +// partners : 2013-12-05 Binky Moon, LLC +partners + +// parts : 2013-12-05 Binky Moon, LLC +parts + +// party : 2014-09-11 Blue Sky Registry Limited +party + +// passagens : 2015-03-05 Travel Reservations SRL +passagens + +// pay : 2015-08-27 Amazon Registry Services, Inc. +pay + +// pccw : 2015-05-14 PCCW Enterprises Limited +pccw + +// pet : 2015-05-07 Afilias Limited +pet + +// pfizer : 2015-09-11 Pfizer Inc. +pfizer + +// pharmacy : 2014-06-19 National Association of Boards of Pharmacy +pharmacy + +// phd : 2016-07-28 Charleston Road Registry Inc. +phd + +// philips : 2014-11-07 Koninklijke Philips N.V. +philips + +// phone : 2016-06-02 Dish DBS Corporation +phone + +// photo : 2013-11-14 UNR Corp. +photo + +// photography : 2013-09-20 Binky Moon, LLC +photography + +// photos : 2013-10-17 Binky Moon, LLC +photos + +// physio : 2014-05-01 PhysBiz Pty Ltd +physio + +// pics : 2013-11-14 UNR Corp. +pics + +// pictet : 2014-06-26 Pictet Europe S.A. +pictet + +// pictures : 2014-03-06 Binky Moon, LLC +pictures + +// pid : 2015-01-08 Top Level Spectrum, Inc. +pid + +// pin : 2014-12-18 Amazon Registry Services, Inc. +pin + +// ping : 2015-06-11 Ping Registry Provider, Inc. +ping + +// pink : 2013-10-01 Afilias Limited +pink + +// pioneer : 2015-07-16 Pioneer Corporation +pioneer + +// pizza : 2014-06-26 Binky Moon, LLC +pizza + +// place : 2014-04-24 Binky Moon, LLC +place + +// play : 2015-03-05 Charleston Road Registry Inc. +play + +// playstation : 2015-07-02 Sony Interactive Entertainment Inc. 
+playstation + +// plumbing : 2013-09-10 Binky Moon, LLC +plumbing + +// plus : 2015-02-05 Binky Moon, LLC +plus + +// pnc : 2015-07-02 PNC Domain Co., LLC +pnc + +// pohl : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +pohl + +// poker : 2014-07-03 Afilias Limited +poker + +// politie : 2015-08-20 Politie Nederland +politie + +// porn : 2014-10-16 ICM Registry PN LLC +porn + +// pramerica : 2015-07-30 Prudential Financial, Inc. +pramerica + +// praxi : 2013-12-05 Praxi S.p.A. +praxi + +// press : 2014-04-03 Radix FZC +press + +// prime : 2015-06-25 Amazon Registry Services, Inc. +prime + +// prod : 2014-01-23 Charleston Road Registry Inc. +prod + +// productions : 2013-12-05 Binky Moon, LLC +productions + +// prof : 2014-07-24 Charleston Road Registry Inc. +prof + +// progressive : 2015-07-23 Progressive Casualty Insurance Company +progressive + +// promo : 2014-12-18 Afilias Limited +promo + +// properties : 2013-12-05 Binky Moon, LLC +properties + +// property : 2014-05-22 UNR Corp. +property + +// protection : 2015-04-23 XYZ.COM LLC +protection + +// pru : 2015-07-30 Prudential Financial, Inc. +pru + +// prudential : 2015-07-30 Prudential Financial, Inc. +prudential + +// pub : 2013-12-12 Dog Beach, LLC +pub + +// pwc : 2015-10-29 PricewaterhouseCoopers LLP +pwc + +// qpon : 2013-11-14 dotCOOL, Inc. +qpon + +// quebec : 2013-12-19 PointQuébec Inc +quebec + +// quest : 2015-03-26 XYZ.COM LLC +quest + +// racing : 2014-12-04 Premier Registry Limited +racing + +// radio : 2016-07-21 European Broadcasting Union (EBU) +radio + +// read : 2014-12-18 Amazon Registry Services, Inc. +read + +// realestate : 2015-09-11 dotRealEstate LLC +realestate + +// realtor : 2014-05-29 Real Estate Domains LLC +realtor + +// realty : 2015-03-19 Dog Beach, LLC +realty + +// recipes : 2013-10-17 Binky Moon, LLC +recipes + +// red : 2013-11-07 Afilias Limited +red + +// redstone : 2014-10-31 Redstone Haute Couture Co., Ltd. 
+redstone + +// redumbrella : 2015-03-26 Travelers TLD, LLC +redumbrella + +// rehab : 2014-03-06 Dog Beach, LLC +rehab + +// reise : 2014-03-13 Binky Moon, LLC +reise + +// reisen : 2014-03-06 Binky Moon, LLC +reisen + +// reit : 2014-09-04 National Association of Real Estate Investment Trusts, Inc. +reit + +// reliance : 2015-04-02 Reliance Industries Limited +reliance + +// ren : 2013-12-12 ZDNS International Limited +ren + +// rent : 2014-12-04 XYZ.COM LLC +rent + +// rentals : 2013-12-05 Binky Moon, LLC +rentals + +// repair : 2013-11-07 Binky Moon, LLC +repair + +// report : 2013-12-05 Binky Moon, LLC +report + +// republican : 2014-03-20 Dog Beach, LLC +republican + +// rest : 2013-12-19 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +rest + +// restaurant : 2014-07-03 Binky Moon, LLC +restaurant + +// review : 2014-11-20 dot Review Limited +review + +// reviews : 2013-09-13 Dog Beach, LLC +reviews + +// rexroth : 2015-06-18 Robert Bosch GMBH +rexroth + +// rich : 2013-11-21 iRegistry GmbH +rich + +// richardli : 2015-05-14 Pacific Century Asset Management (HK) Limited +richardli + +// ricoh : 2014-11-20 Ricoh Company, Ltd. +ricoh + +// ril : 2015-04-02 Reliance Industries Limited +ril + +// rio : 2014-02-27 Empresa Municipal de Informática SA - IPLANRIO +rio + +// rip : 2014-07-10 Dog Beach, LLC +rip + +// rocher : 2014-12-18 Ferrero Trading Lux S.A. +rocher + +// rocks : 2013-11-14 Dog Beach, LLC +rocks + +// rodeo : 2013-12-19 Registry Services, LLC +rodeo + +// rogers : 2015-08-06 Rogers Communications Canada Inc. +rogers + +// room : 2014-12-18 Amazon Registry Services, Inc. +room + +// rsvp : 2014-05-08 Charleston Road Registry Inc. +rsvp + +// rugby : 2016-12-15 World Rugby Strategic Developments Limited +rugby + +// ruhr : 2013-10-02 regiodot GmbH & Co. KG +ruhr + +// run : 2015-03-19 Binky Moon, LLC +run + +// rwe : 2015-04-02 RWE AG +rwe + +// ryukyu : 2014-01-09 BRregistry, Inc. 
+ryukyu + +// saarland : 2013-12-12 dotSaarland GmbH +saarland + +// safe : 2014-12-18 Amazon Registry Services, Inc. +safe + +// safety : 2015-01-08 Safety Registry Services, LLC. +safety + +// sakura : 2014-12-18 SAKURA Internet Inc. +sakura + +// sale : 2014-10-16 Dog Beach, LLC +sale + +// salon : 2014-12-11 Binky Moon, LLC +salon + +// samsclub : 2015-07-31 Wal-Mart Stores, Inc. +samsclub + +// samsung : 2014-04-03 SAMSUNG SDS CO., LTD +samsung + +// sandvik : 2014-11-13 Sandvik AB +sandvik + +// sandvikcoromant : 2014-11-07 Sandvik AB +sandvikcoromant + +// sanofi : 2014-10-09 Sanofi +sanofi + +// sap : 2014-03-27 SAP AG +sap + +// sarl : 2014-07-03 Binky Moon, LLC +sarl + +// sas : 2015-04-02 Research IP LLC +sas + +// save : 2015-06-25 Amazon Registry Services, Inc. +save + +// saxo : 2014-10-31 Saxo Bank A/S +saxo + +// sbi : 2015-03-12 STATE BANK OF INDIA +sbi + +// sbs : 2014-11-07 ShortDot SA +sbs + +// sca : 2014-03-13 SVENSKA CELLULOSA AKTIEBOLAGET SCA (publ) +sca + +// scb : 2014-02-20 The Siam Commercial Bank Public Company Limited ("SCB") +scb + +// schaeffler : 2015-08-06 Schaeffler Technologies AG & Co. KG +schaeffler + +// schmidt : 2014-04-03 SCHMIDT GROUPE S.A.S. +schmidt + +// scholarships : 2014-04-24 Scholarships.com, LLC +scholarships + +// school : 2014-12-18 Binky Moon, LLC +school + +// schule : 2014-03-06 Binky Moon, LLC +schule + +// schwarz : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +schwarz + +// science : 2014-09-11 dot Science Limited +science + +// scot : 2014-01-23 Dot Scot Registry Limited +scot + +// search : 2016-06-09 Charleston Road Registry Inc. +search + +// seat : 2014-05-22 SEAT, S.A. (Sociedad Unipersonal) +seat + +// secure : 2015-08-27 Amazon Registry Services, Inc. +secure + +// security : 2015-05-14 XYZ.COM LLC +security + +// seek : 2014-12-04 Seek Limited +seek + +// select : 2015-10-08 Registry Services, LLC +select + +// sener : 2014-10-24 Sener Ingeniería y Sistemas, S.A. 
+sener + +// services : 2014-02-27 Binky Moon, LLC +services + +// ses : 2015-07-23 SES +ses + +// seven : 2015-08-06 Seven West Media Ltd +seven + +// sew : 2014-07-17 SEW-EURODRIVE GmbH & Co KG +sew + +// sex : 2014-11-13 ICM Registry SX LLC +sex + +// sexy : 2013-09-11 UNR Corp. +sexy + +// sfr : 2015-08-13 Societe Francaise du Radiotelephone - SFR +sfr + +// shangrila : 2015-09-03 Shangri‐La International Hotel Management Limited +shangrila + +// sharp : 2014-05-01 Sharp Corporation +sharp + +// shaw : 2015-04-23 Shaw Cablesystems G.P. +shaw + +// shell : 2015-07-30 Shell Information Technology International Inc +shell + +// shia : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +shia + +// shiksha : 2013-11-14 Afilias Limited +shiksha + +// shoes : 2013-10-02 Binky Moon, LLC +shoes + +// shop : 2016-04-08 GMO Registry, Inc. +shop + +// shopping : 2016-03-31 Binky Moon, LLC +shopping + +// shouji : 2015-01-08 Beijing Qihu Keji Co., Ltd. +shouji + +// show : 2015-03-05 Binky Moon, LLC +show + +// showtime : 2015-08-06 CBS Domains Inc. +showtime + +// silk : 2015-06-25 Amazon Registry Services, Inc. +silk + +// sina : 2015-03-12 Sina Corporation +sina + +// singles : 2013-08-27 Binky Moon, LLC +singles + +// site : 2015-01-15 Radix FZC +site + +// ski : 2015-04-09 Afilias Limited +ski + +// skin : 2015-01-15 XYZ.COM LLC +skin + +// sky : 2014-06-19 Sky International AG +sky + +// skype : 2014-12-18 Microsoft Corporation +skype + +// sling : 2015-07-30 DISH Technologies L.L.C. +sling + +// smart : 2015-07-09 Smart Communications, Inc. (SMART) +smart + +// smile : 2014-12-18 Amazon Registry Services, Inc. +smile + +// sncf : 2015-02-19 Société Nationale des Chemins de fer Francais S N C F +sncf + +// soccer : 2015-03-26 Binky Moon, LLC +soccer + +// social : 2013-11-07 Dog Beach, LLC +social + +// softbank : 2015-07-02 SoftBank Group Corp. 
+softbank + +// software : 2014-03-20 Dog Beach, LLC +software + +// sohu : 2013-12-19 Sohu.com Limited +sohu + +// solar : 2013-11-07 Binky Moon, LLC +solar + +// solutions : 2013-11-07 Binky Moon, LLC +solutions + +// song : 2015-02-26 Amazon Registry Services, Inc. +song + +// sony : 2015-01-08 Sony Corporation +sony + +// soy : 2014-01-23 Charleston Road Registry Inc. +soy + +// spa : 2019-09-19 Asia Spa and Wellness Promotion Council Limited +spa + +// space : 2014-04-03 Radix FZC +space + +// sport : 2017-11-16 Global Association of International Sports Federations (GAISF) +sport + +// spot : 2015-02-26 Amazon Registry Services, Inc. +spot + +// srl : 2015-05-07 InterNetX, Corp +srl + +// stada : 2014-11-13 STADA Arzneimittel AG +stada + +// staples : 2015-07-30 Staples, Inc. +staples + +// star : 2015-01-08 Star India Private Limited +star + +// statebank : 2015-03-12 STATE BANK OF INDIA +statebank + +// statefarm : 2015-07-30 State Farm Mutual Automobile Insurance Company +statefarm + +// stc : 2014-10-09 Saudi Telecom Company +stc + +// stcgroup : 2014-10-09 Saudi Telecom Company +stcgroup + +// stockholm : 2014-12-18 Stockholms kommun +stockholm + +// storage : 2014-12-22 XYZ.COM LLC +storage + +// store : 2015-04-09 Radix FZC +store + +// stream : 2016-01-08 dot Stream Limited +stream + +// studio : 2015-02-11 Dog Beach, LLC +studio + +// study : 2014-12-11 OPEN UNIVERSITIES AUSTRALIA PTY LTD +study + +// style : 2014-12-04 Binky Moon, LLC +style + +// sucks : 2014-12-22 Vox Populi Registry Ltd. 
+sucks + +// supplies : 2013-12-19 Binky Moon, LLC +supplies + +// supply : 2013-12-19 Binky Moon, LLC +supply + +// support : 2013-10-24 Binky Moon, LLC +support + +// surf : 2014-01-09 Registry Services, LLC +surf + +// surgery : 2014-03-20 Binky Moon, LLC +surgery + +// suzuki : 2014-02-20 SUZUKI MOTOR CORPORATION +suzuki + +// swatch : 2015-01-08 The Swatch Group Ltd +swatch + +// swiss : 2014-10-16 Swiss Confederation +swiss + +// sydney : 2014-09-18 State of New South Wales, Department of Premier and Cabinet +sydney + +// systems : 2013-11-07 Binky Moon, LLC +systems + +// tab : 2014-12-04 Tabcorp Holdings Limited +tab + +// taipei : 2014-07-10 Taipei City Government +taipei + +// talk : 2015-04-09 Amazon Registry Services, Inc. +talk + +// taobao : 2015-01-15 Alibaba Group Holding Limited +taobao + +// target : 2015-07-31 Target Domain Holdings, LLC +target + +// tatamotors : 2015-03-12 Tata Motors Ltd +tatamotors + +// tatar : 2014-04-24 Limited Liability Company "Coordination Center of Regional Domain of Tatarstan Republic" +tatar + +// tattoo : 2013-08-30 UNR Corp. +tattoo + +// tax : 2014-03-20 Binky Moon, LLC +tax + +// taxi : 2015-03-19 Binky Moon, LLC +taxi + +// tci : 2014-09-12 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. 
+tci + +// tdk : 2015-06-11 TDK Corporation +tdk + +// team : 2015-03-05 Binky Moon, LLC +team + +// tech : 2015-01-30 Radix FZC +tech + +// technology : 2013-09-13 Binky Moon, LLC +technology + +// temasek : 2014-08-07 Temasek Holdings (Private) Limited +temasek + +// tennis : 2014-12-04 Binky Moon, LLC +tennis + +// teva : 2015-07-02 Teva Pharmaceutical Industries Limited +teva + +// thd : 2015-04-02 Home Depot Product Authority, LLC +thd + +// theater : 2015-03-19 Binky Moon, LLC +theater + +// theatre : 2015-05-07 XYZ.COM LLC +theatre + +// tiaa : 2015-07-23 Teachers Insurance and Annuity Association of America +tiaa + +// tickets : 2015-02-05 XYZ.COM LLC +tickets + +// tienda : 2013-11-14 Binky Moon, LLC +tienda + +// tiffany : 2015-01-30 Tiffany and Company +tiffany + +// tips : 2013-09-20 Binky Moon, LLC +tips + +// tires : 2014-11-07 Binky Moon, LLC +tires + +// tirol : 2014-04-24 punkt Tirol GmbH +tirol + +// tjmaxx : 2015-07-16 The TJX Companies, Inc. +tjmaxx + +// tjx : 2015-07-16 The TJX Companies, Inc. +tjx + +// tkmaxx : 2015-07-16 The TJX Companies, Inc. +tkmaxx + +// tmall : 2015-01-15 Alibaba Group Holding Limited +tmall + +// today : 2013-09-20 Binky Moon, LLC +today + +// tokyo : 2013-11-13 GMO Registry, Inc. +tokyo + +// tools : 2013-11-21 Binky Moon, LLC +tools + +// top : 2014-03-20 .TOP Registry +top + +// toray : 2014-12-18 Toray Industries, Inc. +toray + +// toshiba : 2014-04-10 TOSHIBA Corporation +toshiba + +// total : 2015-08-06 Total SA +total + +// tours : 2015-01-22 Binky Moon, LLC +tours + +// town : 2014-03-06 Binky Moon, LLC +town + +// toyota : 2015-04-23 TOYOTA MOTOR CORPORATION +toyota + +// toys : 2014-03-06 Binky Moon, LLC +toys + +// trade : 2014-01-23 Elite Registry Limited +trade + +// trading : 2014-12-11 Dog Beach, LLC +trading + +// training : 2013-11-07 Binky Moon, LLC +training + +// travel : 2015-10-09 Dog Beach, LLC +travel + +// travelchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. 
+travelchannel + +// travelers : 2015-03-26 Travelers TLD, LLC +travelers + +// travelersinsurance : 2015-03-26 Travelers TLD, LLC +travelersinsurance + +// trust : 2014-10-16 UNR Corp. +trust + +// trv : 2015-03-26 Travelers TLD, LLC +trv + +// tube : 2015-06-11 Latin American Telecom LLC +tube + +// tui : 2014-07-03 TUI AG +tui + +// tunes : 2015-02-26 Amazon Registry Services, Inc. +tunes + +// tushu : 2014-12-18 Amazon Registry Services, Inc. +tushu + +// tvs : 2015-02-19 T V SUNDRAM IYENGAR & SONS LIMITED +tvs + +// ubank : 2015-08-20 National Australia Bank Limited +ubank + +// ubs : 2014-12-11 UBS AG +ubs + +// unicom : 2015-10-15 China United Network Communications Corporation Limited +unicom + +// university : 2014-03-06 Binky Moon, LLC +university + +// uno : 2013-09-11 Radix FZC +uno + +// uol : 2014-05-01 UBN INTERNET LTDA. +uol + +// ups : 2015-06-25 UPS Market Driver, Inc. +ups + +// vacations : 2013-12-05 Binky Moon, LLC +vacations + +// vana : 2014-12-11 Lifestyle Domain Holdings, Inc. +vana + +// vanguard : 2015-09-03 The Vanguard Group, Inc. +vanguard + +// vegas : 2014-01-16 Dot Vegas, Inc. +vegas + +// ventures : 2013-08-27 Binky Moon, LLC +ventures + +// verisign : 2015-08-13 VeriSign, Inc. +verisign + +// versicherung : 2014-03-20 tldbox GmbH +versicherung + +// vet : 2014-03-06 Dog Beach, LLC +vet + +// viajes : 2013-10-17 Binky Moon, LLC +viajes + +// video : 2014-10-16 Dog Beach, LLC +video + +// vig : 2015-05-14 VIENNA INSURANCE GROUP AG Wiener Versicherung Gruppe +vig + +// viking : 2015-04-02 Viking River Cruises (Bermuda) Ltd. +viking + +// villas : 2013-12-05 Binky Moon, LLC +villas + +// vin : 2015-06-18 Binky Moon, LLC +vin + +// vip : 2015-01-22 Registry Services, LLC +vip + +// virgin : 2014-09-25 Virgin Enterprises Limited +virgin + +// visa : 2015-07-30 Visa Worldwide Pte. 
Limited +visa + +// vision : 2013-12-05 Binky Moon, LLC +vision + +// viva : 2014-11-07 Saudi Telecom Company +viva + +// vivo : 2015-07-31 Telefonica Brasil S.A. +vivo + +// vlaanderen : 2014-02-06 DNS.be vzw +vlaanderen + +// vodka : 2013-12-19 Registry Services, LLC +vodka + +// volkswagen : 2015-05-14 Volkswagen Group of America Inc. +volkswagen + +// volvo : 2015-11-12 Volvo Holding Sverige Aktiebolag +volvo + +// vote : 2013-11-21 Monolith Registry LLC +vote + +// voting : 2013-11-13 Valuetainment Corp. +voting + +// voto : 2013-11-21 Monolith Registry LLC +voto + +// voyage : 2013-08-27 Binky Moon, LLC +voyage + +// vuelos : 2015-03-05 Travel Reservations SRL +vuelos + +// wales : 2014-05-08 Nominet UK +wales + +// walmart : 2015-07-31 Wal-Mart Stores, Inc. +walmart + +// walter : 2014-11-13 Sandvik AB +walter + +// wang : 2013-10-24 Zodiac Wang Limited +wang + +// wanggou : 2014-12-18 Amazon Registry Services, Inc. +wanggou + +// watch : 2013-11-14 Binky Moon, LLC +watch + +// watches : 2014-12-22 Afilias Limited +watches + +// weather : 2015-01-08 International Business Machines Corporation +weather + +// weatherchannel : 2015-03-12 International Business Machines Corporation +weatherchannel + +// webcam : 2014-01-23 dot Webcam Limited +webcam + +// weber : 2015-06-04 Saint-Gobain Weber SA +weber + +// website : 2014-04-03 Radix FZC +website + +// wedding : 2014-04-24 Registry Services, LLC +wedding + +// weibo : 2015-03-05 Sina Corporation +weibo + +// weir : 2015-01-29 Weir Group IP Limited +weir + +// whoswho : 2014-02-20 Who's Who Registry +whoswho + +// wien : 2013-10-28 punkt.wien GmbH +wien + +// wiki : 2013-11-07 Top Level Design, LLC +wiki + +// williamhill : 2014-03-13 William Hill Organization Limited +williamhill + +// win : 2014-11-20 First Registry Limited +win + +// windows : 2014-12-18 Microsoft Corporation +windows + +// wine : 2015-06-18 Binky Moon, LLC +wine + +// winners : 2015-07-16 The TJX Companies, Inc. 
+winners + +// wme : 2014-02-13 William Morris Endeavor Entertainment, LLC +wme + +// wolterskluwer : 2015-08-06 Wolters Kluwer N.V. +wolterskluwer + +// woodside : 2015-07-09 Woodside Petroleum Limited +woodside + +// work : 2013-12-19 Registry Services, LLC +work + +// works : 2013-11-14 Binky Moon, LLC +works + +// world : 2014-06-12 Binky Moon, LLC +world + +// wow : 2015-10-08 Amazon Registry Services, Inc. +wow + +// wtc : 2013-12-19 World Trade Centers Association, Inc. +wtc + +// wtf : 2014-03-06 Binky Moon, LLC +wtf + +// xbox : 2014-12-18 Microsoft Corporation +xbox + +// xerox : 2014-10-24 Xerox DNHC LLC +xerox + +// xfinity : 2015-07-09 Comcast IP Holdings I, LLC +xfinity + +// xihuan : 2015-01-08 Beijing Qihu Keji Co., Ltd. +xihuan + +// xin : 2014-12-11 Elegant Leader Limited +xin + +// xn--11b4c3d : 2015-01-15 VeriSign Sarl +कॉम + +// xn--1ck2e1b : 2015-02-26 Amazon Registry Services, Inc. +セール + +// xn--1qqw23a : 2014-01-09 Guangzhou YU Wei Information Technology Co., Ltd. 
+佛山 + +// xn--30rr7y : 2014-06-12 Excellent First Limited +慈善 + +// xn--3bst00m : 2013-09-13 Eagle Horizon Limited +集团 + +// xn--3ds443g : 2013-09-08 TLD REGISTRY LIMITED OY +在线 + +// xn--3pxu8k : 2015-01-15 VeriSign Sarl +点看 + +// xn--42c2d9a : 2015-01-15 VeriSign Sarl +คอม + +// xn--45q11c : 2013-11-21 Zodiac Gemini Ltd +八卦 + +// xn--4gbrim : 2013-10-04 Helium TLDs Ltd +موقع + +// xn--55qw42g : 2013-11-08 China Organizational Name Administration Center +公益 + +// xn--55qx5d : 2013-11-14 China Internet Network Information Center (CNNIC) +公司 + +// xn--5su34j936bgsg : 2015-09-03 Shangri‐La International Hotel Management Limited +香格里拉 + +// xn--5tzm5g : 2014-12-22 Global Website TLD Asia Limited +网站 + +// xn--6frz82g : 2013-09-23 Afilias Limited +移动 + +// xn--6qq986b3xl : 2013-09-13 Tycoon Treasure Limited +我爱你 + +// xn--80adxhks : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +москва + +// xn--80aqecdr1a : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +католик + +// xn--80asehdb : 2013-07-14 CORE Association +онлайн + +// xn--80aswg : 2013-07-14 CORE Association +сайт + +// xn--8y0a063a : 2015-03-26 China United Network Communications Corporation Limited +联通 + +// xn--9dbq2a : 2015-01-15 VeriSign Sarl +קום + +// xn--9et52u : 2014-06-12 RISE VICTORY LIMITED +时尚 + +// xn--9krt00a : 2015-03-12 Sina Corporation +微博 + +// xn--b4w605ferd : 2014-08-07 Temasek Holdings (Private) Limited +淡马锡 + +// xn--bck1b9a5dre4c : 2015-02-26 Amazon Registry Services, Inc. +ファッション + +// xn--c1avg : 2013-11-14 Public Interest Registry +орг + +// xn--c2br7g : 2015-01-15 VeriSign Sarl +नेट + +// xn--cck2b3b : 2015-02-26 Amazon Registry Services, Inc. +ストア + +// xn--cckwcxetd : 2019-12-19 Amazon Registry Services, Inc. 
+アマゾン + +// xn--cg4bki : 2013-09-27 SAMSUNG SDS CO., LTD +삼성 + +// xn--czr694b : 2014-01-16 Internet DotTrademark Organisation Limited +商标 + +// xn--czrs0t : 2013-12-19 Binky Moon, LLC +商店 + +// xn--czru2d : 2013-11-21 Zodiac Aquarius Limited +商城 + +// xn--d1acj3b : 2013-11-20 The Foundation for Network Initiatives “The Smart Internet” +дети + +// xn--eckvdtc9d : 2014-12-18 Amazon Registry Services, Inc. +ポイント + +// xn--efvy88h : 2014-08-22 Guangzhou YU Wei Information Technology Co., Ltd. +新闻 + +// xn--fct429k : 2015-04-09 Amazon Registry Services, Inc. +家電 + +// xn--fhbei : 2015-01-15 VeriSign Sarl +كوم + +// xn--fiq228c5hs : 2013-09-08 TLD REGISTRY LIMITED OY +中文网 + +// xn--fiq64b : 2013-10-14 CITIC Group Corporation +中信 + +// xn--fjq720a : 2014-05-22 Binky Moon, LLC +娱乐 + +// xn--flw351e : 2014-07-31 Charleston Road Registry Inc. +谷歌 + +// xn--fzys8d69uvgm : 2015-05-14 PCCW Enterprises Limited +電訊盈科 + +// xn--g2xx48c : 2015-01-30 Nawang Heli(Xiamen) Network Service Co., LTD. +购物 + +// xn--gckr3f0f : 2015-02-26 Amazon Registry Services, Inc. +クラウド + +// xn--gk3at1e : 2015-10-08 Amazon Registry Services, Inc. +通販 + +// xn--hxt814e : 2014-05-15 Zodiac Taurus Limited +网店 + +// xn--i1b6b1a6a2e : 2013-11-14 Public Interest Registry +संगठन + +// xn--imr513n : 2014-12-11 Internet DotTrademark Organisation Limited +餐厅 + +// xn--io0a7i : 2013-11-14 China Internet Network Information Center (CNNIC) +网络 + +// xn--j1aef : 2015-01-15 VeriSign Sarl +ком + +// xn--jlq480n2rg : 2019-12-19 Amazon Registry Services, Inc. +亚马逊 + +// xn--jlq61u9w7b : 2015-01-08 Nokia Corporation +诺基亚 + +// xn--jvr189m : 2015-02-26 Amazon Registry Services, Inc. +食品 + +// xn--kcrx77d1x4a : 2014-11-07 Koninklijke Philips N.V. 
+飞利浦 + +// xn--kput3i : 2014-02-13 Beijing RITT-Net Technology Development Co., Ltd +手机 + +// xn--mgba3a3ejt : 2014-11-20 Aramco Services Company +ارامكو + +// xn--mgba7c0bbn0a : 2015-05-14 Crescent Holding GmbH +العليان + +// xn--mgbaakc7dvf : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +اتصالات + +// xn--mgbab2bd : 2013-10-31 CORE Association +بازار + +// xn--mgbca7dzdo : 2015-07-30 Abu Dhabi Systems and Information Centre +ابوظبي + +// xn--mgbi4ecexp : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +كاثوليك + +// xn--mgbt3dhd : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +همراه + +// xn--mk1bu44c : 2015-01-15 VeriSign Sarl +닷컴 + +// xn--mxtq1m : 2014-03-06 Net-Chinese Co., Ltd. +政府 + +// xn--ngbc5azd : 2013-07-13 International Domain Registry Pty. Ltd. +شبكة + +// xn--ngbe9e0a : 2014-12-04 Kuwait Finance House +بيتك + +// xn--ngbrx : 2015-11-12 League of Arab States +عرب + +// xn--nqv7f : 2013-11-14 Public Interest Registry +机构 + +// xn--nqv7fs00ema : 2013-11-14 Public Interest Registry +组织机构 + +// xn--nyqy26a : 2014-11-07 Stable Tone Limited +健康 + +// xn--otu796d : 2017-08-06 Jiang Yu Liang Cai Technology Company Limited +招聘 + +// xn--p1acf : 2013-12-12 Rusnames Limited +рус + +// xn--pssy2u : 2015-01-15 VeriSign Sarl +大拿 + +// xn--q9jyb4c : 2013-09-17 Charleston Road Registry Inc. +みんな + +// xn--qcka1pmc : 2014-07-31 Charleston Road Registry Inc. +グーグル + +// xn--rhqv96g : 2013-09-11 Stable Tone Limited +世界 + +// xn--rovu88b : 2015-02-26 Amazon Registry Services, Inc. +書籍 + +// xn--ses554g : 2014-01-16 KNET Co., Ltd. 
+网址 + +// xn--t60b56a : 2015-01-15 VeriSign Sarl +닷넷 + +// xn--tckwe : 2015-01-15 VeriSign Sarl +コム + +// xn--tiq49xqyj : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +天主教 + +// xn--unup4y : 2013-07-14 Binky Moon, LLC +游戏 + +// xn--vermgensberater-ctb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberater + +// xn--vermgensberatung-pwb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberatung + +// xn--vhquv : 2013-08-27 Binky Moon, LLC +企业 + +// xn--vuq861b : 2014-10-16 Beijing Tele-info Network Technology Co., Ltd. +信息 + +// xn--w4r85el8fhu5dnra : 2015-04-30 Kerry Trading Co. Limited +嘉里大酒店 + +// xn--w4rs40l : 2015-07-30 Kerry Trading Co. Limited +嘉里 + +// xn--xhq521b : 2013-11-14 Guangzhou YU Wei Information Technology Co., Ltd. +广东 + +// xn--zfr164b : 2013-11-08 China Organizational Name Administration Center +政务 + +// xyz : 2013-12-05 XYZ.COM LLC +xyz + +// yachts : 2014-01-09 XYZ.COM LLC +yachts + +// yahoo : 2015-04-02 Oath Inc. +yahoo + +// yamaxun : 2014-12-18 Amazon Registry Services, Inc. +yamaxun + +// yandex : 2014-04-10 Yandex Europe B.V. +yandex + +// yodobashi : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +yodobashi + +// yoga : 2014-05-29 Registry Services, LLC +yoga + +// yokohama : 2013-12-12 GMO Registry, Inc. +yokohama + +// you : 2015-04-09 Amazon Registry Services, Inc. +you + +// youtube : 2014-05-01 Charleston Road Registry Inc. +youtube + +// yun : 2015-01-08 Beijing Qihu Keji Co., Ltd. +yun + +// zappos : 2015-06-25 Amazon Registry Services, Inc. +zappos + +// zara : 2014-11-07 Industria de Diseño Textil, S.A. (INDITEX, S.A.) +zara + +// zero : 2014-12-18 Amazon Registry Services, Inc. +zero + +// zip : 2014-05-08 Charleston Road Registry Inc. 
+zip + +// zone : 2013-11-14 Binky Moon, LLC +zone + +// zuerich : 2014-11-07 Kanton Zürich (Canton of Zurich) +zuerich + + +// ===END ICANN DOMAINS=== +// ===BEGIN PRIVATE DOMAINS=== +// (Note: these are in alphabetical order by company name) + +// 1GB LLC : https://www.1gb.ua/ +// Submitted by 1GB LLC +cc.ua +inf.ua +ltd.ua + +// 611coin : https://611project.org/ +611.to + +// Aaron Marais' Gitlab pages: https://lab.aaronleem.co.za +// Submitted by Aaron Marais +graphox.us + +// accesso Technology Group, plc. : https://accesso.com/ +// Submitted by accesso Team +*.devcdnaccesso.com + +// Adobe : https://www.adobe.com/ +// Submitted by Ian Boston and Lars Trieloff +adobeaemcloud.com +*.dev.adobeaemcloud.com +hlx.live +adobeaemcloud.net +hlx.page +hlx3.page + +// Agnat sp. z o.o. : https://domena.pl +// Submitted by Przemyslaw Plewa +beep.pl + +// alboto.ca : http://alboto.ca +// Submitted by Anton Avramov +barsy.ca + +// Alces Software Ltd : http://alces-software.com +// Submitted by Mark J. 
Titorenko +*.compute.estate +*.alces.network + +// all-inkl.com : https://all-inkl.com +// Submitted by Werner Kaltofen +kasserver.com + +// Altervista: https://www.altervista.org +// Submitted by Carlo Cannas +altervista.org + +// alwaysdata : https://www.alwaysdata.com +// Submitted by Cyril +alwaysdata.net + +// Amazon CloudFront : https://aws.amazon.com/cloudfront/ +// Submitted by Donavan Miller +cloudfront.net + +// Amazon Elastic Compute Cloud : https://aws.amazon.com/ec2/ +// Submitted by Luke Wells +*.compute.amazonaws.com +*.compute-1.amazonaws.com +*.compute.amazonaws.com.cn +us-east-1.amazonaws.com + +// Amazon Elastic Beanstalk : https://aws.amazon.com/elasticbeanstalk/ +// Submitted by Luke Wells +cn-north-1.eb.amazonaws.com.cn +cn-northwest-1.eb.amazonaws.com.cn +elasticbeanstalk.com +ap-northeast-1.elasticbeanstalk.com +ap-northeast-2.elasticbeanstalk.com +ap-northeast-3.elasticbeanstalk.com +ap-south-1.elasticbeanstalk.com +ap-southeast-1.elasticbeanstalk.com +ap-southeast-2.elasticbeanstalk.com +ca-central-1.elasticbeanstalk.com +eu-central-1.elasticbeanstalk.com +eu-west-1.elasticbeanstalk.com +eu-west-2.elasticbeanstalk.com +eu-west-3.elasticbeanstalk.com +sa-east-1.elasticbeanstalk.com +us-east-1.elasticbeanstalk.com +us-east-2.elasticbeanstalk.com +us-gov-west-1.elasticbeanstalk.com +us-west-1.elasticbeanstalk.com +us-west-2.elasticbeanstalk.com + +// Amazon Elastic Load Balancing : https://aws.amazon.com/elasticloadbalancing/ +// Submitted by Luke Wells +*.elb.amazonaws.com +*.elb.amazonaws.com.cn + +// Amazon Global Accelerator : https://aws.amazon.com/global-accelerator/ +// Submitted by Daniel Massaguer +awsglobalaccelerator.com + +// Amazon S3 : https://aws.amazon.com/s3/ +// Submitted by Luke Wells +s3.amazonaws.com +s3-ap-northeast-1.amazonaws.com +s3-ap-northeast-2.amazonaws.com +s3-ap-south-1.amazonaws.com +s3-ap-southeast-1.amazonaws.com +s3-ap-southeast-2.amazonaws.com +s3-ca-central-1.amazonaws.com +s3-eu-central-1.amazonaws.com 
+s3-eu-west-1.amazonaws.com +s3-eu-west-2.amazonaws.com +s3-eu-west-3.amazonaws.com +s3-external-1.amazonaws.com +s3-fips-us-gov-west-1.amazonaws.com +s3-sa-east-1.amazonaws.com +s3-us-gov-west-1.amazonaws.com +s3-us-east-2.amazonaws.com +s3-us-west-1.amazonaws.com +s3-us-west-2.amazonaws.com +s3.ap-northeast-2.amazonaws.com +s3.ap-south-1.amazonaws.com +s3.cn-north-1.amazonaws.com.cn +s3.ca-central-1.amazonaws.com +s3.eu-central-1.amazonaws.com +s3.eu-west-2.amazonaws.com +s3.eu-west-3.amazonaws.com +s3.us-east-2.amazonaws.com +s3.dualstack.ap-northeast-1.amazonaws.com +s3.dualstack.ap-northeast-2.amazonaws.com +s3.dualstack.ap-south-1.amazonaws.com +s3.dualstack.ap-southeast-1.amazonaws.com +s3.dualstack.ap-southeast-2.amazonaws.com +s3.dualstack.ca-central-1.amazonaws.com +s3.dualstack.eu-central-1.amazonaws.com +s3.dualstack.eu-west-1.amazonaws.com +s3.dualstack.eu-west-2.amazonaws.com +s3.dualstack.eu-west-3.amazonaws.com +s3.dualstack.sa-east-1.amazonaws.com +s3.dualstack.us-east-1.amazonaws.com +s3.dualstack.us-east-2.amazonaws.com +s3-website-us-east-1.amazonaws.com +s3-website-us-west-1.amazonaws.com +s3-website-us-west-2.amazonaws.com +s3-website-ap-northeast-1.amazonaws.com +s3-website-ap-southeast-1.amazonaws.com +s3-website-ap-southeast-2.amazonaws.com +s3-website-eu-west-1.amazonaws.com +s3-website-sa-east-1.amazonaws.com +s3-website.ap-northeast-2.amazonaws.com +s3-website.ap-south-1.amazonaws.com +s3-website.ca-central-1.amazonaws.com +s3-website.eu-central-1.amazonaws.com +s3-website.eu-west-2.amazonaws.com +s3-website.eu-west-3.amazonaws.com +s3-website.us-east-2.amazonaws.com + +// Amune : https://amune.org/ +// Submitted by Team Amune +t3l3p0rt.net +tele.amune.org + +// Apigee : https://apigee.com/ +// Submitted by Apigee Security Team +apigee.io + +// Apphud : https://apphud.com +// Submitted by Alexander Selivanov +siiites.com + +// Appspace : https://www.appspace.com +// Submitted by Appspace Security Team +appspacehosted.com 
+appspaceusercontent.com + +// Appudo UG (haftungsbeschränkt) : https://www.appudo.com +// Submitted by Alexander Hochbaum +appudo.net + +// Aptible : https://www.aptible.com/ +// Submitted by Thomas Orozco +on-aptible.com + +// ASEINet : https://www.aseinet.com/ +// Submitted by Asei SEKIGUCHI +user.aseinet.ne.jp +gv.vc +d.gv.vc + +// Asociación Amigos de la Informática "Euskalamiga" : http://encounter.eus/ +// Submitted by Hector Martin +user.party.eus + +// Association potager.org : https://potager.org/ +// Submitted by Lunar +pimienta.org +poivron.org +potager.org +sweetpepper.org + +// ASUSTOR Inc. : http://www.asustor.com +// Submitted by Vincent Tseng +myasustor.com + +// Atlassian : https://atlassian.com +// Submitted by Sam Smyth +cdn.prod.atlassian-dev.net + +// AVM : https://avm.de +// Submitted by Andreas Weise +myfritz.net + +// AVStack Pte. Ltd. : https://avstack.io +// Submitted by Jasper Hugo +onavstack.net + +// AW AdvisorWebsites.com Software Inc : https://advisorwebsites.com +// Submitted by James Kennedy +*.awdev.ca +*.advisor.ws + +// AZ.pl sp. z.o.o: https://az.pl +// Submited by Krzysztof Wolski +ecommerce-shop.pl + +// b-data GmbH : https://www.b-data.io +// Submitted by Olivier Benz +b-data.io + +// backplane : https://www.backplane.io +// Submitted by Anthony Voutas +backplaneapp.io + +// Balena : https://www.balena.io +// Submitted by Petros Angelatos +balena-devices.com + +// University of Banja Luka : https://unibl.org +// Domains for Republic of Srpska administrative entity. +// Submitted by Marko Ivanovic +rs.ba + +// Banzai Cloud +// Submitted by Janos Matyas +*.banzai.cloud +app.banzaicloud.io +*.backyards.banzaicloud.io + +// BASE, Inc. 
: https://binc.jp +// Submitted by Yuya NAGASAWA +base.ec +official.ec +buyshop.jp +fashionstore.jp +handcrafted.jp +kawaiishop.jp +supersale.jp +theshop.jp +shopselect.net +base.shop + +// BetaInABox +// Submitted by Adrian +betainabox.com + +// BinaryLane : http://www.binarylane.com +// Submitted by Nathan O'Sullivan +bnr.la + +// Bitbucket : http://bitbucket.org +// Submitted by Andy Ortlieb +bitbucket.io + +// Blackbaud, Inc. : https://www.blackbaud.com +// Submitted by Paul Crowder +blackbaudcdn.net + +// Blatech : http://www.blatech.net +// Submitted by Luke Bratch +of.je + +// Blue Bite, LLC : https://bluebite.com +// Submitted by Joshua Weiss +bluebite.io + +// Boomla : https://boomla.com +// Submitted by Tibor Halter +boomla.net + +// Boutir : https://www.boutir.com +// Submitted by Eric Ng Ka Ka +boutir.com + +// Boxfuse : https://boxfuse.com +// Submitted by Axel Fontaine +boxfuse.io + +// bplaced : https://www.bplaced.net/ +// Submitted by Miroslav Bozic +square7.ch +bplaced.com +bplaced.de +square7.de +bplaced.net +square7.net + +// Brendly : https://brendly.rs +// Submitted by Dusan Radovanovic +shop.brendly.rs + +// BrowserSafetyMark +// Submitted by Dave Tharp +browsersafetymark.io + +// Bytemark Hosting : https://www.bytemark.co.uk +// Submitted by Paul Cammish +uk0.bigv.io +dh.bytemark.co.uk +vm.bytemark.co.uk + +// Caf.js Labs LLC : https://www.cafjs.com +// Submitted by Antonio Lain +cafjs.com + +// callidomus : https://www.callidomus.com/ +// Submitted by Marcus Popp +mycd.eu + +// Carrd : https://carrd.co +// Submitted by AJ +drr.ac +uwu.ai +carrd.co +crd.co +ju.mp + +// CentralNic : http://www.centralnic.com/names/domains +// Submitted by registry +ae.org +br.com +cn.com +com.de +com.se +de.com +eu.com +gb.net +hu.net +jp.net +jpn.com +mex.com +ru.com +sa.com +se.net +uk.com +uk.net +us.com +za.bz +za.com + +// No longer operated by CentralNic, these entries should be adopted and/or removed by current operators +// Submitted by Gavin Brown 
+ar.com +hu.com +kr.com +no.com +qc.com +uy.com + +// Africa.com Web Solutions Ltd : https://registry.africa.com +// Submitted by Gavin Brown +africa.com + +// iDOT Services Limited : http://www.domain.gr.com +// Submitted by Gavin Brown +gr.com + +// Radix FZC : http://domains.in.net +// Submitted by Gavin Brown +in.net +web.in + +// US REGISTRY LLC : http://us.org +// Submitted by Gavin Brown +us.org + +// co.com Registry, LLC : https://registry.co.com +// Submitted by Gavin Brown +co.com + +// Roar Domains LLC : https://roar.basketball/ +// Submitted by Gavin Brown +aus.basketball +nz.basketball + +// BRS Media : https://brsmedia.com/ +// Submitted by Gavin Brown +radio.am +radio.fm + +// c.la : http://www.c.la/ +c.la + +// certmgr.org : https://certmgr.org +// Submitted by B. Blechschmidt +certmgr.org + +// Cityhost LLC : https://cityhost.ua +// Submitted by Maksym Rivtin +cx.ua + +// Civilized Discourse Construction Kit, Inc. : https://www.discourse.org/ +// Submitted by Rishabh Nambiar & Michael Brown +discourse.group +discourse.team + +// Clever Cloud : https://www.clever-cloud.com/ +// Submitted by Quentin Adam +cleverapps.io + +// Clerk : https://www.clerk.dev +// Submitted by Colin Sidoti +clerk.app +clerkstage.app +*.lcl.dev +*.lclstage.dev +*.stg.dev +*.stgstage.dev + +// ClickRising : https://clickrising.com/ +// Submitted by Umut Gumeli +clickrising.net + +// Cloud66 : https://www.cloud66.com/ +// Submitted by Khash Sajadi +c66.me +cloud66.ws +cloud66.zone + +// CloudAccess.net : https://www.cloudaccess.net/ +// Submitted by Pawel Panek +jdevcloud.com +wpdevcloud.com +cloudaccess.host +freesite.host +cloudaccess.net + +// cloudControl : https://www.cloudcontrol.com/ +// Submitted by Tobias Wilken +cloudcontrolled.com +cloudcontrolapp.com + +// Cloudera, Inc. : https://www.cloudera.com/ +// Submitted by Kedarnath Waikar +*.cloudera.site + +// Cloudflare, Inc. 
: https://www.cloudflare.com/ +// Submitted by Cloudflare Team +pages.dev +trycloudflare.com +workers.dev + +// Clovyr : https://clovyr.io +// Submitted by Patrick Nielsen +wnext.app + +// co.ca : http://registry.co.ca/ +co.ca + +// Co & Co : https://co-co.nl/ +// Submitted by Govert Versluis +*.otap.co + +// i-registry s.r.o. : http://www.i-registry.cz/ +// Submitted by Martin Semrad +co.cz + +// CDN77.com : http://www.cdn77.com +// Submitted by Jan Krpes +c.cdn77.org +cdn77-ssl.net +r.cdn77.net +rsc.cdn77.org +ssl.origin.cdn77-secure.org + +// Cloud DNS Ltd : http://www.cloudns.net +// Submitted by Aleksander Hristov +cloudns.asia +cloudns.biz +cloudns.club +cloudns.cc +cloudns.eu +cloudns.in +cloudns.info +cloudns.org +cloudns.pro +cloudns.pw +cloudns.us + +// CNPY : https://cnpy.gdn +// Submitted by Angelo Gladding +cnpy.gdn + +// CoDNS B.V. +co.nl +co.no + +// Combell.com : https://www.combell.com +// Submitted by Thomas Wouters +webhosting.be +hosting-cluster.nl + +// Coordination Center for TLD RU and XN--P1AI : https://cctld.ru/en/domains/domens_ru/reserved/ +// Submitted by George Georgievsky +ac.ru +edu.ru +gov.ru +int.ru +mil.ru +test.ru + +// COSIMO GmbH : http://www.cosimo.de +// Submitted by Rene Marticke +dyn.cosidns.de +dynamisches-dns.de +dnsupdater.de +internet-dns.de +l-o-g-i-n.de +dynamic-dns.info +feste-ip.net +knx-server.net +static-access.net + +// Craynic, s.r.o. 
: http://www.craynic.com/ +// Submitted by Ales Krajnik +realm.cz + +// Cryptonomic : https://cryptonomic.net/ +// Submitted by Andrew Cady +*.cryptonomic.net + +// Cupcake : https://cupcake.io/ +// Submitted by Jonathan Rudenberg +cupcake.is + +// Curv UG : https://curv-labs.de/ +// Submitted by Marvin Wiesner +curv.dev + +// Customer OCI - Oracle Dyn https://cloud.oracle.com/home https://dyn.com/dns/ +// Submitted by Gregory Drake +// Note: This is intended to also include customer-oci.com due to wildcards implicitly including the current label +*.customer-oci.com +*.oci.customer-oci.com +*.ocp.customer-oci.com +*.ocs.customer-oci.com + +// cyon GmbH : https://www.cyon.ch/ +// Submitted by Dominic Luechinger +cyon.link +cyon.site + +// Danger Science Group: https://dangerscience.com/ +// Submitted by Skylar MacDonald +fnwk.site +folionetwork.site +platform0.app + +// Daplie, Inc : https://daplie.com +// Submitted by AJ ONeal +daplie.me +localhost.daplie.me + +// Datto, Inc. : https://www.datto.com/ +// Submitted by Philipp Heckel +dattolocal.com +dattorelay.com +dattoweb.com +mydatto.com +dattolocal.net +mydatto.net + +// Dansk.net : http://www.dansk.net/ +// Submitted by Anani Voule +biz.dk +co.dk +firm.dk +reg.dk +store.dk + +// dappnode.io : https://dappnode.io/ +// Submitted by Abel Boldu / DAppNode Team +dyndns.dappnode.io + +// dapps.earth : https://dapps.earth/ +// Submitted by Daniil Burdakov +*.dapps.earth +*.bzz.dapps.earth + +// Dark, Inc. : https://darklang.com +// Submitted by Paul Biggar +builtwithdark.com + +// DataDetect, LLC. 
: https://datadetect.com +// Submitted by Andrew Banchich +demo.datadetect.com +instance.datadetect.com + +// Datawire, Inc : https://www.datawire.io +// Submitted by Richard Li +edgestack.me + +// DDNS5 : https://ddns5.com +// Submitted by Cameron Elliott +ddns5.com + +// Debian : https://www.debian.org/ +// Submitted by Peter Palfrader / Debian Sysadmin Team +debian.net + +// Deno Land Inc : https://deno.com/ +// Submitted by Luca Casonato +deno.dev +deno-staging.dev + +// deSEC : https://desec.io/ +// Submitted by Peter Thomassen +dedyn.io + +// Diher Solutions : https://diher.solutions +// Submitted by Didi Hermawan +*.rss.my.id +*.diher.solutions + +// DNS Africa Ltd https://dns.business +// Submitted by Calvin Browne +jozi.biz + +// DNShome : https://www.dnshome.de/ +// Submitted by Norbert Auler +dnshome.de + +// DotArai : https://www.dotarai.com/ +// Submitted by Atsadawat Netcharadsang +online.th +shop.th + +// DrayTek Corp. : https://www.draytek.com/ +// Submitted by Paul Fang +drayddns.com + +// DreamCommerce : https://shoper.pl/ +// Submitted by Konrad Kotarba +shoparena.pl + +// DreamHost : http://www.dreamhost.com/ +// Submitted by Andrew Farmer +dreamhosters.com + +// Drobo : http://www.drobo.com/ +// Submitted by Ricardo Padilha +mydrobo.com + +// Drud Holdings, LLC. 
: https://www.drud.com/ +// Submitted by Kevin Bridges +drud.io +drud.us + +// DuckDNS : http://www.duckdns.org/ +// Submitted by Richard Harper +duckdns.org + +// Bip : https://bip.sh +// Submitted by Joel Kennedy +bip.sh + +// bitbridge.net : Submitted by Craig Welch, abeliidev@gmail.com +bitbridge.net + +// dy.fi : http://dy.fi/ +// Submitted by Heikki Hannikainen +dy.fi +tunk.org + +// DynDNS.com : http://www.dyndns.com/services/dns/dyndns/ +dyndns-at-home.com +dyndns-at-work.com +dyndns-blog.com +dyndns-free.com +dyndns-home.com +dyndns-ip.com +dyndns-mail.com +dyndns-office.com +dyndns-pics.com +dyndns-remote.com +dyndns-server.com +dyndns-web.com +dyndns-wiki.com +dyndns-work.com +dyndns.biz +dyndns.info +dyndns.org +dyndns.tv +at-band-camp.net +ath.cx +barrel-of-knowledge.info +barrell-of-knowledge.info +better-than.tv +blogdns.com +blogdns.net +blogdns.org +blogsite.org +boldlygoingnowhere.org +broke-it.net +buyshouses.net +cechire.com +dnsalias.com +dnsalias.net +dnsalias.org +dnsdojo.com +dnsdojo.net +dnsdojo.org +does-it.net +doesntexist.com +doesntexist.org +dontexist.com +dontexist.net +dontexist.org +doomdns.com +doomdns.org +dvrdns.org +dyn-o-saur.com +dynalias.com +dynalias.net +dynalias.org +dynathome.net +dyndns.ws +endofinternet.net +endofinternet.org +endoftheinternet.org +est-a-la-maison.com +est-a-la-masion.com +est-le-patron.com +est-mon-blogueur.com +for-better.biz +for-more.biz +for-our.info +for-some.biz +for-the.biz +forgot.her.name +forgot.his.name +from-ak.com +from-al.com +from-ar.com +from-az.net +from-ca.com +from-co.net +from-ct.com +from-dc.com +from-de.com +from-fl.com +from-ga.com +from-hi.com +from-ia.com +from-id.com +from-il.com +from-in.com +from-ks.com +from-ky.com +from-la.net +from-ma.com +from-md.com +from-me.org +from-mi.com +from-mn.com +from-mo.com +from-ms.com +from-mt.com +from-nc.com +from-nd.com +from-ne.com +from-nh.com +from-nj.com +from-nm.com +from-nv.com +from-ny.net +from-oh.com +from-ok.com +from-or.com 
+from-pa.com +from-pr.com +from-ri.com +from-sc.com +from-sd.com +from-tn.com +from-tx.com +from-ut.com +from-va.com +from-vt.com +from-wa.com +from-wi.com +from-wv.com +from-wy.com +ftpaccess.cc +fuettertdasnetz.de +game-host.org +game-server.cc +getmyip.com +gets-it.net +go.dyndns.org +gotdns.com +gotdns.org +groks-the.info +groks-this.info +ham-radio-op.net +here-for-more.info +hobby-site.com +hobby-site.org +home.dyndns.org +homedns.org +homeftp.net +homeftp.org +homeip.net +homelinux.com +homelinux.net +homelinux.org +homeunix.com +homeunix.net +homeunix.org +iamallama.com +in-the-band.net +is-a-anarchist.com +is-a-blogger.com +is-a-bookkeeper.com +is-a-bruinsfan.org +is-a-bulls-fan.com +is-a-candidate.org +is-a-caterer.com +is-a-celticsfan.org +is-a-chef.com +is-a-chef.net +is-a-chef.org +is-a-conservative.com +is-a-cpa.com +is-a-cubicle-slave.com +is-a-democrat.com +is-a-designer.com +is-a-doctor.com +is-a-financialadvisor.com +is-a-geek.com +is-a-geek.net +is-a-geek.org +is-a-green.com +is-a-guru.com +is-a-hard-worker.com +is-a-hunter.com +is-a-knight.org +is-a-landscaper.com +is-a-lawyer.com +is-a-liberal.com +is-a-libertarian.com +is-a-linux-user.org +is-a-llama.com +is-a-musician.com +is-a-nascarfan.com +is-a-nurse.com +is-a-painter.com +is-a-patsfan.org +is-a-personaltrainer.com +is-a-photographer.com +is-a-player.com +is-a-republican.com +is-a-rockstar.com +is-a-socialist.com +is-a-soxfan.org +is-a-student.com +is-a-teacher.com +is-a-techie.com +is-a-therapist.com +is-an-accountant.com +is-an-actor.com +is-an-actress.com +is-an-anarchist.com +is-an-artist.com +is-an-engineer.com +is-an-entertainer.com +is-by.us +is-certified.com +is-found.org +is-gone.com +is-into-anime.com +is-into-cars.com +is-into-cartoons.com +is-into-games.com +is-leet.com +is-lost.org +is-not-certified.com +is-saved.org +is-slick.com +is-uberleet.com +is-very-bad.org +is-very-evil.org +is-very-good.org +is-very-nice.org +is-very-sweet.org +is-with-theband.com +isa-geek.com 
+isa-geek.net +isa-geek.org +isa-hockeynut.com +issmarterthanyou.com +isteingeek.de +istmein.de +kicks-ass.net +kicks-ass.org +knowsitall.info +land-4-sale.us +lebtimnetz.de +leitungsen.de +likes-pie.com +likescandy.com +merseine.nu +mine.nu +misconfused.org +mypets.ws +myphotos.cc +neat-url.com +office-on-the.net +on-the-web.tv +podzone.net +podzone.org +readmyblog.org +saves-the-whales.com +scrapper-site.net +scrapping.cc +selfip.biz +selfip.com +selfip.info +selfip.net +selfip.org +sells-for-less.com +sells-for-u.com +sells-it.net +sellsyourhome.org +servebbs.com +servebbs.net +servebbs.org +serveftp.net +serveftp.org +servegame.org +shacknet.nu +simple-url.com +space-to-rent.com +stuff-4-sale.org +stuff-4-sale.us +teaches-yoga.com +thruhere.net +traeumtgerade.de +webhop.biz +webhop.info +webhop.net +webhop.org +worse-than.tv +writesthisblog.com + +// ddnss.de : https://www.ddnss.de/ +// Submitted by Robert Niedziela +ddnss.de +dyn.ddnss.de +dyndns.ddnss.de +dyndns1.de +dyn-ip24.de +home-webserver.de +dyn.home-webserver.de +myhome-server.de +ddnss.org + +// Definima : http://www.definima.com/ +// Submitted by Maxence Bitterli +definima.net +definima.io + +// DigitalOcean App Platform : https://www.digitalocean.com/products/app-platform/ +// Submitted by Braxton Huggins +ondigitalocean.app + +// DigitalOcean Spaces : https://www.digitalocean.com/products/spaces/ +// Submitted by Robin H. Johnson +*.digitaloceanspaces.com + +// dnstrace.pro : https://dnstrace.pro/ +// Submitted by Chris Partridge +bci.dnstrace.pro + +// Dynu.com : https://www.dynu.com/ +// Submitted by Sue Ye +ddnsfree.com +ddnsgeek.com +giize.com +gleeze.com +kozow.com +loseyourip.com +ooguy.com +theworkpc.com +casacam.net +dynu.net +accesscam.org +camdvr.org +freeddns.org +mywire.org +webredirect.org +myddns.rocks +blogsite.xyz + +// dynv6 : https://dynv6.com +// Submitted by Dominik Menke +dynv6.net + +// E4YOU spol. s.r.o. 
: https://e4you.cz/ +// Submitted by Vladimir Dudr +e4.cz + +// eero : https://eero.com/ +// Submitted by Yue Kang +eero.online +eero-stage.online + +// Elementor : Elementor Ltd. +// Submitted by Anton Barkan +elementor.cloud +elementor.cool + +// En root‽ : https://en-root.org +// Submitted by Emmanuel Raviart +en-root.fr + +// Enalean SAS: https://www.enalean.com +// Submitted by Thomas Cottier +mytuleap.com +tuleap-partners.com + +// ECG Robotics, Inc: https://ecgrobotics.org +// Submitted by +onred.one +staging.onred.one + +// encoway GmbH : https://www.encoway.de +// Submitted by Marcel Daus +eu.encoway.cloud + +// EU.org https://eu.org/ +// Submitted by Pierre Beyssac +eu.org +al.eu.org +asso.eu.org +at.eu.org +au.eu.org +be.eu.org +bg.eu.org +ca.eu.org +cd.eu.org +ch.eu.org +cn.eu.org +cy.eu.org +cz.eu.org +de.eu.org +dk.eu.org +edu.eu.org +ee.eu.org +es.eu.org +fi.eu.org +fr.eu.org +gr.eu.org +hr.eu.org +hu.eu.org +ie.eu.org +il.eu.org +in.eu.org +int.eu.org +is.eu.org +it.eu.org +jp.eu.org +kr.eu.org +lt.eu.org +lu.eu.org +lv.eu.org +mc.eu.org +me.eu.org +mk.eu.org +mt.eu.org +my.eu.org +net.eu.org +ng.eu.org +nl.eu.org +no.eu.org +nz.eu.org +paris.eu.org +pl.eu.org +pt.eu.org +q-a.eu.org +ro.eu.org +ru.eu.org +se.eu.org +si.eu.org +sk.eu.org +tr.eu.org +uk.eu.org +us.eu.org + +// Eurobyte : https://eurobyte.ru +// Submitted by Evgeniy Subbotin +eurodir.ru + +// Evennode : http://www.evennode.com/ +// Submitted by Michal Kralik +eu-1.evennode.com +eu-2.evennode.com +eu-3.evennode.com +eu-4.evennode.com +us-1.evennode.com +us-2.evennode.com +us-3.evennode.com +us-4.evennode.com + +// eDirect Corp. : https://hosting.url.com.tw/ +// Submitted by C.S. chang +twmail.cc +twmail.net +twmail.org +mymailer.com.tw +url.tw + +// Fabrica Technologies, Inc. : https://www.fabrica.dev/ +// Submitted by Eric Jiang +onfabrica.com + +// Facebook, Inc. 
+// Submitted by Peter Ruibal +apps.fbsbx.com + +// FAITID : https://faitid.org/ +// Submitted by Maxim Alzoba +// https://www.flexireg.net/stat_info +ru.net +adygeya.ru +bashkiria.ru +bir.ru +cbg.ru +com.ru +dagestan.ru +grozny.ru +kalmykia.ru +kustanai.ru +marine.ru +mordovia.ru +msk.ru +mytis.ru +nalchik.ru +nov.ru +pyatigorsk.ru +spb.ru +vladikavkaz.ru +vladimir.ru +abkhazia.su +adygeya.su +aktyubinsk.su +arkhangelsk.su +armenia.su +ashgabad.su +azerbaijan.su +balashov.su +bashkiria.su +bryansk.su +bukhara.su +chimkent.su +dagestan.su +east-kazakhstan.su +exnet.su +georgia.su +grozny.su +ivanovo.su +jambyl.su +kalmykia.su +kaluga.su +karacol.su +karaganda.su +karelia.su +khakassia.su +krasnodar.su +kurgan.su +kustanai.su +lenug.su +mangyshlak.su +mordovia.su +msk.su +murmansk.su +nalchik.su +navoi.su +north-kazakhstan.su +nov.su +obninsk.su +penza.su +pokrovsk.su +sochi.su +spb.su +tashkent.su +termez.su +togliatti.su +troitsk.su +tselinograd.su +tula.su +tuva.su +vladikavkaz.su +vladimir.su +vologda.su + +// Fancy Bits, LLC : http://getchannels.com +// Submitted by Aman Gupta +channelsdvr.net +u.channelsdvr.net + +// Fastly Inc. : http://www.fastly.com/ +// Submitted by Fastly Security +edgecompute.app +fastly-terrarium.com +fastlylb.net +map.fastlylb.net +freetls.fastly.net +map.fastly.net +a.prod.fastly.net +global.prod.fastly.net +a.ssl.fastly.net +b.ssl.fastly.net +global.ssl.fastly.net + +// FASTVPS EESTI OU : https://fastvps.ru/ +// Submitted by Likhachev Vasiliy +fastvps-server.com +fastvps.host +myfast.host +fastvps.site +myfast.space + +// Fedora : https://fedoraproject.org/ +// submitted by Patrick Uiterwijk +fedorainfracloud.org +fedorapeople.org +cloud.fedoraproject.org +app.os.fedoraproject.org +app.os.stg.fedoraproject.org + +// FearWorks Media Ltd. 
: https://fearworksmedia.co.uk +// submitted by Keith Fairley +couk.me +ukco.me +conn.uk +copro.uk +hosp.uk + +// Fermax : https://fermax.com/ +// submitted by Koen Van Isterdael +mydobiss.com + +// FH Muenster : https://www.fh-muenster.de +// Submitted by Robin Naundorf +fh-muenster.io + +// Filegear Inc. : https://www.filegear.com +// Submitted by Jason Zhu +filegear.me +filegear-au.me +filegear-de.me +filegear-gb.me +filegear-ie.me +filegear-jp.me +filegear-sg.me + +// Firebase, Inc. +// Submitted by Chris Raynor +firebaseapp.com + +// Firewebkit : https://www.firewebkit.com +// Submitted by Majid Qureshi +fireweb.app + +// FLAP : https://www.flap.cloud +// Submitted by Louis Chemineau +flap.id + +// FlashDrive : https://flashdrive.io +// Submitted by Eric Chan +onflashdrive.app +fldrv.com + +// fly.io: https://fly.io +// Submitted by Kurt Mackey +fly.dev +edgeapp.net +shw.io + +// Flynn : https://flynn.io +// Submitted by Jonathan Rudenberg +flynnhosting.net + +// Forgerock : https://www.forgerock.com +// Submitted by Roderick Parr +forgeblocks.com +id.forgerock.io + +// Framer : https://www.framer.com +// Submitted by Koen Rouwhorst +framer.app +framercanvas.com + +// Frusky MEDIA&PR : https://www.frusky.de +// Submitted by Victor Pupynin +*.frusky.de + +// RavPage : https://www.ravpage.co.il +// Submitted by Roni Horowitz +ravpage.co.il + +// Frederik Braun https://frederik-braun.com +// Submitted by Frederik Braun +0e.vc + +// Freebox : http://www.freebox.fr +// Submitted by Romain Fliedel +freebox-os.com +freeboxos.com +fbx-os.fr +fbxos.fr +freebox-os.fr +freeboxos.fr + +// freedesktop.org : https://www.freedesktop.org +// Submitted by Daniel Stone +freedesktop.org + +// freemyip.com : https://freemyip.com +// Submitted by Cadence +freemyip.com + +// FunkFeuer - Verein zur Förderung freier Netze : https://www.funkfeuer.at +// Submitted by Daniel A. 
Maierhofer +wien.funkfeuer.at + +// Futureweb OG : http://www.futureweb.at +// Submitted by Andreas Schnederle-Wagner +*.futurecms.at +*.ex.futurecms.at +*.in.futurecms.at +futurehosting.at +futuremailing.at +*.ex.ortsinfo.at +*.kunden.ortsinfo.at +*.statics.cloud + +// GDS : https://www.gov.uk/service-manual/operations/operating-servicegovuk-subdomains +// Submitted by David Illsley +service.gov.uk + +// Gehirn Inc. : https://www.gehirn.co.jp/ +// Submitted by Kohei YOSHIDA +gehirn.ne.jp +usercontent.jp + +// Gentlent, Inc. : https://www.gentlent.com +// Submitted by Tom Klein +gentapps.com +gentlentapis.com +lab.ms +cdn-edges.net + +// Ghost Foundation : https://ghost.org +// Submitted by Matt Hanley +ghost.io + +// GignoSystemJapan: http://gsj.bz +// Submitted by GignoSystemJapan +gsj.bz + +// GitHub, Inc. +// Submitted by Patrick Toomey +githubusercontent.com +githubpreview.dev +github.io + +// GitLab, Inc. +// Submitted by Alex Hanselka +gitlab.io + +// Gitplac.si - https://gitplac.si +// Submitted by Aljaž Starc +gitapp.si +gitpage.si + +// Glitch, Inc : https://glitch.com +// Submitted by Mads Hartmann +glitch.me + +// Global NOG Alliance : https://nogalliance.org/ +// Submitted by Sander Steffann +nog.community + +// Globe Hosting SRL : https://www.globehosting.com/ +// Submitted by Gavin Brown +co.ro +shop.ro + +// GMO Pepabo, Inc. : https://pepabo.com/ +// Submitted by dojineko +lolipop.io + +// GOV.UK Platform as a Service : https://www.cloud.service.gov.uk/ +// Submitted by Tom Whitwell +cloudapps.digital +london.cloudapps.digital + +// GOV.UK Pay : https://www.payments.service.gov.uk/ +// Submitted by Richard Baker +pymnt.uk + +// UKHomeOffice : https://www.gov.uk/government/organisations/home-office +// Submitted by Jon Shanks +homeoffice.gov.uk + +// GlobeHosting, Inc. +// Submitted by Zoltan Egresi +ro.im + +// GoIP DNS Services : http://www.goip.de +// Submitted by Christian Poulter +goip.de + +// Google, Inc. 
+// Submitted by Eduardo Vela +run.app +a.run.app +web.app +*.0emm.com +appspot.com +*.r.appspot.com +codespot.com +googleapis.com +googlecode.com +pagespeedmobilizer.com +publishproxy.com +withgoogle.com +withyoutube.com +*.gateway.dev +cloud.goog +translate.goog +*.usercontent.goog +cloudfunctions.net +blogspot.ae +blogspot.al +blogspot.am +blogspot.ba +blogspot.be +blogspot.bg +blogspot.bj +blogspot.ca +blogspot.cf +blogspot.ch +blogspot.cl +blogspot.co.at +blogspot.co.id +blogspot.co.il +blogspot.co.ke +blogspot.co.nz +blogspot.co.uk +blogspot.co.za +blogspot.com +blogspot.com.ar +blogspot.com.au +blogspot.com.br +blogspot.com.by +blogspot.com.co +blogspot.com.cy +blogspot.com.ee +blogspot.com.eg +blogspot.com.es +blogspot.com.mt +blogspot.com.ng +blogspot.com.tr +blogspot.com.uy +blogspot.cv +blogspot.cz +blogspot.de +blogspot.dk +blogspot.fi +blogspot.fr +blogspot.gr +blogspot.hk +blogspot.hr +blogspot.hu +blogspot.ie +blogspot.in +blogspot.is +blogspot.it +blogspot.jp +blogspot.kr +blogspot.li +blogspot.lt +blogspot.lu +blogspot.md +blogspot.mk +blogspot.mr +blogspot.mx +blogspot.my +blogspot.nl +blogspot.no +blogspot.pe +blogspot.pt +blogspot.qa +blogspot.re +blogspot.ro +blogspot.rs +blogspot.ru +blogspot.se +blogspot.sg +blogspot.si +blogspot.sk +blogspot.sn +blogspot.td +blogspot.tw +blogspot.ug +blogspot.vn + +// Goupile : https://goupile.fr +// Submitted by Niels Martignene +goupile.fr + +// Group 53, LLC : https://www.group53.com +// Submitted by Tyler Todd +awsmppl.com + +// GünstigBestellen : https://günstigbestellen.de +// Submitted by Furkan Akkoc +günstigbestellen.de +günstigliefern.de + +// Hakaran group: http://hakaran.cz +// Submited by Arseniy Sokolov +fin.ci +free.hr +caa.li +ua.rs +conf.se + +// Handshake : https://handshake.org +// Submitted by Mike Damm +hs.zone +hs.run + +// Hashbang : https://hashbang.sh +hashbang.sh + +// Hasura : https://hasura.io +// Submitted by Shahidh K Muhammed +hasura.app +hasura-app.io + +// Heilbronn 
University of Applied Sciences - Faculty Informatics (GitLab Pages): https://www.hs-heilbronn.de +// Submitted by Richard Zowalla +pages.it.hs-heilbronn.de + +// Hepforge : https://www.hepforge.org +// Submitted by David Grellscheid +hepforge.org + +// Heroku : https://www.heroku.com/ +// Submitted by Tom Maher +herokuapp.com +herokussl.com + +// Hibernating Rhinos +// Submitted by Oren Eini +ravendb.cloud +myravendb.com +ravendb.community +ravendb.me +development.run +ravendb.run + +// home.pl S.A.: https://home.pl +// Submited by Krzysztof Wolski +homesklep.pl + +// Hong Kong Productivity Council: https://www.hkpc.org/ +// Submitted by SECaaS Team +secaas.hk + +// Hoplix : https://www.hoplix.com +// Submitted by Danilo De Franco +hoplix.shop + + +// HOSTBIP REGISTRY : https://www.hostbip.com/ +// Submitted by Atanunu Igbunuroghene +orx.biz +biz.gl +col.ng +firm.ng +gen.ng +ltd.ng +ngo.ng +edu.scot +sch.so +org.yt + +// HostyHosting (hostyhosting.com) +hostyhosting.io + +// Häkkinen.fi +// Submitted by Eero Häkkinen +häkkinen.fi + +// Ici la Lune : http://www.icilalune.com/ +// Submitted by Simon Morvan +*.moonscale.io +moonscale.net + +// iki.fi +// Submitted by Hannu Aronsson +iki.fi + +// Impertrix Solutions : +// Submitted by Zhixiang Zhao +impertrixcdn.com +impertrix.com + +// Incsub, LLC: https://incsub.com/ +// Submitted by Aaron Edwards +smushcdn.com +wphostedmail.com +wpmucdn.com +tempurl.host +wpmudev.host + +// Individual Network Berlin e.V. 
: https://www.in-berlin.de/ +// Submitted by Christian Seitz +dyn-berlin.de +in-berlin.de +in-brb.de +in-butter.de +in-dsl.de +in-dsl.net +in-dsl.org +in-vpn.de +in-vpn.net +in-vpn.org + +// info.at : http://www.info.at/ +biz.at +info.at + +// info.cx : http://info.cx +// Submitted by Jacob Slater +info.cx + +// Interlegis : http://www.interlegis.leg.br +// Submitted by Gabriel Ferreira +ac.leg.br +al.leg.br +am.leg.br +ap.leg.br +ba.leg.br +ce.leg.br +df.leg.br +es.leg.br +go.leg.br +ma.leg.br +mg.leg.br +ms.leg.br +mt.leg.br +pa.leg.br +pb.leg.br +pe.leg.br +pi.leg.br +pr.leg.br +rj.leg.br +rn.leg.br +ro.leg.br +rr.leg.br +rs.leg.br +sc.leg.br +se.leg.br +sp.leg.br +to.leg.br + +// intermetrics GmbH : https://pixolino.com/ +// Submitted by Wolfgang Schwarz +pixolino.com + +// Internet-Pro, LLP: https://netangels.ru/ +// Submited by Vasiliy Sheredeko +na4u.ru + +// iopsys software solutions AB : https://iopsys.eu/ +// Submitted by Roman Azarenko +iopsys.se + +// IPiFony Systems, Inc. : https://www.ipifony.com/ +// Submitted by Matthew Hardeman +ipifony.net + +// IServ GmbH : https://iserv.eu +// Submitted by Kim-Alexander Brodowski +mein-iserv.de +schulserver.de +test-iserv.de +iserv.dev + +// I-O DATA DEVICE, INC. : http://www.iodata.com/ +// Submitted by Yuji Minagawa +iobb.net + +// Jelastic, Inc. 
: https://jelastic.com/ +// Submited by Ihor Kolodyuk +mel.cloudlets.com.au +cloud.interhostsolutions.be +users.scale.virtualcloud.com.br +mycloud.by +alp1.ae.flow.ch +appengine.flow.ch +es-1.axarnet.cloud +diadem.cloud +vip.jelastic.cloud +jele.cloud +it1.eur.aruba.jenv-aruba.cloud +it1.jenv-aruba.cloud +keliweb.cloud +cs.keliweb.cloud +oxa.cloud +tn.oxa.cloud +uk.oxa.cloud +primetel.cloud +uk.primetel.cloud +ca.reclaim.cloud +uk.reclaim.cloud +us.reclaim.cloud +ch.trendhosting.cloud +de.trendhosting.cloud +jele.club +amscompute.com +clicketcloud.com +dopaas.com +hidora.com +paas.hosted-by-previder.com +rag-cloud.hosteur.com +rag-cloud-ch.hosteur.com +jcloud.ik-server.com +jcloud-ver-jpc.ik-server.com +demo.jelastic.com +kilatiron.com +paas.massivegrid.com +jed.wafaicloud.com +lon.wafaicloud.com +ryd.wafaicloud.com +j.scaleforce.com.cy +jelastic.dogado.eu +fi.cloudplatform.fi +demo.datacenter.fi +paas.datacenter.fi +jele.host +mircloud.host +paas.beebyte.io +sekd1.beebyteapp.io +jele.io +cloud-fr1.unispace.io +jc.neen.it +cloud.jelastic.open.tim.it +jcloud.kz +upaas.kazteleport.kz +cloudjiffy.net +fra1-de.cloudjiffy.net +west1-us.cloudjiffy.net +jls-sto1.elastx.net +jls-sto2.elastx.net +jls-sto3.elastx.net +faststacks.net +fr-1.paas.massivegrid.net +lon-1.paas.massivegrid.net +lon-2.paas.massivegrid.net +ny-1.paas.massivegrid.net +ny-2.paas.massivegrid.net +sg-1.paas.massivegrid.net +jelastic.saveincloud.net +nordeste-idc.saveincloud.net +j.scaleforce.net +jelastic.tsukaeru.net +sdscloud.pl +unicloud.pl +mircloud.ru +jelastic.regruhosting.ru +enscaled.sg +jele.site +jelastic.team +orangecloud.tn +j.layershift.co.uk +phx.enscaled.us +mircloud.us + +// Jino : https://www.jino.ru +// Submitted by Sergey Ulyashin +myjino.ru +*.hosting.myjino.ru +*.landing.myjino.ru +*.spectrum.myjino.ru +*.vps.myjino.ru + +// Jotelulu S.L. 
: https://jotelulu.com +// Submitted by Daniel Fariña +jotelulu.cloud + +// Joyent : https://www.joyent.com/ +// Submitted by Brian Bennett +*.triton.zone +*.cns.joyent.com + +// JS.ORG : http://dns.js.org +// Submitted by Stefan Keim +js.org + +// KaasHosting : http://www.kaashosting.nl/ +// Submitted by Wouter Bakker +kaas.gg +khplay.nl + +// Keyweb AG : https://www.keyweb.de +// Submitted by Martin Dannehl +keymachine.de + +// KingHost : https://king.host +// Submitted by Felipe Keller Braz +kinghost.net +uni5.net + +// KnightPoint Systems, LLC : http://www.knightpoint.com/ +// Submitted by Roy Keene +knightpoint.systems + +// KoobinEvent, SL: https://www.koobin.com +// Submitted by Iván Oliva +koobin.events + +// KUROKU LTD : https://kuroku.ltd/ +// Submitted by DisposaBoy +oya.to + +// Katholieke Universiteit Leuven: https://www.kuleuven.be +// Submitted by Abuse KU Leuven +kuleuven.cloud +ezproxy.kuleuven.be + +// .KRD : http://nic.krd/data/krd/Registration%20Policy.pdf +co.krd +edu.krd + +// Krellian Ltd. : https://krellian.com +// Submitted by Ben Francis +krellian.net +webthings.io + +// LCube - Professional hosting e.K. : https://www.lcube-webhosting.de +// Submitted by Lars Laehn +git-repos.de +lcube-server.de +svn-repos.de + +// Leadpages : https://www.leadpages.net +// Submitted by Greg Dallavalle +leadpages.co +lpages.co +lpusercontent.com + +// Lelux.fi : https://lelux.fi/ +// Submitted by Lelux Admin +lelux.site + +// Lifetime Hosting : https://Lifetime.Hosting/ +// Submitted by Mike Fillator +co.business +co.education +co.events +co.financial +co.network +co.place +co.technology + +// Lightmaker Property Manager, Inc. 
: https://app.lmpm.com/ +// Submitted by Greg Holland +app.lmpm.com + +// linkyard ldt: https://www.linkyard.ch/ +// Submitted by Mario Siegenthaler +linkyard.cloud +linkyard-cloud.ch + +// Linode : https://linode.com +// Submitted by +members.linode.com +*.nodebalancer.linode.com +*.linodeobjects.com +ip.linodeusercontent.com + +// LiquidNet Ltd : http://www.liquidnetlimited.com/ +// Submitted by Victor Velchev +we.bs + +// localzone.xyz +// Submitted by Kenny Niehage +localzone.xyz + +// Log'in Line : https://www.loginline.com/ +// Submitted by Rémi Mach +loginline.app +loginline.dev +loginline.io +loginline.services +loginline.site + +// Lokalized : https://lokalized.nl +// Submitted by Noah Taheij +servers.run + +// Lõhmus Family, The +// Submitted by Heiki Lõhmus +lohmus.me + +// LubMAN UMCS Sp. z o.o : https://lubman.pl/ +// Submitted by Ireneusz Maliszewski +krasnik.pl +leczna.pl +lubartow.pl +lublin.pl +poniatowa.pl +swidnik.pl + +// Lug.org.uk : https://lug.org.uk +// Submitted by Jon Spriggs +glug.org.uk +lug.org.uk +lugs.org.uk + +// Lukanet Ltd : https://lukanet.com +// Submitted by Anton Avramov +barsy.bg +barsy.co.uk +barsyonline.co.uk +barsycenter.com +barsyonline.com +barsy.club +barsy.de +barsy.eu +barsy.in +barsy.info +barsy.io +barsy.me +barsy.menu +barsy.mobi +barsy.net +barsy.online +barsy.org +barsy.pro +barsy.pub +barsy.ro +barsy.shop +barsy.site +barsy.support +barsy.uk + +// Magento Commerce +// Submitted by Damien Tournoud +*.magentosite.cloud + +// May First - People Link : https://mayfirst.org/ +// Submitted by Jamie McClelland +mayfirst.info +mayfirst.org + +// Mail.Ru Group : https://hb.cldmail.ru +// Submitted by Ilya Zaretskiy +hb.cldmail.ru + +// Mail Transfer Platform : https://www.neupeer.com +// Submitted by Li Hui +cn.vu + +// Maze Play: https://www.mazeplay.com +// Submitted by Adam Humpherys +mazeplay.com + +// mcpe.me : https://mcpe.me +// Submitted by Noa Heyl +mcpe.me + +// McHost : https://mchost.ru +// Submitted by 
Evgeniy Subbotin +mcdir.me +mcdir.ru +mcpre.ru +vps.mcdir.ru + +// Mediatech : https://mediatech.by +// Submitted by Evgeniy Kozhuhovskiy +mediatech.by +mediatech.dev + +// Medicom Health : https://medicomhealth.com +// Submitted by Michael Olson +hra.health + +// Memset hosting : https://www.memset.com +// Submitted by Tom Whitwell +miniserver.com +memset.net + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Zdeněk Šustr +*.cloud.metacentrum.cz +custom.metacentrum.cz + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Radim Janča +flt.cloud.muni.cz +usr.cloud.muni.cz + +// Meteor Development Group : https://www.meteor.com/hosting +// Submitted by Pierre Carrier +meteorapp.com +eu.meteorapp.com + +// Michau Enterprises Limited : http://www.co.pl/ +co.pl + +// Microsoft Corporation : http://microsoft.com +// Submitted by Mitch Webster +*.azurecontainer.io +azurewebsites.net +azure-mobile.net +cloudapp.net +azurestaticapps.net +centralus.azurestaticapps.net +eastasia.azurestaticapps.net +eastus2.azurestaticapps.net +westeurope.azurestaticapps.net +westus2.azurestaticapps.net + +// minion.systems : http://minion.systems +// Submitted by Robert Böttinger +csx.cc + +// Mintere : https://mintere.com/ +// Submitted by Ben Aubin +mintere.site + +// MobileEducation, LLC : https://joinforte.com +// Submitted by Grayson Martin +forte.id + +// Mozilla Corporation : https://mozilla.com +// Submitted by Ben Francis +mozilla-iot.org + +// Mozilla Foundation : https://mozilla.org/ +// Submitted by glob +bmoattachments.org + +// MSK-IX : https://www.msk-ix.ru/ +// Submitted by Khannanov Roman +net.ru +org.ru +pp.ru + +// Mythic Beasts : https://www.mythic-beasts.com +// Submitted by Paul Cammish +hostedpi.com +customer.mythic-beasts.com +caracal.mythic-beasts.com +fentiger.mythic-beasts.com +lynx.mythic-beasts.com +ocelot.mythic-beasts.com +oncilla.mythic-beasts.com +onza.mythic-beasts.com +sphinx.mythic-beasts.com 
+vs.mythic-beasts.com +x.mythic-beasts.com +yali.mythic-beasts.com +cust.retrosnub.co.uk + +// Nabu Casa : https://www.nabucasa.com +// Submitted by Paulus Schoutsen +ui.nabu.casa + +// Names.of.London : https://names.of.london/ +// Submitted by James Stevens or +pony.club +of.fashion +in.london +of.london +from.marketing +with.marketing +for.men +repair.men +and.mom +for.mom +for.one +under.one +for.sale +that.win +from.work +to.work + +// Net at Work Gmbh : https://www.netatwork.de +// Submitted by Jan Jaeschke +cloud.nospamproxy.com + +// Netlify : https://www.netlify.com +// Submitted by Jessica Parsons +netlify.app + +// Neustar Inc. +// Submitted by Trung Tran +4u.com + +// ngrok : https://ngrok.com/ +// Submitted by Alan Shreve +ngrok.io + +// Nimbus Hosting Ltd. : https://www.nimbushosting.co.uk/ +// Submitted by Nicholas Ford +nh-serv.co.uk + +// NFSN, Inc. : https://www.NearlyFreeSpeech.NET/ +// Submitted by Jeff Wheelhouse +nfshost.com + +// Noop : https://noop.app +// Submitted by Nathaniel Schweinberg +*.developer.app +noop.app + +// Northflank Ltd. 
: https://northflank.com/ +// Submitted by Marco Suter +*.northflank.app +*.code.run + +// Noticeable : https://noticeable.io +// Submitted by Laurent Pellegrino +noticeable.news + +// Now-DNS : https://now-dns.com +// Submitted by Steve Russell +dnsking.ch +mypi.co +n4t.co +001www.com +ddnslive.com +myiphost.com +forumz.info +16-b.it +32-b.it +64-b.it +soundcast.me +tcp4.me +dnsup.net +hicam.net +now-dns.net +ownip.net +vpndns.net +dynserv.org +now-dns.org +x443.pw +now-dns.top +ntdll.top +freeddns.us +crafting.xyz +zapto.xyz + +// nsupdate.info : https://www.nsupdate.info/ +// Submitted by Thomas Waldmann +nsupdate.info +nerdpol.ovh + +// No-IP.com : https://noip.com/ +// Submitted by Deven Reza +blogsyte.com +brasilia.me +cable-modem.org +ciscofreak.com +collegefan.org +couchpotatofries.org +damnserver.com +ddns.me +ditchyourip.com +dnsfor.me +dnsiskinky.com +dvrcam.info +dynns.com +eating-organic.net +fantasyleague.cc +geekgalaxy.com +golffan.us +health-carereform.com +homesecuritymac.com +homesecuritypc.com +hopto.me +ilovecollege.info +loginto.me +mlbfan.org +mmafan.biz +myactivedirectory.com +mydissent.net +myeffect.net +mymediapc.net +mypsx.net +mysecuritycamera.com +mysecuritycamera.net +mysecuritycamera.org +net-freaks.com +nflfan.org +nhlfan.net +no-ip.ca +no-ip.co.uk +no-ip.net +noip.us +onthewifi.com +pgafan.net +point2this.com +pointto.us +privatizehealthinsurance.net +quicksytes.com +read-books.org +securitytactics.com +serveexchange.com +servehumour.com +servep2p.com +servesarcasm.com +stufftoread.com +ufcfan.org +unusualperson.com +workisboring.com +3utilities.com +bounceme.net +ddns.net +ddnsking.com +gotdns.ch +hopto.org +myftp.biz +myftp.org +myvnc.com +no-ip.biz +no-ip.info +no-ip.org +noip.me +redirectme.net +servebeer.com +serveblog.net +servecounterstrike.com +serveftp.com +servegame.com +servehalflife.com +servehttp.com +serveirc.com +serveminecraft.net +servemp3.com +servepics.com +servequake.com +sytes.net +webhop.me +zapto.org + +// 
NodeArt : https://nodeart.io +// Submitted by Konstantin Nosov +stage.nodeart.io + +// Nucleos Inc. : https://nucleos.com +// Submitted by Piotr Zduniak +pcloud.host + +// NYC.mn : http://www.information.nyc.mn +// Submitted by Matthew Brown +nyc.mn + +// Observable, Inc. : https://observablehq.com +// Submitted by Mike Bostock +static.observableusercontent.com + +// Octopodal Solutions, LLC. : https://ulterius.io/ +// Submitted by Andrew Sampson +cya.gg + +// OMG.LOL : +// Submitted by Adam Newbold +omg.lol + +// Omnibond Systems, LLC. : https://www.omnibond.com +// Submitted by Cole Estep +cloudycluster.net + +// OmniWe Limited: https://omniwe.com +// Submitted by Vicary Archangel +omniwe.site + +// One.com: https://www.one.com/ +// Submitted by Jacob Bunk Nielsen +service.one + +// One Fold Media : http://www.onefoldmedia.com/ +// Submitted by Eddie Jones +nid.io + +// Open Social : https://www.getopensocial.com/ +// Submitted by Alexander Varwijk +opensocial.site + +// OpenCraft GmbH : http://opencraft.com/ +// Submitted by Sven Marnach +opencraft.hosting + +// OpenResearch GmbH: https://openresearch.com/ +// Submitted by Philipp Schmid +orsites.com + +// Opera Software, A.S.A. 
+// Submitted by Yngve Pettersen +operaunite.com + +// Oursky Limited : https://authgear.com/, https://skygear.io/ +// Submited by Authgear Team , Skygear Developer +authgear-staging.com +authgearapps.com +skygearapp.com + +// OutSystems +// Submitted by Duarte Santos +outsystemscloud.com + +// OVHcloud: https://ovhcloud.com +// Submitted by Vincent Cassé +*.webpaas.ovh.net +*.hosting.ovh.net + +// OwnProvider GmbH: http://www.ownprovider.com +// Submitted by Jan Moennich +ownprovider.com +own.pm + +// OwO : https://whats-th.is/ +// Submitted by Dean Sheather +*.owo.codes + +// OX : http://www.ox.rs +// Submitted by Adam Grand +ox.rs + +// oy.lc +// Submitted by Charly Coste +oy.lc + +// Pagefog : https://pagefog.com/ +// Submitted by Derek Myers +pgfog.com + +// Pagefront : https://www.pagefronthq.com/ +// Submitted by Jason Kriss +pagefrontapp.com + +// PageXL : https://pagexl.com +// Submitted by Yann Guichard +pagexl.com + +// Paywhirl, Inc : https://paywhirl.com/ +// Submitted by Daniel Netzer +*.paywhirl.com + +// pcarrier.ca Software Inc: https://pcarrier.ca/ +// Submitted by Pierre Carrier +bar0.net +bar1.net +bar2.net +rdv.to + +// .pl domains (grandfathered) +art.pl +gliwice.pl +krakow.pl +poznan.pl +wroc.pl +zakopane.pl + +// Pantheon Systems, Inc. 
: https://pantheon.io/ +// Submitted by Gary Dylina +pantheonsite.io +gotpantheon.com + +// Peplink | Pepwave : http://peplink.com/ +// Submitted by Steve Leung +mypep.link + +// Perspecta : https://perspecta.com/ +// Submitted by Kenneth Van Alstyne +perspecta.cloud + +// PE Ulyanov Kirill Sergeevich : https://airy.host +// Submitted by Kirill Ulyanov +lk3.ru + +// Planet-Work : https://www.planet-work.com/ +// Submitted by Frédéric VANNIÈRE +on-web.fr + +// Platform.sh : https://platform.sh +// Submitted by Nikola Kotur +bc.platform.sh +ent.platform.sh +eu.platform.sh +us.platform.sh +*.platformsh.site +*.tst.site + +// Platter: https://platter.dev +// Submitted by Patrick Flor +platter-app.com +platter-app.dev +platterp.us + +// Plesk : https://www.plesk.com/ +// Submitted by Anton Akhtyamov +pdns.page +plesk.page +pleskns.com + +// Port53 : https://port53.io/ +// Submitted by Maximilian Schieder +dyn53.io + +// Positive Codes Technology Company : http://co.bn/faq.html +// Submitted by Zulfais +co.bn + +// Postman, Inc : https://postman.com +// Submitted by Rahul Dhawan +postman-echo.com +pstmn.io +mock.pstmn.io +httpbin.org + +//prequalifyme.today : https://prequalifyme.today +//Submitted by DeepakTiwari deepak@ivylead.io +prequalifyme.today + +// prgmr.com : https://prgmr.com/ +// Submitted by Sarah Newman +xen.prgmr.com + +// priv.at : http://www.nic.priv.at/ +// Submitted by registry +priv.at + +// privacytools.io : https://www.privacytools.io/ +// Submitted by Jonah Aragon +prvcy.page + +// Protocol Labs : https://protocol.ai/ +// Submitted by Michael Burns +*.dweb.link + +// Protonet GmbH : http://protonet.io +// Submitted by Martin Meier +protonet.io + +// Publication Presse Communication SARL : https://ppcom.fr +// Submitted by Yaacov Akiba Slama +chirurgiens-dentistes-en-france.fr +byen.site + +// pubtls.org: https://www.pubtls.org +// Submitted by Kor Nielsen +pubtls.org + +// PythonAnywhere LLP: https://www.pythonanywhere.com +// Submitted by Giles 
Thomas +pythonanywhere.com +eu.pythonanywhere.com + +// QOTO, Org. +// Submitted by Jeffrey Phillips Freeman +qoto.io + +// Qualifio : https://qualifio.com/ +// Submitted by Xavier De Cock +qualifioapp.com + +// QuickBackend: https://www.quickbackend.com +// Submitted by Dani Biro +qbuser.com + +// Rad Web Hosting: https://radwebhosting.com +// Submitted by Scott Claeys +cloudsite.builders + +// Redgate Software: https://red-gate.com +// Submitted by Andrew Farries +instances.spawn.cc + +// Redstar Consultants : https://www.redstarconsultants.com/ +// Submitted by Jons Slemmer +instantcloud.cn + +// Russian Academy of Sciences +// Submitted by Tech Support +ras.ru + +// QA2 +// Submitted by Daniel Dent (https://www.danieldent.com/) +qa2.com + +// QCX +// Submitted by Cassandra Beelen +qcx.io +*.sys.qcx.io + +// QNAP System Inc : https://www.qnap.com +// Submitted by Nick Chang +dev-myqnapcloud.com +alpha-myqnapcloud.com +myqnapcloud.com + +// Quip : https://quip.com +// Submitted by Patrick Linehan +*.quipelements.com + +// Qutheory LLC : http://qutheory.io +// Submitted by Jonas Schwartz +vapor.cloud +vaporcloud.io + +// Rackmaze LLC : https://www.rackmaze.com +// Submitted by Kirill Pertsev +rackmaze.com +rackmaze.net + +// Rakuten Games, Inc : https://dev.viberplay.io +// Submitted by Joshua Zhang +g.vbrplsbx.io + +// Rancher Labs, Inc : https://rancher.com +// Submitted by Vincent Fiduccia +*.on-k3s.io +*.on-rancher.cloud +*.on-rio.io + +// Read The Docs, Inc : https://www.readthedocs.org +// Submitted by David Fischer +readthedocs.io + +// Red Hat, Inc. 
OpenShift : https://openshift.redhat.com/ +// Submitted by Tim Kramer +rhcloud.com + +// Render : https://render.com +// Submitted by Anurag Goel +app.render.com +onrender.com + +// Repl.it : https://repl.it +// Submitted by Mason Clayton +repl.co +id.repl.co +repl.run + +// Resin.io : https://resin.io +// Submitted by Tim Perry +resindevice.io +devices.resinstaging.io + +// RethinkDB : https://www.rethinkdb.com/ +// Submitted by Chris Kastorff +hzc.io + +// Revitalised Limited : http://www.revitalised.co.uk +// Submitted by Jack Price +wellbeingzone.eu +wellbeingzone.co.uk + +// Rico Developments Limited : https://adimo.co +// Submitted by Colin Brown +adimo.co.uk + +// Riseup Networks : https://riseup.net +// Submitted by Micah Anderson +itcouldbewor.se + +// Rochester Institute of Technology : http://www.rit.edu/ +// Submitted by Jennifer Herting +git-pages.rit.edu + +// Rusnames Limited: http://rusnames.ru/ +// Submitted by Sergey Zotov +биз.рус +ком.рус +крым.рус +мир.рус +мск.рус +орг.рус +самара.рус +сочи.рус +спб.рус +я.рус + +// Sandstorm Development Group, Inc. 
: https://sandcats.io/ +// Submitted by Asheesh Laroia +sandcats.io + +// SBE network solutions GmbH : https://www.sbe.de/ +// Submitted by Norman Meilick +logoip.de +logoip.com + +// schokokeks.org GbR : https://schokokeks.org/ +// Submitted by Hanno Böck +schokokeks.net + +// Scottish Government: https://www.gov.scot +// Submitted by Martin Ellis +gov.scot +service.gov.scot + +// Scry Security : http://www.scrysec.com +// Submitted by Shante Adam +scrysec.com + +// Securepoint GmbH : https://www.securepoint.de +// Submitted by Erik Anders +firewall-gateway.com +firewall-gateway.de +my-gateway.de +my-router.de +spdns.de +spdns.eu +firewall-gateway.net +my-firewall.org +myfirewall.org +spdns.org + +// Seidat : https://www.seidat.com +// Submitted by Artem Kondratev +seidat.net + +// Sellfy : https://sellfy.com +// Submitted by Yuriy Romadin +sellfy.store + +// Senseering GmbH : https://www.senseering.de +// Submitted by Felix Mönckemeyer +senseering.net + +// Sendmsg: https://www.sendmsg.co.il +// Submitted by Assaf Stern +minisite.ms + +// Service Magnet : https://myservicemagnet.com +// Submitted by Dave Sanders +magnet.page + +// Service Online LLC : http://drs.ua/ +// Submitted by Serhii Bulakh +biz.ua +co.ua +pp.ua + +// Shift Crypto AG : https://shiftcrypto.ch +// Submitted by alex +shiftcrypto.dev +shiftcrypto.io + +// ShiftEdit : https://shiftedit.net/ +// Submitted by Adam Jimenez +shiftedit.io + +// Shopblocks : http://www.shopblocks.com/ +// Submitted by Alex Bowers +myshopblocks.com + +// Shopify : https://www.shopify.com +// Submitted by Alex Richter +myshopify.com + +// Shopit : https://www.shopitcommerce.com/ +// Submitted by Craig McMahon +shopitsite.com + +// shopware AG : https://shopware.com +// Submitted by Jens Küper +shopware.store + +// Siemens Mobility GmbH +// Submitted by Oliver Graebner +mo-siemens.io + +// SinaAppEngine : http://sae.sina.com.cn/ +// Submitted by SinaAppEngine +1kapp.com +appchizi.com +applinzi.com +sinaapp.com 
+vipsinaapp.com + +// Siteleaf : https://www.siteleaf.com/ +// Submitted by Skylar Challand +siteleaf.net + +// Skyhat : http://www.skyhat.io +// Submitted by Shante Adam +bounty-full.com +alpha.bounty-full.com +beta.bounty-full.com + +// Small Technology Foundation : https://small-tech.org +// Submitted by Aral Balkan +small-web.org + +// Smoove.io : https://www.smoove.io/ +// Submitted by Dan Kozak +vp4.me + +// Snowplow Analytics : https://snowplowanalytics.com/ +// Submitted by Ian Streeter +try-snowplow.com + +// SourceHut : https://sourcehut.org +// Submitted by Drew DeVault +srht.site + +// Stackhero : https://www.stackhero.io +// Submitted by Adrien Gillon +stackhero-network.com + +// Staclar : https://staclar.com +// Submitted by Matthias Merkel +novecore.site + +// staticland : https://static.land +// Submitted by Seth Vincent +static.land +dev.static.land +sites.static.land + +// Storebase : https://www.storebase.io +// Submitted by Tony Schirmer +storebase.store + +// Strategic System Consulting (eApps Hosting): https://www.eapps.com/ +// Submitted by Alex Oancea +vps-host.net +atl.jelastic.vps-host.net +njs.jelastic.vps-host.net +ric.jelastic.vps-host.net + +// Sony Interactive Entertainment LLC : https://sie.com/ +// Submitted by David Coles +playstation-cloud.com + +// SourceLair PC : https://www.sourcelair.com +// Submitted by Antonis Kalipetis +apps.lair.io +*.stolos.io + +// SpaceKit : https://www.spacekit.io/ +// Submitted by Reza Akhavan +spacekit.io + +// SpeedPartner GmbH: https://www.speedpartner.de/ +// Submitted by Stefan Neufeind +customer.speedpartner.de + +// Spreadshop (sprd.net AG) : https://www.spreadshop.com/ +// Submitted by Martin Breest +myspreadshop.at +myspreadshop.com.au +myspreadshop.be +myspreadshop.ca +myspreadshop.ch +myspreadshop.com +myspreadshop.de +myspreadshop.dk +myspreadshop.es +myspreadshop.fi +myspreadshop.fr +myspreadshop.ie +myspreadshop.it +myspreadshop.net +myspreadshop.nl +myspreadshop.no +myspreadshop.pl 
+myspreadshop.se +myspreadshop.co.uk + +// Standard Library : https://stdlib.com +// Submitted by Jacob Lee +api.stdlib.com + +// Storj Labs Inc. : https://storj.io/ +// Submitted by Philip Hutchins +storj.farm + +// Studenten Net Twente : http://www.snt.utwente.nl/ +// Submitted by Silke Hofstra +utwente.io + +// Student-Run Computing Facility : https://www.srcf.net/ +// Submitted by Edwin Balani +soc.srcf.net +user.srcf.net + +// Sub 6 Limited: http://www.sub6.com +// Submitted by Dan Miller +temp-dns.com + +// Supabase : https://supabase.io +// Submitted by Inian Parameshwaran +supabase.co +supabase.in +supabase.net +su.paba.se + +// Symfony, SAS : https://symfony.com/ +// Submitted by Fabien Potencier +*.s5y.io +*.sensiosite.cloud + +// Syncloud : https://syncloud.org +// Submitted by Boris Rybalkin +syncloud.it + +// Synology, Inc. : https://www.synology.com/ +// Submitted by Rony Weng +diskstation.me +dscloud.biz +dscloud.me +dscloud.mobi +dsmynas.com +dsmynas.net +dsmynas.org +familyds.com +familyds.net +familyds.org +i234.me +myds.me +synology.me +vpnplus.to +direct.quickconnect.to + +// Tabit Technologies Ltd. : https://tabit.cloud/ +// Submitted by Oren Agiv +tabitorder.co.il + +// TAIFUN Software AG : http://taifun-software.de +// Submitted by Bjoern Henke +taifun-dns.de + +// Tailscale Inc. : https://www.tailscale.com +// Submitted by David Anderson +beta.tailscale.net +ts.net + +// TASK geographical domains (www.task.gda.pl/uslugi/dns) +gda.pl +gdansk.pl +gdynia.pl +med.pl +sopot.pl + +// Teckids e.V. 
: https://www.teckids.org +// Submitted by Dominik George +edugit.io +s3.teckids.org + +// Telebit : https://telebit.cloud +// Submitted by AJ ONeal +telebit.app +telebit.io +*.telebit.xyz + +// The Gwiddle Foundation : https://gwiddlefoundation.org.uk +// Submitted by Joshua Bayfield +gwiddle.co.uk + +// Thingdust AG : https://thingdust.com/ +// Submitted by Adrian Imboden +*.firenet.ch +*.svc.firenet.ch +reservd.com +thingdustdata.com +cust.dev.thingdust.io +cust.disrec.thingdust.io +cust.prod.thingdust.io +cust.testing.thingdust.io +reservd.dev.thingdust.io +reservd.disrec.thingdust.io +reservd.testing.thingdust.io + +// ticket i/O GmbH : https://ticket.io +// Submitted by Christian Franke +tickets.io + +// Tlon.io : https://tlon.io +// Submitted by Mark Staarink +arvo.network +azimuth.network +tlon.network + +// Tor Project, Inc. : https://torproject.org +// Submitted by Antoine Beaupré +bloxcms.com +townnews-staging.com + +// TradableBits: https://tradablebits.com +// Submitted by Dmitry Khrisanov dmitry@tradablebits.com +tbits.me + +// TrafficPlex GmbH : https://www.trafficplex.de/ +// Submitted by Phillipp Röll +12hp.at +2ix.at +4lima.at +lima-city.at +12hp.ch +2ix.ch +4lima.ch +lima-city.ch +trafficplex.cloud +de.cool +12hp.de +2ix.de +4lima.de +lima-city.de +1337.pictures +clan.rip +lima-city.rocks +webspace.rocks +lima.zone + +// TransIP : https://www.transip.nl +// Submitted by Rory Breuk +*.transurl.be +*.transurl.eu +*.transurl.nl + +// TuxFamily : http://tuxfamily.org +// Submitted by TuxFamily administrators +tuxfamily.org + +// TwoDNS : https://www.twodns.de/ +// Submitted by TwoDNS-Support +dd-dns.de +diskstation.eu +diskstation.org +dray-dns.de +draydns.de +dyn-vpn.de +dynvpn.de +mein-vigor.de +my-vigor.de +my-wan.de +syno-ds.de +synology-diskstation.de +synology-ds.de + +// Typeform : https://www.typeform.com +// Submitted by Sergi Ferriz +pro.typeform.com + +// Uberspace : https://uberspace.de +// Submitted by Moritz Werner +uber.space 
+*.uberspace.de + +// UDR Limited : http://www.udr.hk.com +// Submitted by registry +hk.com +hk.org +ltd.hk +inc.hk + +// United Gameserver GmbH : https://united-gameserver.de +// Submitted by Stefan Schwarz +virtualuser.de +virtual-user.de + +// Upli : https://upli.io +// Submitted by Lenny Bakkalian +upli.io + +// urown.net : https://urown.net +// Submitted by Hostmaster +urown.cloud +dnsupdate.info + +// .US +// Submitted by Ed Moore +lib.de.us + +// VeryPositive SIA : http://very.lv +// Submitted by Danko Aleksejevs +2038.io + +// Vercel, Inc : https://vercel.com/ +// Submitted by Connor Davis +vercel.app +vercel.dev +now.sh + +// Viprinet Europe GmbH : http://www.viprinet.com +// Submitted by Simon Kissel +router.management + +// Virtual-Info : https://www.virtual-info.info/ +// Submitted by Adnan RIHAN +v-info.info + +// Voorloper.com: https://voorloper.com +// Submitted by Nathan van Bakel +voorloper.cloud + +// Voxel.sh DNS : https://voxel.sh/dns/ +// Submitted by Mia Rehlinger +neko.am +nyaa.am +be.ax +cat.ax +es.ax +eu.ax +gg.ax +mc.ax +us.ax +xy.ax +nl.ci +xx.gl +app.gp +blog.gt +de.gt +to.gt +be.gy +cc.hn +blog.kg +io.kg +jp.kg +tv.kg +uk.kg +us.kg +de.ls +at.md +de.md +jp.md +to.md +indie.porn +vxl.sh +ch.tc +me.tc +we.tc +nyan.to +at.vg +blog.vu +dev.vu +me.vu + +// V.UA Domain Administrator : https://domain.v.ua/ +// Submitted by Serhii Rostilo +v.ua + +// Waffle Computer Inc., Ltd. : https://docs.waffleinfo.com +// Submitted by Masayuki Note +wafflecell.com + +// WapBlog.ID : https://www.wapblog.id +// Submitted by Fajar Sodik +idnblogger.com +indowapblog.com +bloger.id +wblog.id +wbq.me +fastblog.net + +// WebHare bv: https://www.webhare.com/ +// Submitted by Arnold Hendriks +*.webhare.dev + +// WebHotelier Technologies Ltd: https://www.webhotelier.net/ +// Submitted by Apostolos Tsakpinis +reserve-online.net +reserve-online.com +bookonline.app +hotelwithflight.com + +// WeDeploy by Liferay, Inc. 
: https://www.wedeploy.com +// Submitted by Henrique Vicente +wedeploy.io +wedeploy.me +wedeploy.sh + +// Western Digital Technologies, Inc : https://www.wdc.com +// Submitted by Jung Jin +remotewd.com + +// WIARD Enterprises : https://wiardweb.com +// Submitted by Kidd Hustle +pages.wiardweb.com + +// Wikimedia Labs : https://wikitech.wikimedia.org +// Submitted by Arturo Borrero Gonzalez +wmflabs.org +toolforge.org +wmcloud.org + +// WISP : https://wisp.gg +// Submitted by Stepan Fedotov +panel.gg +daemon.panel.gg + +// WoltLab GmbH : https://www.woltlab.com +// Submitted by Tim Düsterhus +woltlab-demo.com +myforum.community +community-pro.de +diskussionsbereich.de +community-pro.net +meinforum.net + +// Woods Valldata : https://www.woodsvalldata.co.uk/ +// Submitted by Chris Whittle +affinitylottery.org.uk +raffleentry.org.uk +weeklylottery.org.uk + +// WP Engine : https://wpengine.com/ +// Submitted by Michael Smith +// Submitted by Brandon DuRette +wpenginepowered.com +js.wpenginepowered.com + +// Wix.com, Inc. 
: https://www.wix.com +// Submitted by Shahar Talmi +wixsite.com +editorx.io + +// XenonCloud GbR: https://xenoncloud.net +// Submitted by Julian Uphoff +half.host + +// XnBay Technology : http://www.xnbay.com/ +// Submitted by XnBay Developer +xnbay.com +u2.xnbay.com +u2-local.xnbay.com + +// XS4ALL Internet bv : https://www.xs4all.nl/ +// Submitted by Daniel Mostertman +cistron.nl +demon.nl +xs4all.space + +// Yandex.Cloud LLC: https://cloud.yandex.com +// Submitted by Alexander Lodin +yandexcloud.net +storage.yandexcloud.net +website.yandexcloud.net + +// YesCourse Pty Ltd : https://yescourse.com +// Submitted by Atul Bhouraskar +official.academy + +// Yola : https://www.yola.com/ +// Submitted by Stefano Rivera +yolasite.com + +// Yombo : https://yombo.net +// Submitted by Mitch Schwenk +ybo.faith +yombo.me +homelink.one +ybo.party +ybo.review +ybo.science +ybo.trade + +// Yunohost : https://yunohost.org +// Submitted by Valentin Grimaud +ynh.fr +nohost.me +noho.st + +// ZaNiC : http://www.za.net/ +// Submitted by registry +za.net +za.org + +// Zine EOOD : https://zine.bg/ +// Submitted by Martin Angelov +bss.design + +// Zitcom A/S : https://www.zitcom.dk +// Submitted by Emil Stahl +basicserver.io +virtualserver.io +enterprisecloud.nu + +// ===END PRIVATE DOMAINS=== diff --git a/queue/dsn.go b/queue/dsn.go new file mode 100644 index 0000000..4f6430e --- /dev/null +++ b/queue/dsn.go @@ -0,0 +1,181 @@ +package queue + +import ( + "bufio" + "fmt" + "os" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dsn" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/store" +) + +func queueDSNFailure(log *mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string) { + const subject = "mail delivery failed" + message := fmt.Sprintf(` +Delivery has failed permanently for your email to: + + %s + +No further deliveries will be attempted. 
+ +Error during the last delivery attempt: + + %s +`, m.Recipient().XString(m.SMTPUTF8), errmsg) + + queueDSN(log, m, remoteMTA, secodeOpt, errmsg, true, nil, subject, message) +} + +func queueDSNDelay(log *mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, retryUntil time.Time) { + const subject = "mail delivery delayed" + message := fmt.Sprintf(` +Delivery has been delayed of your email to: + + %s + +Next attempts to deliver: in 4 hours, 8 hours and 16 hours. +If these attempts all fail, you will receive a notice. + +Error during the last delivery attempt: + + %s +`, m.Recipient().XString(false), errmsg) + + queueDSN(log, m, remoteMTA, secodeOpt, errmsg, false, &retryUntil, subject, message) +} + +// We only queue DSNs for delivery failures for emails submitted by authenticated +// users. So we are delivering to local users. ../rfc/5321:1466 +// ../rfc/5321:1494 +// ../rfc/7208:490 +// todo future: when we implement relaying, we should be able to send DSNs to non-local users. and possibly specify a null mailfrom. ../rfc/5321:1503 +func queueDSN(log *mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, permanent bool, retryUntil *time.Time, subject, textBody string) { + kind := "delayed delivery" + if permanent { + kind = "failure" + } + + qlog := func(text string, err error) { + log.Errorx("queue dsn: "+text+": sender will not be informed about dsn", err, mlog.Field("sender", m.Sender().XString(m.SMTPUTF8)), mlog.Field("kind", kind)) + } + + msgf, err := os.Open(m.MessagePath()) + if err != nil { + qlog("opening queued message", err) + return + } + msgr := store.FileMsgReader(m.MsgPrefix, msgf) + defer msgr.Close() + headers, err := message.ReadHeaders(bufio.NewReader(msgr)) + if err != nil { + qlog("reading headers of queued message", err) + return + } + + var action dsn.Action + var status string + if permanent { + status = "5." + action = dsn.Failed + } else { + action = dsn.Delayed + status = "4." 
+ } + if secodeOpt != "" { + status += secodeOpt + } else { + status += "0.0" + } + diagCode := errmsg + if !dsn.HasCode(diagCode) { + diagCode = status + " " + errmsg + } + + dsnMsg := &dsn.Message{ + SMTPUTF8: m.SMTPUTF8, + From: smtp.Path{Localpart: "postmaster", IPDomain: dns.IPDomain{Domain: mox.Conf.Static.HostnameDomain}}, + To: m.Sender(), + Subject: subject, + TextBody: textBody, + + ReportingMTA: mox.Conf.Static.HostnameDomain.ASCII, + ArrivalDate: m.Queued, + + Recipients: []dsn.Recipient{ + { + FinalRecipient: m.Recipient(), + Action: action, + Status: status, + RemoteMTA: remoteMTA, + DiagnosticCode: diagCode, + LastAttemptDate: *m.LastAttempt, + WillRetryUntil: retryUntil, + }, + }, + + Original: headers, + } + msgData, err := dsnMsg.Compose(log, m.SMTPUTF8) + if err != nil { + qlog("composing dsn", err) + return + } + + msgData = append(msgData, []byte("Return-Path: <"+dsnMsg.From.XString(m.SMTPUTF8)+">\r\n")...) + + mailbox := "Inbox" + acc, err := store.OpenAccount(m.SenderAccount) + if err != nil { + acc, err = store.OpenAccount(mox.Conf.Static.Postmaster.Account) + if err != nil { + qlog("looking up postmaster account after sender account was not found", err) + return + } + mailbox = mox.Conf.Static.Postmaster.Mailbox + } + defer func() { + if err := acc.Close(); err != nil { + log.Errorx("queue dsn: closing account", err, mlog.Field("sender", m.Sender().XString(m.SMTPUTF8)), mlog.Field("kind", kind)) + } + }() + + msgFile, err := store.CreateMessageTemp("queue-dsn") + if err != nil { + qlog("creating temporary message file", err) + return + } + defer func() { + if msgFile != nil { + if err := os.Remove(msgFile.Name()); err != nil { + log.Errorx("removing message file", err, mlog.Field("path", msgFile.Name())) + } + msgFile.Close() + } + }() + + msgWriter := &message.Writer{Writer: msgFile} + if _, err := msgWriter.Write(msgData); err != nil { + qlog("writing dsn message", err) + return + } + + msg := &store.Message{ + Received: time.Now(), + 
Size: msgWriter.Size, + MsgPrefix: []byte{}, + } + acc.WithWLock(func() { + if err := acc.DeliverMailbox(log, mailbox, msg, msgFile, true); err != nil { + qlog("delivering dsn to mailbox", err) + return + } + }) + msgFile.Close() + msgFile = nil +} diff --git a/queue/queue.go b/queue/queue.go new file mode 100644 index 0000000..cd2d34a --- /dev/null +++ b/queue/queue.go @@ -0,0 +1,897 @@ +// Package queue is in charge of outgoing messages, queueing them when submitted, +// attempting a first delivery over SMTP, retrying with backoff and sending DSNs +// for delayed or failed deliveries. +package queue + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime/debug" + "sort" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dsn" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/mtasts" + "github.com/mjl-/mox/mtastsdb" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/smtpclient" + "github.com/mjl-/mox/store" +) + +var xlog = mlog.New("queue") + +var ( + metricConnection = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_queue_connection_total", + Help: "Queue client connections, outgoing.", + }, + []string{ + "result", // "ok", "timeout", "canceled", "error" + }, + ) + metricDeliveryHost = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_queue_delivery_duration_seconds", + Help: "SMTP client delivery attempt to single host.", + Buckets: []float64{0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30, 60, 120}, + }, + []string{ + "attempt", // Number of attempts. + "tlsmode", // strict, opportunistic, skip + "result", // ok, timeout, canceled, temperror, permerror, error + }, + ) +) + +// Used to dial remote SMTP servers. +// Overridden for tests. 
+var dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + dialer := &net.Dialer{Timeout: timeout, LocalAddr: laddr} + return dialer.DialContext(ctx, "tcp", addr) +} + +var queueDB *bstore.DB + +// Msg is a message in the queue. +type Msg struct { + ID int64 + Queued time.Time `bstore:"default now"` + SenderAccount string // Failures are delivered back to this local account. + SenderLocalpart smtp.Localpart // Should be a local user and domain. + SenderDomain dns.IPDomain + RecipientLocalpart smtp.Localpart // Typically a remote user and domain. + RecipientDomain dns.IPDomain + RecipientDomainStr string // For filtering. + Attempts int // Next attempt is based on last attempt and exponential back off based on attempts. + DialedIPs map[string][]net.IP // For each host, the IPs that were dialed. Used for IP selection for later attempts. + NextAttempt time.Time // For scheduling. + LastAttempt *time.Time + LastError string + Has8bit bool // Whether message contains bytes with high bit set, determines whether 8BITMIME SMTP extension is needed. + SMTPUTF8 bool // Whether message requires use of SMTPUTF8. + Size int64 // Full size of message, combined MsgPrefix with contents of message file. + MsgPrefix []byte + DSNUTF8 []byte // If set, this message is a DSN and this is a version using utf-8, for the case the remote MTA supports smtputf8. In this case, Size and MsgPrefix are not relevant. +} + +// Sender of message as used in MAIL FROM. +func (m Msg) Sender() smtp.Path { + return smtp.Path{Localpart: m.SenderLocalpart, IPDomain: m.SenderDomain} +} + +// Recipient of message as used in RCPT TO. +func (m Msg) Recipient() smtp.Path { + return smtp.Path{Localpart: m.RecipientLocalpart, IPDomain: m.RecipientDomain} +} + +// MessagePath returns the path where the message is stored. 
func (m Msg) MessagePath() string {
	return mox.DataDirPath(filepath.Join("queue", store.MessagePath(m.ID)))
}

// Init opens the queue database without starting delivery.
func Init() error {
	qpath := mox.DataDirPath("queue/index.db")
	// Best-effort: if creating the directory fails, bstore.Open below will fail too.
	os.MkdirAll(filepath.Dir(qpath), 0770)
	// Remember whether the database file existed before opening, so we can clean
	// up a freshly created (and therefore empty/broken) file on open failure.
	isNew := false
	if _, err := os.Stat(qpath); err != nil && os.IsNotExist(err) {
		isNew = true
	}

	var err error
	queueDB, err = bstore.Open(qpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, Msg{})
	if err != nil {
		if isNew {
			// Don't leave a half-initialized database file behind; a next Init
			// should start from a clean slate.
			os.Remove(qpath)
		}
		return fmt.Errorf("open queue database: %s", err)
	}
	return nil
}

// Shutdown closes the queue database. The delivery process isn't stopped. For tests only.
func Shutdown() {
	if err := queueDB.Close(); err != nil {
		xlog.Errorx("closing queue db", err)
	}
	queueDB = nil
}

// List returns all messages in the delivery queue.
// Ordered by earliest delivery attempt first.
func List() ([]Msg, error) {
	qmsgs, err := bstore.QueryDB[Msg](queueDB).List()
	if err != nil {
		return nil, err
	}
	// Sort order: messages that were never attempted come first, then by oldest
	// last attempt; ties are broken by insertion order (ID).
	sort.Slice(qmsgs, func(i, j int) bool {
		a := qmsgs[i]
		b := qmsgs[j]
		la := a.LastAttempt != nil
		lb := b.LastAttempt != nil
		if !la && lb {
			return true
		} else if la && !lb {
			return false
		}
		if !la && !lb || a.LastAttempt.Equal(*b.LastAttempt) {
			return a.ID < b.ID
		}
		return a.LastAttempt.Before(*b.LastAttempt)
	})
	return qmsgs, nil
}

// Add a new message to the queue. The queue is kicked immediately to start a
// first delivery attempt.
//
// If consumeFile is true, it is removed as part of delivery (by rename or copy
// and remove). msgFile is never closed by Add.
//
// dsnutf8Opt is a utf8-version of the message, to be used only for DSNs. If set,
// this data is used as the message when delivering the DSN and the remote SMTP
// server supports SMTPUTF8. If the remote SMTP server does not support SMTPUTF8,
// the regular non-utf8 message is delivered.
+func Add(log *mlog.Log, senderAccount string, mailFrom, rcptTo smtp.Path, has8bit, smtputf8 bool, size int64, msgPrefix []byte, msgFile *os.File, dsnutf8Opt []byte, consumeFile bool) error { + // todo: Add should accept multiple rcptTo if they are for the same domain. so we can queue them for delivery in one (or just a few) session(s), transferring the data only once. ../rfc/5321:3759 + + tx, err := queueDB.Begin(true) + if err != nil { + return fmt.Errorf("begin transaction: %w", err) + } + defer func() { + if tx != nil { + if err := tx.Rollback(); err != nil { + log.Errorx("rollback for queue", err) + } + } + }() + + now := time.Now() + qm := Msg{0, now, senderAccount, mailFrom.Localpart, mailFrom.IPDomain, rcptTo.Localpart, rcptTo.IPDomain, formatIPDomain(rcptTo.IPDomain), 0, nil, now, nil, "", has8bit, smtputf8, size, msgPrefix, dsnutf8Opt} + + if err := tx.Insert(&qm); err != nil { + return err + } + + dst := mox.DataDirPath(filepath.Join("queue", store.MessagePath(qm.ID))) + defer func() { + if dst != "" { + if err := os.Remove(dst); err != nil { + log.Infox("removing destination message file for queue", err, mlog.Field("path", dst)) + } + } + }() + dstDir := filepath.Dir(dst) + os.MkdirAll(dstDir, 0770) + if consumeFile { + if err := os.Rename(msgFile.Name(), dst); err != nil { + // Could be due to cross-filesystem rename. Users shouldn't configure their systems that way. + return fmt.Errorf("move message into queue dir: %w", err) + } + } else if err := os.Link(msgFile.Name(), dst); err != nil { + // Assume file system does not support hardlinks. Copy it instead. 
+ if err := writeFile(dst, &moxio.AtReader{R: msgFile}); err != nil { + return fmt.Errorf("copying message to new file: %s", err) + } + } + + if err := moxio.SyncDir(dstDir); err != nil { + return fmt.Errorf("sync directory: %v", err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("commit transaction: %s", err) + } + tx = nil + dst = "" + + queuekick() + return nil +} + +// write contents of r to new file dst, for delivering a message. +func writeFile(dst string, r io.Reader) error { + df, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0660) + if err != nil { + return fmt.Errorf("create: %w", err) + } + defer func() { + if df != nil { + df.Close() + } + }() + if _, err := io.Copy(df, r); err != nil { + return fmt.Errorf("copy: %s", err) + } else if err := df.Sync(); err != nil { + return fmt.Errorf("sync: %s", err) + } else if err := df.Close(); err != nil { + return fmt.Errorf("close: %s", err) + } + df = nil + return nil +} + +func formatIPDomain(d dns.IPDomain) string { + if len(d.IP) > 0 { + return "[" + d.IP.String() + "]" + } + return d.Domain.Name() +} + +var ( + kick = make(chan struct{}, 1) + deliveryResult = make(chan string, 1) +) + +func queuekick() { + select { + case kick <- struct{}{}: + default: + } +} + +// Kick sets the NextAttempt for messages matching all parameters that are nonzero, +// and kicks the queue, attempting delivery of those messages. If all parameters +// are zero, all messages are kicked. +// Returns number of messages queued for immediate delivery. 
+func Kick(ID int64, toDomain string, recipient string) (int, error) { + q := bstore.QueryDB[Msg](queueDB) + if ID > 0 { + q.FilterID(ID) + } + if toDomain != "" { + q.FilterEqual("RecipientDomainStr", toDomain) + } + if recipient != "" { + q.FilterFn(func(qm Msg) bool { + return qm.Recipient().XString(true) == recipient + }) + } + n, err := q.UpdateNonzero(Msg{NextAttempt: time.Now()}) + if err != nil { + return 0, fmt.Errorf("selecting and updating messages in queue: %v", err) + } + queuekick() + return n, nil +} + +// Drop removes messages from the queue that match all nonzero parameters. +// If all parameters are zero, all messages are removed. +// Returns number of messages removed. +func Drop(ID int64, toDomain string, recipient string) (int, error) { + q := bstore.QueryDB[Msg](queueDB) + if ID > 0 { + q.FilterID(ID) + } + if toDomain != "" { + q.FilterEqual("RecipientDomainStr", toDomain) + } + if recipient != "" { + q.FilterFn(func(qm Msg) bool { + return qm.Recipient().XString(true) == recipient + }) + } + n, err := q.Delete() + if err != nil { + return 0, fmt.Errorf("selecting and deleting messages from queue: %v", err) + } + return n, nil +} + +// OpenMessage opens a message present in the queue. +func OpenMessage(id int64) (io.ReadCloser, error) { + qm := Msg{ID: id} + err := queueDB.Get(&qm) + if err != nil { + return nil, err + } + f, err := os.Open(qm.MessagePath()) + if err != nil { + return nil, fmt.Errorf("open message file: %s", err) + } + r := store.FileMsgReader(qm.MsgPrefix, f) + return r, err +} + +const maxConcurrentDeliveries = 10 + +// Start opens the database by calling Init, then starts the delivery process. +func Start(resolver dns.Resolver, done chan struct{}) error { + if err := Init(); err != nil { + return err + } + + // High-level delivery strategy advice: ../rfc/5321:3685 + go func() { + // Map keys are either dns.Domain.Name()'s, or string-formatted IP addresses. 
+ busyDomains := map[string]struct{}{} + + timer := time.NewTimer(0) + + for { + select { + case <-mox.Shutdown: + done <- struct{}{} + return + case <-kick: + case <-timer.C: + case domain := <-deliveryResult: + delete(busyDomains, domain) + } + + if len(busyDomains) >= maxConcurrentDeliveries { + continue + } + + launchWork(resolver, busyDomains) + timer.Reset(nextWork(busyDomains)) + } + }() + return nil +} + +func nextWork(busyDomains map[string]struct{}) time.Duration { + q := bstore.QueryDB[Msg](queueDB) + if len(busyDomains) > 0 { + var doms []any + for d := range busyDomains { + doms = append(doms, d) + } + q.FilterNotEqual("RecipientDomainStr", doms...) + } + q.SortAsc("NextAttempt") + q.Limit(1) + qm, err := q.Get() + if err == bstore.ErrAbsent { + return 24 * time.Hour + } else if err != nil { + xlog.Errorx("finding time for next delivery attempt", err) + return 1 * time.Minute + } + return time.Until(qm.NextAttempt) +} + +func launchWork(resolver dns.Resolver, busyDomains map[string]struct{}) int { + q := bstore.QueryDB[Msg](queueDB) + q.FilterLessEqual("NextAttempt", time.Now()) + q.SortAsc("NextAttempt") + q.Limit(maxConcurrentDeliveries) + if len(busyDomains) > 0 { + var doms []any + for d := range busyDomains { + doms = append(doms, d) + } + q.FilterNotEqual("RecipientDomainStr", doms...) + } + msgs, err := q.List() + if err != nil { + xlog.Errorx("querying for work in queue", err) + mox.Sleep(mox.Context, 1*time.Second) + return -1 + } + + for _, m := range msgs { + busyDomains[formatIPDomain(m.RecipientDomain)] = struct{}{} + go deliver(resolver, m) + } + return len(msgs) +} + +// Remove message from queue in database and file system. +func queueDelete(msgID int64) error { + if err := queueDB.Delete(&Msg{ID: msgID}); err != nil { + return err + } + // If removing from database fails, we'll also leave the file in the file system. 
+ + p := mox.DataDirPath(filepath.Join("queue", store.MessagePath(msgID))) + if err := os.Remove(p); err != nil { + return fmt.Errorf("removing queue message from file system: %v", err) + } + + return nil +} + +// deliver attempts to deliver a message. +// The queue is updated, either by removing a delivered or permanently failed +// message, or updating the time for the next attempt. A DSN may be sent. +func deliver(resolver dns.Resolver, m Msg) { + cid := mox.Cid() + qlog := xlog.WithCid(cid).Fields(mlog.Field("from", m.Sender()), mlog.Field("recipient", m.Recipient()), mlog.Field("attempts", m.Attempts), mlog.Field("msgID", m.ID)) + + defer func() { + deliveryResult <- formatIPDomain(m.RecipientDomain) + + x := recover() + if x != nil { + qlog.Error("deliver panic", mlog.Field("panic", x)) + debug.PrintStack() + metrics.PanicInc("queue") + } + }() + + // We register this attempt by setting last_attempt, and already next_attempt time + // in the future with exponential backoff. If we run into trouble delivery below, + // at least we won't be bothering the receiving server with our problems. + // Delivery attempts: immediately, 7.5m, 15m, 30m, 1h, 2h (send delayed DSN), 4h, + // 8h, 16h (send permanent failure DSN). + // ../rfc/5321:3703 + // todo future: make the back off times configurable. 
../rfc/5321:3713 + backoff := (7*60 + 30) * time.Second + for i := 0; i < m.Attempts; i++ { + backoff *= time.Duration(2) + } + m.Attempts++ + now := time.Now() + m.LastAttempt = &now + m.NextAttempt = now.Add(backoff) + qup := bstore.QueryDB[Msg](queueDB) + qup.FilterID(m.ID) + update := Msg{Attempts: m.Attempts, NextAttempt: m.NextAttempt, LastAttempt: m.LastAttempt} + if _, err := qup.UpdateNonzero(update); err != nil { + qlog.Errorx("storing delivery attempt", err) + return + } + + fail := func(permanent bool, remoteMTA dsn.NameIP, secodeOpt, errmsg string) { + if permanent || m.Attempts >= 8 { + qlog.Errorx("permanent failure delivering from queue", errors.New(errmsg)) + queueDSNFailure(qlog, m, remoteMTA, secodeOpt, errmsg) + + if err := queueDelete(m.ID); err != nil { + qlog.Errorx("deleting message from queue after permanent failure", err) + } + return + } + + qup := bstore.QueryDB[Msg](queueDB) + qup.FilterID(m.ID) + if _, err := qup.UpdateNonzero(Msg{LastError: errmsg, DialedIPs: m.DialedIPs}); err != nil { + qlog.Errorx("storing delivery error", err, mlog.Field("deliveryError", errmsg)) + } + + if m.Attempts == 5 { + // We've attempted deliveries at these intervals: 0, 7.5m, 15m, 30m, 1h, 2u. + // Let sender know delivery is delayed. + qlog.Errorx("temporary failure delivering from queue, sending delayed dsn", errors.New(errmsg), mlog.Field("backoff", backoff)) + + retryUntil := m.LastAttempt.Add((4 + 8 + 16) * time.Hour) + queueDSNDelay(qlog, m, remoteMTA, secodeOpt, errmsg, retryUntil) + } else { + qlog.Errorx("temporary failure delivering from queue", errors.New(errmsg), mlog.Field("backoff", backoff), mlog.Field("nextattempt", m.NextAttempt)) + } + } + + hosts, effectiveDomain, permanent, err := gatherHosts(resolver, m, cid, qlog) + if err != nil { + fail(permanent, dsn.NameIP{}, "", err.Error()) + return + } + + // Check for MTA-STS policy and enforce it if needed. 
We have to check the + // effective domain (found after following CNAME record(s)): there will certainly + // not be an mtasts record for the original recipient domain, because that is not + // allowed when a CNAME record is present. + var policyFresh bool + var policy *mtasts.Policy + if !effectiveDomain.IsZero() { + cidctx := context.WithValue(mox.Context, mlog.CidKey, cid) + policy, policyFresh, err = mtastsdb.Get(cidctx, resolver, effectiveDomain) + if err != nil { + fail(false, dsn.NameIP{}, "", err.Error()) + return + } + // note: policy can be nil, if a domain does not implement MTA-STS or its the first + // time we fetch the policy and it we encountered an error. + } + + // We try delivery to each record until we have success or a permanent failure. So + // for transient errors, we'll try the next MX record. For MX records pointing to a + // dual stack host, we turn a permanent failure due to policy on the first delivery + // attempt into a temporary failure and make sure to try the other address family + // the next attempt. This should reduce issues due to one of our IPs being on a + // block list. We won't try multiple IPs of the same address family. Surprisingly, + // RFC 5321 does not specify a clear algorithm, but common practicie is probably + // ../rfc/3974:268. + var remoteMTA dsn.NameIP + var secodeOpt, errmsg string + permanent = false + mtastsFailure := true + // todo: should make distinction between host permanently not accepting the message, and the message not being deliverable permanently. e.g. a mx host may have a size limit, or not accept 8bitmime, while another host in the list does accept the message. 
same for smtputf8, ../rfc/6531:555 + for _, h := range hosts { + var badTLS, ok bool + + // ../rfc/8461:913 + if policy != nil && policy.Mode == mtasts.ModeEnforce && !policy.Matches(h.Domain) { + errmsg = fmt.Sprintf("mx host %v does not match enforced mta-sts policy", h.Domain) + qlog.Error("mx host does not match enforce mta-sts policy, skipping", mlog.Field("host", h.Domain)) + continue + } + + qlog.Info("delivering to remote", mlog.Field("remote", h), mlog.Field("queuecid", cid)) + cid := mox.Cid() + nqlog := qlog.WithCid(cid) + var remoteIP net.IP + var tlsMode smtpclient.TLSMode + if policy != nil && policy.Mode == mtasts.ModeEnforce { + tlsMode = smtpclient.TLSStrict + } else { + tlsMode = smtpclient.TLSOpportunistic + } + permanent, badTLS, secodeOpt, remoteIP, errmsg, ok = deliverHost(nqlog, resolver, cid, h, &m, tlsMode) + if !ok && badTLS && tlsMode == smtpclient.TLSOpportunistic { + // In case of failure with opportunistic TLS, try again without TLS. ../rfc/7435:459 + // todo future: revisit this decision. perhaps it should be a configuration option that defaults to not doing this? 
+ nqlog.Info("connecting again for delivery attempt without TLS") + permanent, badTLS, secodeOpt, remoteIP, errmsg, ok = deliverHost(nqlog, resolver, cid, h, &m, smtpclient.TLSSkip) + } + if ok { + nqlog.Info("delivered from queue") + if err := queueDelete(m.ID); err != nil { + nqlog.Errorx("deleting message from queue after delivery", err) + } + return + } + remoteMTA = dsn.NameIP{Name: h.XString(false), IP: remoteIP} + if !badTLS { + mtastsFailure = false + } + if permanent { + break + } + } + if mtastsFailure && policyFresh { + permanent = true + } + + fail(permanent, remoteMTA, secodeOpt, errmsg) +} + +var ( + errCNAMELoop = errors.New("cname loop") + errCNAMELimit = errors.New("too many cname records") + errNoRecord = errors.New("no dns record") + errDNS = errors.New("dns lookup error") + errNoMail = errors.New("domain does not accept email as indicated with single dot for mx record") +) + +// Gather hosts to try to deliver to. We start with the straight-forward MX record. +// If that does not exist, we'll look for CNAME of the entire domain (following +// chains if needed). If a CNAME does not exist, but the domain name has an A or +// AAAA record, we'll try delivery directly to that host. +// ../rfc/5321:3824 +func gatherHosts(resolver dns.Resolver, m Msg, cid int64, qlog *mlog.Log) (hosts []dns.IPDomain, effectiveDomain dns.Domain, permanent bool, err error) { + if len(m.RecipientDomain.IP) > 0 { + return []dns.IPDomain{m.RecipientDomain}, effectiveDomain, false, nil + } + + // We start out delivering to the recipient domain. We follow CNAMEs a few times. + rcptDomain := m.RecipientDomain.Domain + // Domain we are actually delivering to, after following CNAME record(s). 
+ effectiveDomain = rcptDomain + domainsSeen := map[string]bool{} + for i := 0; ; i++ { + if domainsSeen[effectiveDomain.ASCII] { + return nil, effectiveDomain, true, fmt.Errorf("%w: recipient domain %s: already saw %s", errCNAMELoop, rcptDomain, effectiveDomain) + } + domainsSeen[effectiveDomain.ASCII] = true + + // note: The Go resolver returns the requested name if the domain has no CNAME record but has a host record. + if i == 16 { + // We have a maximum number of CNAME records we follow. There is no hard limit for + // DNS, and you might think folks wouldn't configure CNAME chains at all, but for + // (non-mail) domains, CNAME chains of 10 records have been encountered according + // to the internet. + return nil, effectiveDomain, true, fmt.Errorf("%w: recipient domain %s, last resolved domain %s", errCNAMELimit, rcptDomain, effectiveDomain) + } + + cidctx := context.WithValue(mox.Context, mlog.CidKey, cid) + ctx, cancel := context.WithTimeout(cidctx, 30*time.Second) + defer cancel() + // Note: LookupMX can return an error and still return records: Invalid records are + // filtered out and an error returned. We must process any records that are valid. + // Only if all are unusable will we return an error. ../rfc/5321:3851 + mxl, err := resolver.LookupMX(ctx, effectiveDomain.ASCII+".") + cancel() + if err != nil && len(mxl) == 0 { + if !dns.IsNotFound(err) { + return nil, effectiveDomain, false, fmt.Errorf("%w: mx lookup for %s: %v", errDNS, effectiveDomain, err) + } + + // No MX record. First attempt CNAME lookup. ../rfc/5321:3838 ../rfc/3974:197 + cname, err := resolver.LookupCNAME(ctx, effectiveDomain.ASCII+".") + if err != nil && !dns.IsNotFound(err) { + return nil, effectiveDomain, false, fmt.Errorf("%w: cname lookup for %s: %v", errDNS, effectiveDomain, err) + } + if err == nil && cname != effectiveDomain.ASCII+"." 
{ + d, err := dns.ParseDomain(strings.TrimSuffix(cname, ".")) + if err != nil { + return nil, effectiveDomain, true, fmt.Errorf("%w: parsing cname domain %s: %v", errDNS, effectiveDomain, err) + } + effectiveDomain = d + // Start again with new domain. + continue + } + + // See if the host exists. If so, attempt delivery directly to host. ../rfc/5321:3842 + ctx, cancel = context.WithTimeout(cidctx, 30*time.Second) + defer cancel() + _, err = resolver.LookupHost(ctx, effectiveDomain.ASCII+".") + cancel() + if dns.IsNotFound(err) { + return nil, effectiveDomain, true, fmt.Errorf("%w: recipient domain/host %v", errNoRecord, effectiveDomain) + } else if err != nil { + return nil, effectiveDomain, false, fmt.Errorf("%w: looking up host %v because of no mx record: %v", errDNS, effectiveDomain, err) + } + hosts = []dns.IPDomain{{Domain: effectiveDomain}} + } else if err != nil { + qlog.Infox("partial mx failure, attempting delivery to valid mx records", err) + } + + // ../rfc/7505:122 + if err == nil && len(mxl) == 1 && mxl[0].Host == "." { + return nil, effectiveDomain, true, errNoMail + } + + // The Go resolver already sorts by preference, randomizing records of same + // preference. ../rfc/5321:3885 + for _, mx := range mxl { + host, err := dns.ParseDomain(strings.TrimSuffix(mx.Host, ".")) + if err != nil { + // note: should not happen because Go resolver already filters these out. + return nil, effectiveDomain, true, fmt.Errorf("%w: invalid host name in mx record %q: %v", errDNS, mx.Host, err) + } + hosts = append(hosts, dns.IPDomain{Domain: host}) + } + if len(hosts) > 0 { + err = nil + } + return hosts, effectiveDomain, false, err + } +} + +// deliverHost attempts to deliver m to host. +// deliverHost updated m.DialedIPs, which must be saved in case of failure to deliver. 
+func deliverHost(log *mlog.Log, resolver dns.Resolver, cid int64, host dns.IPDomain, m *Msg, tlsMode smtpclient.TLSMode) (permanent, badTLS bool, secodeOpt string, remoteIP net.IP, errmsg string, ok bool) { + // About attempting delivery to multiple addresses of a host: ../rfc/5321:3898 + + start := time.Now() + var deliveryResult string + defer func() { + metricDeliveryHost.WithLabelValues(fmt.Sprintf("%d", m.Attempts), string(tlsMode), deliveryResult).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debug("queue deliverhost result", mlog.Field("host", host), mlog.Field("attempt", m.Attempts), mlog.Field("tlsmode", tlsMode), mlog.Field("permanent", permanent), mlog.Field("badTLS", badTLS), mlog.Field("secodeOpt", secodeOpt), mlog.Field("errmsg", errmsg), mlog.Field("ok", ok), mlog.Field("duration", time.Since(start))) + }() + + f, err := os.Open(m.MessagePath()) + if err != nil { + return false, false, "", nil, fmt.Sprintf("open message file: %s", err), false + } + msgr := store.FileMsgReader(m.MsgPrefix, f) + defer msgr.Close() + + cidctx := context.WithValue(mox.Context, mlog.CidKey, cid) + ctx, cancel := context.WithTimeout(cidctx, 30*time.Second) + defer cancel() + + conn, ip, dualstack, err := dialHost(ctx, log, resolver, host, m) + remoteIP = ip + cancel() + var result string + switch { + case err == nil: + result = "ok" + case errors.Is(err, os.ErrDeadlineExceeded), errors.Is(err, context.DeadlineExceeded): + result = "timeout" + case errors.Is(err, context.Canceled): + result = "canceled" + default: + result = "error" + } + metricConnection.WithLabelValues(result).Inc() + if err != nil { + log.Debugx("connecting to remote smtp", err, mlog.Field("host", host)) + return false, false, "", ip, fmt.Sprintf("dialing smtp server: %v", err), false + } + + var mailFrom string + if m.SenderLocalpart != "" || !m.SenderDomain.IsZero() { + mailFrom = m.Sender().XString(m.SMTPUTF8) + } + rcptTo := m.Recipient().XString(m.SMTPUTF8) + + // todo future: 
get closer to timeouts specified in rfc? ../rfc/5321:3610 + log = log.Fields(mlog.Field("remoteip", ip)) + ctx, cancel = context.WithTimeout(cidctx, 30*time.Minute) + defer cancel() + mox.Connections.Register(conn, "smtpclient", "queue") + sc, err := smtpclient.New(ctx, log, conn, tlsMode, host.String(), "") + defer func() { + if sc == nil { + conn.Close() + } else { + sc.Close() + } + mox.Connections.Unregister(conn) + }() + if err == nil { + has8bit := m.Has8bit + smtputf8 := m.SMTPUTF8 + var msg io.Reader = msgr + size := m.Size + if m.DSNUTF8 != nil && sc.Supports8BITMIME() && sc.SupportsSMTPUTF8() { + has8bit = true + smtputf8 = true + size = int64(len(m.DSNUTF8)) + msg = bytes.NewReader(m.DSNUTF8) + } + err = sc.Deliver(ctx, mailFrom, rcptTo, size, msg, has8bit, smtputf8) + } + if err != nil { + log.Infox("delivery failed", err) + } + var cerr smtpclient.Error + switch { + case err == nil: + deliveryResult = "ok" + case errors.Is(err, os.ErrDeadlineExceeded), errors.Is(err, context.DeadlineExceeded): + deliveryResult = "timeout" + case errors.Is(err, context.Canceled): + deliveryResult = "canceled" + case errors.As(err, &cerr): + deliveryResult = "temperror" + if cerr.Permanent { + deliveryResult = "permerror" + } + default: + deliveryResult = "error" + } + if err == nil { + return false, false, "", ip, "", true + } else if cerr, ok := err.(smtpclient.Error); ok { + // If we are being rejected due to policy reasons on the first + // attempt and remote has both IPv4 and IPv6, we'll give it + // another try. Our first IP may be in a block list, the address for + // the other family perhaps is not. 
+ permanent := cerr.Permanent + if permanent && m.Attempts == 1 && dualstack && strings.HasPrefix(cerr.Secode, "7.") { + permanent = false + } + return permanent, errors.Is(cerr, smtpclient.ErrTLS), cerr.Secode, ip, cerr.Error(), false + } else { + return false, errors.Is(cerr, smtpclient.ErrTLS), "", ip, err.Error(), false + } +} + +// dialHost dials host for delivering Msg, taking previous attempts into accounts. +// If the previous attempt used IPv4, this attempt will use IPv6 (in case one of the IPs is in a DNSBL). +// The second attempt for an address family we prefer the same IP as earlier, to increase our chances if remote is doing greylisting. +// dialHost updates m with the dialed IP and m should be saved in case of failure. +// If we have fully specified local smtp listen IPs, we set those for the outgoing +// connection. The admin probably configured these same IPs in SPF, but others +// possibly not. +func dialHost(ctx context.Context, log *mlog.Log, resolver dns.Resolver, host dns.IPDomain, m *Msg) (conn net.Conn, ip net.IP, dualstack bool, rerr error) { + var ips []net.IP + if len(host.IP) > 0 { + ips = []net.IP{host.IP} + } else { + // todo: The Go resolver automatically follows CNAMEs, which is not allowed for + // host names in MX records. ../rfc/5321:3861 ../rfc/2181:661 + name := host.Domain.ASCII + "." 
+ ipaddrs, err := resolver.LookupIPAddr(ctx, name) + if err != nil || len(ipaddrs) == 0 { + return nil, nil, false, fmt.Errorf("looking up %q: %v", name, err) + } + var have4, have6 bool + for _, ipaddr := range ipaddrs { + ips = append(ips, ipaddr.IP) + if ipaddr.IP.To4() == nil { + have6 = true + } else { + have4 = true + } + } + dualstack = have4 && have6 + prevIPs := m.DialedIPs[host.String()] + if len(prevIPs) > 0 { + prevIP := prevIPs[len(prevIPs)-1] + prevIs4 := prevIP.To4() != nil + sameFamily := 0 + for _, ip := range prevIPs { + is4 := ip.To4() != nil + if prevIs4 == is4 { + sameFamily++ + } + } + preferPrev := sameFamily == 1 + // We use stable sort so any preferred/randomized listing from DNS is kept intact. + sort.SliceStable(ips, func(i, j int) bool { + aIs4 := ips[i].To4() != nil + bIs4 := ips[j].To4() != nil + if aIs4 != bIs4 { + // Prefer "i" if it is not same address family. + return aIs4 != prevIs4 + } + // Prefer "i" if it is the same as last and we should be preferring it. 
+ return preferPrev && ips[i].Equal(prevIP) + }) + log.Debug("ordered ips for dialing", mlog.Field("ips", ips)) + } + } + + var timeout time.Duration + deadline, ok := ctx.Deadline() + if !ok { + timeout = 30 * time.Second + } else { + timeout = time.Until(deadline) / time.Duration(len(ips)) + } + + var lastErr error + var lastIP net.IP + for _, ip := range ips { + addr := net.JoinHostPort(ip.String(), "25") + log.Debug("dialing remote smtp", mlog.Field("addr", addr)) + var laddr net.Addr + for _, lip := range mox.Conf.Static.SpecifiedSMTPListenIPs { + ipIs4 := ip.To4() != nil + lipIs4 := lip.To4() != nil + if ipIs4 == lipIs4 { + laddr = &net.TCPAddr{IP: lip} + break + } + } + conn, err := dial(ctx, timeout, addr, laddr) + if err == nil { + log.Debug("connected for smtp delivery", mlog.Field("host", host), mlog.Field("addr", addr), mlog.Field("laddr", laddr)) + if m.DialedIPs == nil { + m.DialedIPs = map[string][]net.IP{} + } + name := host.String() + m.DialedIPs[name] = append(m.DialedIPs[name], ip) + return conn, ip, dualstack, nil + } + log.Debugx("connection attempt for smtp delivery", err, mlog.Field("host", host), mlog.Field("addr", addr), mlog.Field("laddr", laddr)) + lastErr = err + lastIP = ip + } + return nil, lastIP, dualstack, lastErr +} diff --git a/queue/queue_test.go b/queue/queue_test.go new file mode 100644 index 0000000..bd670ea --- /dev/null +++ b/queue/queue_test.go @@ -0,0 +1,535 @@ +package queue + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "net" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/store" +) + +func tcheck(t *testing.T, err error, msg string) { + if err != nil { + t.Helper() + t.Fatalf("%s: %s", msg, err) + } +} + +func setup(t *testing.T) (*store.Account, func()) { + // Prepare config so email can be delivered to mjl@mox.example. 
+ os.RemoveAll("../testdata/queue/data") + mox.Context = context.Background() + mox.ConfigStaticPath = "../testdata/queue/mox.conf" + mox.MustLoadConfig() + acc, err := store.OpenAccount("mjl") + tcheck(t, err, "open account") + err = acc.SetPassword("testtest") + tcheck(t, err, "set password") + switchDone := store.Switchboard() + mox.Shutdown = make(chan struct{}) + return acc, func() { + acc.Close() + close(mox.Shutdown) + mox.Shutdown = make(chan struct{}) + Shutdown() + close(switchDone) + } +} + +var testmsg = strings.ReplaceAll(`From: +To: +Subject: test + +test email +`, "\n", "\r\n") + +func prepareFile(t *testing.T) *os.File { + t.Helper() + msgFile, err := store.CreateMessageTemp("queue") + tcheck(t, err, "create temp message for delivery to queue") + _, err = msgFile.Write([]byte(testmsg)) + tcheck(t, err, "write message file") + return msgFile +} + +func TestQueue(t *testing.T) { + acc, cleanup := setup(t) + defer cleanup() + err := Init() + tcheck(t, err, "queue init") + + msgs, err := List() + tcheck(t, err, "listing messages in queue") + if len(msgs) != 0 { + t.Fatalf("got %d messages in queue, expected 0", len(msgs)) + } + + path := smtp.Path{Localpart: "mjl", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mox.example"}}} + err = Add(xlog, "mjl", path, path, false, false, int64(len(testmsg)), nil, prepareFile(t), nil, true) + tcheck(t, err, "add message to queue for delivery") + + mf2 := prepareFile(t) + err = Add(xlog, "mjl", path, path, false, false, int64(len(testmsg)), nil, mf2, nil, false) + tcheck(t, err, "add message to queue for delivery") + os.Remove(mf2.Name()) + + msgs, err = List() + tcheck(t, err, "listing queue") + if len(msgs) != 2 { + t.Fatalf("got msgs %v, expected 1", msgs) + } + msg := msgs[0] + if msg.Attempts != 0 { + t.Fatalf("msg attempts %d, expected 0", msg.Attempts) + } + n, err := Drop(msgs[1].ID, "", "") + tcheck(t, err, "drop") + if n != 1 { + t.Fatalf("dropped %d, expected 1", n) + } + + next := nextWork(nil) + if 
next > 0 { + t.Fatalf("nextWork in %s, should be now", next) + } + busy := map[string]struct{}{"mox.example": {}} + if x := nextWork(busy); x != 24*time.Hour { + t.Fatalf("nextWork in %s for busy domain, should be in 24 hours", x) + } + if nn := launchWork(nil, busy); nn != 0 { + t.Fatalf("launchWork launched %d deliveries, expected 0", nn) + } + + // Override dial function. We'll make connecting fail for now. + resolver := dns.MockResolver{ + A: map[string][]string{"mox.example.": {"127.0.0.1"}}, + MX: map[string][]*net.MX{"mox.example.": {{Host: "mox.example", Pref: 10}}}, + } + dialed := make(chan struct{}, 1) + dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + dialed <- struct{}{} + return nil, fmt.Errorf("failure from test") + } + + launchWork(resolver, map[string]struct{}{}) + + // Wait until we see the dial and the failed attempt. + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-dialed: + i := 0 + for { + m, err := bstore.QueryDB[Msg](queueDB).Get() + tcheck(t, err, "get") + if m.Attempts == 1 { + break + } + i++ + if i == 10 { + t.Fatalf("message in queue not updated") + } + time.Sleep(100 * time.Millisecond) + } + case <-timer.C: + t.Fatalf("no dial within 1s") + } + <-deliveryResult // Deliver sends here. + + _, err = OpenMessage(msg.ID + 1) + if err != bstore.ErrAbsent { + t.Fatalf("OpenMessage, got %v, expected ErrAbsent", err) + } + reader, err := OpenMessage(msg.ID) + tcheck(t, err, "open message") + defer reader.Close() + msgbuf, err := io.ReadAll(reader) + tcheck(t, err, "read message") + if string(msgbuf) != testmsg { + t.Fatalf("message mismatch, got %q, expected %q", string(msgbuf), testmsg) + } + + n, err = Kick(msg.ID+1, "", "") + tcheck(t, err, "kick") + if n != 0 { + t.Fatalf("kick %d, expected 0", n) + } + n, err = Kick(msg.ID, "", "") + tcheck(t, err, "kick") + if n != 1 { + t.Fatalf("kicked %d, expected 1", n) + } + + // Setting up a pipe. 
We'll start a fake smtp server on the server-side. And return the + // client-side to the invocation dial, for the attempted delivery from the queue. + // The delivery should succeed. + server, client := net.Pipe() + defer server.Close() + defer client.Close() + + smtpdone := make(chan struct{}) + go func() { + // We do a minimal fake smtp server. We cannot import smtpserver.Serve due to cyclic dependencies. + fmt.Fprintf(server, "220 mox.example\r\n") + br := bufio.NewReader(server) + br.ReadString('\n') // Should be EHLO. + fmt.Fprintf(server, "250 ok\r\n") + br.ReadString('\n') // Should be MAIL FROM. + fmt.Fprintf(server, "250 ok\r\n") + br.ReadString('\n') // Should be RCPT TO. + fmt.Fprintf(server, "250 ok\r\n") + br.ReadString('\n') // Should be DATA. + fmt.Fprintf(server, "354 continue\r\n") + reader := smtp.NewDataReader(br) + io.Copy(io.Discard, reader) + fmt.Fprintf(server, "250 ok\r\n") + br.ReadString('\n') // Should be QUIT. + fmt.Fprintf(server, "221 ok\r\n") + + smtpdone <- struct{}{} + }() + + dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + dialed <- struct{}{} + return client, nil + } + launchWork(resolver, map[string]struct{}{}) + + timer.Reset(time.Second) + select { + case <-dialed: + select { + case <-smtpdone: + i := 0 + for { + xmsgs, err := List() + tcheck(t, err, "list queue") + if len(xmsgs) == 0 { + break + } + i++ + if i == 10 { + t.Fatalf("%d messages in queue, expected 0", len(xmsgs)) + } + time.Sleep(100 * time.Millisecond) + } + case <-timer.C: + t.Fatalf("no deliver within 1s") + } + case <-timer.C: + t.Fatalf("no dial within 1s") + } + <-deliveryResult // Deliver sends here. + + // Add another message that we'll fail to deliver entirely. 
+ err = Add(xlog, "mjl", path, path, false, false, int64(len(testmsg)), nil, prepareFile(t), nil, true) + tcheck(t, err, "add message to queue for delivery") + + msgs, err = List() + tcheck(t, err, "list queue") + if len(msgs) != 1 { + t.Fatalf("queue has %d messages, expected 1", len(msgs)) + } + msg = msgs[0] + + prepServer := func(code string) (net.Conn, func()) { + server, client := net.Pipe() + go func() { + fmt.Fprintf(server, "%s mox.example\r\n", code) + server.Close() + }() + return client, func() { + server.Close() + client.Close() + } + } + + conn2, cleanup2 := prepServer("220") + conn3, cleanup3 := prepServer("451") + defer func() { + cleanup2() + cleanup3() + }() + + seq := 0 + dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + seq++ + switch seq { + default: + return nil, fmt.Errorf("connect error from test") + case 2: + return conn2, nil + case 3: + return conn3, nil + } + } + + comm := store.RegisterComm(acc) + defer comm.Unregister() + + for i := 1; i < 8; i++ { + go func() { <-deliveryResult }() // Deliver sends here. + deliver(resolver, msg) + err = queueDB.Get(&msg) + tcheck(t, err, "get msg") + if msg.Attempts != i { + t.Fatalf("got attempt %d, expected %d", msg.Attempts, i) + } + if msg.Attempts == 5 { + timer.Reset(time.Second) + changes := make(chan struct{}, 1) + go func() { + comm.Get() + changes <- struct{}{} + }() + select { + case <-changes: + case <-timer.C: + t.Fatalf("no dsn in 1s") + } + } + } + + // Trigger final failure. + go func() { <-deliveryResult }() // Deliver sends here. 
+ deliver(resolver, msg) + err = queueDB.Get(&msg) + if err != bstore.ErrAbsent { + t.Fatalf("attempt to fetch delivered and removed message from queue, got err %v, expected ErrAbsent", err) + } + + timer.Reset(time.Second) + changes := make(chan struct{}, 1) + go func() { + comm.Get() + changes <- struct{}{} + }() + select { + case <-changes: + case <-timer.C: + t.Fatalf("no dsn in 1s") + } +} + +// test Start and that it attempts to deliver. +func TestQueueStart(t *testing.T) { + // Override dial function. We'll make connecting fail and check the attempt. + resolver := dns.MockResolver{ + A: map[string][]string{"mox.example.": {"127.0.0.1"}}, + MX: map[string][]*net.MX{"mox.example.": {{Host: "mox.example", Pref: 10}}}, + } + dialed := make(chan struct{}, 1) + dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + dialed <- struct{}{} + return nil, fmt.Errorf("failure from test") + } + + _, cleanup := setup(t) + defer cleanup() + done := make(chan struct{}, 1) + defer func() { + close(mox.Shutdown) + <-done + mox.Shutdown = make(chan struct{}) + }() + err := Start(resolver, done) + tcheck(t, err, "queue start") + + checkDialed := func(need bool) { + t.Helper() + d := time.Second / 10 + if need { + d = time.Second + } + timer := time.NewTimer(d) + defer timer.Stop() + select { + case <-dialed: + if !need { + t.Fatalf("unexpected dial attempt") + } + case <-timer.C: + if need { + t.Fatalf("expected to see a dial attempt") + } + } + } + + path := smtp.Path{Localpart: "mjl", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mox.example"}}} + err = Add(xlog, "mjl", path, path, false, false, int64(len(testmsg)), nil, prepareFile(t), nil, true) + tcheck(t, err, "add message to queue for delivery") + checkDialed(true) + + // Don't change message nextattempt time, but kick queue. Message should not be delivered. + queuekick() + checkDialed(false) + + // Kick for real, should see another attempt. 
+ n, err := Kick(0, "mox.example", "") + tcheck(t, err, "kick queue") + if n != 1 { + t.Fatalf("kick changed %d messages, expected 1", n) + } + checkDialed(true) + time.Sleep(100 * time.Millisecond) // Racy... we won't get notified when work is done... +} + +func TestWriteFile(t *testing.T) { + name := "../testdata/queue.test" + os.Remove(name) + defer os.Remove(name) + err := writeFile(name, strings.NewReader("test")) + if err != nil { + t.Fatalf("writeFile, unexpected error %v", err) + } + buf, err := os.ReadFile(name) + if err != nil || string(buf) != "test" { + t.Fatalf("writeFile, read file, got err %v, data %q", err, buf) + } +} + +func TestGatherHosts(t *testing.T) { + mox.Context = context.Background() + + // Test basic MX lookup case, but also following CNAME, detecting CNAME loops and + // having a CNAME limit, connecting directly to a host, and domain that does not + // exist or has temporary error. + + resolver := dns.MockResolver{ + MX: map[string][]*net.MX{ + "basic.example.": {{Host: "mail.basic.example.", Pref: 10}}, + "multimx.example.": {{Host: "mail1.multimx.example.", Pref: 10}, {Host: "mail2.multimx.example.", Pref: 10}}, + "nullmx.example.": {{Host: ".", Pref: 10}}, + "temperror-mx.example.": {{Host: "absent.example.", Pref: 10}}, + }, + A: map[string][]string{ + "mail.basic.example": {"10.0.0.1"}, + "justhost.example.": {"10.0.0.1"}, // No MX record for domain, only an A record. + "temperror-a.example.": {"10.0.0.1"}, + }, + AAAA: map[string][]string{ + "justhost6.example.": {"2001:db8::1"}, // No MX record for domain, only an AAAA record. + }, + CNAME: map[string]string{ + "cname.example.": "basic.example.", + "cnameloop.example.": "cnameloop2.example.", + "cnameloop2.example.": "cnameloop.example.", + "danglingcname.example.": "absent.example.", // Points to missing name. 
+ "temperror-cname.example.": "absent.example.", + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "mx", Name: "temperror-mx.example."}: {}, + {Type: "host", Name: "temperror-a.example."}: {}, + {Type: "cname", Name: "temperror-cname.example."}: {}, + }, + } + for i := 0; i <= 16; i++ { + s := fmt.Sprintf("cnamelimit%d.example.", i) + next := fmt.Sprintf("cnamelimit%d.example.", i+1) + resolver.CNAME[s] = next + } + + test := func(ipd dns.IPDomain, expHosts []dns.IPDomain, expDomain dns.Domain, expPerm bool, expErr error) { + t.Helper() + + m := Msg{RecipientDomain: ipd} + hosts, ed, perm, err := gatherHosts(resolver, m, 1, xlog) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + // todo: could also check the individual errors? code currently does not have structured errors. + t.Fatalf("gather hosts: %v", err) + } + if err != nil { + return + } + if !reflect.DeepEqual(hosts, expHosts) || ed != expDomain || perm != expPerm { + t.Fatalf("got hosts %#v, effectiveDomain %#v, permanent %#v, expected %#v %#v %#v", hosts, ed, perm, expHosts, expDomain, expPerm) + } + } + + domain := func(s string) dns.Domain { + d, err := dns.ParseDomain(s) + if err != nil { + t.Fatalf("parse domain: %v", err) + } + return d + } + ipdomain := func(s string) dns.IPDomain { + ip := net.ParseIP(s) + if ip != nil { + return dns.IPDomain{IP: ip} + } + d, err := dns.ParseDomain(s) + if err != nil { + t.Fatalf("parse domain %q: %v", s, err) + } + return dns.IPDomain{Domain: d} + } + + ipdomains := func(s ...string) (l []dns.IPDomain) { + for _, e := range s { + l = append(l, ipdomain(e)) + } + return + } + + var zerodom dns.Domain + + test(ipdomain("10.0.0.1"), ipdomains("10.0.0.1"), zerodom, false, nil) + test(ipdomain("basic.example"), ipdomains("mail.basic.example"), domain("basic.example"), false, nil) // Basic with simple MX. 
+ test(ipdomain("multimx.example"), ipdomains("mail1.multimx.example", "mail2.multimx.example"), domain("multimx.example"), false, nil) // Basic with simple MX. + test(ipdomain("justhost.example"), ipdomains("justhost.example"), domain("justhost.example"), false, nil) // Only an A record. + test(ipdomain("justhost6.example"), ipdomains("justhost6.example"), domain("justhost6.example"), false, nil) // Only an AAAA record. + test(ipdomain("cname.example"), ipdomains("mail.basic.example"), domain("basic.example"), false, nil) // Follow CNAME. + test(ipdomain("cnamelimit1.example"), nil, zerodom, true, errCNAMELimit) + test(ipdomain("cnameloop.example"), nil, zerodom, true, errCNAMELoop) + test(ipdomain("absent.example"), nil, zerodom, true, errNoRecord) + test(ipdomain("danglingcname.example"), nil, zerodom, true, errNoRecord) + test(ipdomain("nullmx.example"), nil, zerodom, true, errNoMail) + test(ipdomain("temperror-mx.example"), nil, zerodom, false, errDNS) + test(ipdomain("temperror-cname.example"), nil, zerodom, false, errDNS) + test(ipdomain("temperror-a.example"), nil, zerodom, false, errDNS) +} + +func TestDialHost(t *testing.T) { + // We mostly want to test that dialing a second time switches to the other address family. + + resolver := dns.MockResolver{ + A: map[string][]string{ + "dualstack.example.": {"10.0.0.1"}, + }, + AAAA: map[string][]string{ + "dualstack.example.": {"2001:db8::1"}, + }, + } + + dial = func(ctx context.Context, timeout time.Duration, addr string, laddr net.Addr) (net.Conn, error) { + return nil, nil // No error, nil connection isn't used. 
+ } + + ipdomain := func(s string) dns.IPDomain { + return dns.IPDomain{Domain: dns.Domain{ASCII: s}} + } + + m := Msg{DialedIPs: map[string][]net.IP{}} + _, ip, dualstack, err := dialHost(context.Background(), xlog, resolver, ipdomain("dualstack.example"), &m) + if err != nil || ip.String() != "10.0.0.1" || !dualstack { + t.Fatalf("expected err nil, address 10.0.0.1, dualstack true, got %v %v %v", err, ip, dualstack) + } + _, ip, dualstack, err = dialHost(context.Background(), xlog, resolver, ipdomain("dualstack.example"), &m) + if err != nil || ip.String() != "2001:db8::1" || !dualstack { + t.Fatalf("expected err nil, address 2001:db8::1, dualstack true, got %v %v %v", err, ip, dualstack) + } +} diff --git a/quickstart.go b/quickstart.go new file mode 100644 index 0000000..a5eded9 --- /dev/null +++ b/quickstart.go @@ -0,0 +1,441 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "log" + "net" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + _ "embed" + + "golang.org/x/crypto/bcrypt" + + "github.com/mjl-/sconf" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/store" +) + +//go:embed mox.service +var moxService string + +func pwgen() string { + rand := mox.NewRand() + chars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-_;:,<.>/" + s := "" + for i := 0; i < 12; i++ { + s += string(chars[rand.Intn(len(chars))]) + } + return s +} + +func cmdQuickstart(c *cmd) { + c.params = "user@domain" + c.help = `Quickstart generates configuration files and prints instructions to quickly set up a mox instance. + +Quickstart prints initial admin and account passwords, configuration files, DNS +records you should create, instructions for setting correct user/group and +permissions, and if you run it on Linux it prints a systemd service file. 
+` + args := c.Parse() + if len(args) != 1 { + c.Usage() + } + + // We take care to cleanup created files when we error out. + // We don't want to get a new user into trouble with half of the files + // after encountering an error. + + // We use fatalf instead of log.Fatal* to cleanup files. + var cleanupPaths []string + fatalf := func(format string, args ...any) { + // We remove in reverse order because dirs would have been created first and must + // be removed last, after their files have been removed. + for i := len(cleanupPaths) - 1; i >= 0; i-- { + p := cleanupPaths[i] + if err := os.Remove(p); err != nil { + log.Printf("cleaning up %q: %s", p, err) + } + } + + log.Fatalf(format, args...) + } + + xwritefile := func(path string, data []byte, perm os.FileMode) { + os.MkdirAll(filepath.Dir(path), 0770) + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) + if err != nil { + fatalf("creating file %q: %s", path, err) + } + cleanupPaths = append(cleanupPaths, path) + _, err = f.Write(data) + if err == nil { + err = f.Close() + } + if err != nil { + fatalf("writing file %q: %s", path, err) + } + } + + addr, err := smtp.ParseAddress(args[0]) + if err != nil { + fatalf("parsing email address: %s", err) + } + username := addr.Localpart.String() + domain := addr.Domain + + for _, c := range username { + if c > 0x7f { + fmt.Printf("NOTE: Username %q is not ASCII-only. It is recommended you also configure an\nASCII-only alias. Both for delivery of email from other systems, and for\nlogging in with IMAP.\n\n", username) + break + } + } + + var hostname dns.Domain + hostnameStr, err := os.Hostname() + if err != nil { + fatalf("hostname: %s", err) + } + if strings.Contains(hostnameStr, ".") { + hostname, err = dns.ParseDomain(hostnameStr) + if err != nil { + fatalf("parsing hostname: %v", err) + } + } else { + hostname, err = dns.ParseDomain(hostnameStr + "." 
+ domain.Name()) + if err != nil { + fatalf("parsing hostname: %v", err) + } + } + + // todo: lookup without going through /etc/hosts, because a machine typically has its name configured there, and LookupIPAddr will return it, but we care about DNS settings that the rest of the world uses to find us. perhaps we should check if the address resolves to 127.0.0.0/8? + fmt.Printf("Looking up hostname %q...", hostname) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err = dns.StrictResolver{}.LookupIPAddr(ctx, hostname.ASCII+".") + if err != nil { + fmt.Printf("\n\nWARNING: Quickstart assumes hostname %q and generates a config for that host,\nbut could not retrieve that name from DNS:\n\n\t%s\n\n", hostname, err) + } else { + fmt.Printf(" OK\n") + } + cancel() + + dc := config.Dynamic{} + sc := config.Static{DataDir: "../data"} + os.MkdirAll(sc.DataDir, 0770) + sc.LogLevel = "info" + sc.Hostname = hostname.Name() + sc.ACME = map[string]config.ACME{ + "letsencrypt": { + DirectoryURL: "https://acme-v02.api.letsencrypt.org/directory", + ContactEmail: args[0], // todo: let user specify an alternative fallback address? + }, + } + sc.AdminPasswordFile = "adminpasswd" + adminpw := pwgen() + adminpwhash, err := bcrypt.GenerateFromPassword([]byte(adminpw), bcrypt.DefaultCost) + if err != nil { + fatalf("generating hash for generated admin password: %s", err) + } + xwritefile(filepath.Join("config", sc.AdminPasswordFile), adminpwhash, 0660) + fmt.Printf("Admin password: %s\n", adminpw) + + // Gather IP addresses for public and private listeners. + // If we cannot find addresses for a category we fallback to all ips or localhost ips. + // We look at each network interface. If an interface has a private address, we + // conservatively assume all addresses on that interface are private. 
+ ifaces, err := net.Interfaces() + if err != nil { + fatalf("listing network interfaces: %s", err) + } + var privateIPs, publicIPs []string + parseAddrIP := func(s string) net.IP { + if strings.HasPrefix(s, "[") && strings.HasSuffix(s, "]") { + s = s[1 : len(s)-1] + } + ip, _, _ := net.ParseCIDR(s) + return ip + } + for _, iface := range ifaces { + if iface.Flags&net.FlagUp == 0 { + continue + } + addrs, err := iface.Addrs() + if err != nil { + fatalf("listing address for network interface: %s", err) + } + if len(addrs) == 0 { + continue + } + + // todo: should we detect temporary/ephemeral ipv6 addresses and not add them? + var nonpublic bool + for _, addr := range addrs { + ip := parseAddrIP(addr.String()) + if ip.IsInterfaceLocalMulticast() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() || ip.IsMulticast() { + continue + } + if ip.IsLoopback() || ip.IsPrivate() { + nonpublic = true + break + } + } + + for _, addr := range addrs { + ip := parseAddrIP(addr.String()) + if ip == nil { + continue + } + if ip.IsInterfaceLocalMulticast() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() || ip.IsMulticast() { + continue + } + if nonpublic { + privateIPs = append(privateIPs, ip.String()) + } else { + publicIPs = append(publicIPs, ip.String()) + } + } + } + + publicListenerIPs := []string{"0.0.0.0", "::"} + privateListenerIPs := []string{"127.0.0.1", "::1"} + if len(publicIPs) > 0 { + publicListenerIPs = publicIPs + } + if len(privateIPs) > 0 { + privateListenerIPs = privateIPs + } + + public := config.Listener{ + IPs: publicListenerIPs, + TLS: &config.TLS{ + ACME: "letsencrypt", + }, + } + public.SMTP.Enabled = true + public.Submissions.Enabled = true + public.IMAPS.Enabled = true + public.AutoconfigHTTPS.Enabled = true + public.MTASTSHTTPS.Enabled = true + + // Suggest blocklists, but we'll comment them out after generating the config. 
+ public.SMTP.DNSBLs = []string{"sbl.spamhaus.org", "bl.spamcop.net"} + + internal := config.Listener{ + IPs: privateListenerIPs, + Hostname: "localhost", + } + internal.AdminHTTP.Enabled = true + internal.MetricsHTTP.Enabled = true + + sc.Listeners = map[string]config.Listener{ + "public": public, + "internal": internal, + } + sc.Postmaster.Account = username + sc.Postmaster.Mailbox = "Postmaster" + + mox.ConfigStaticPath = "config/mox.conf" + mox.ConfigDynamicPath = "config/domains.conf" + + mox.Conf.DynamicLastCheck = time.Now() // Prevent error logging by Make calls below. + + accountConf := mox.MakeAccountConfig(addr) + confDomain, keyPaths, err := mox.MakeDomainConfig(context.Background(), domain, hostname, username) + if err != nil { + fatalf("making domain config: %s", err) + } + cleanupPaths = append(cleanupPaths, keyPaths...) + + dc.Domains = map[string]config.Domain{ + domain.Name(): confDomain, + } + dc.Accounts = map[string]config.Account{ + username: accountConf, + } + + // Build config in memory, so we can easily comment out the DNSBLs config. + var sb strings.Builder + sc.CheckUpdates = true // Commented out below. + if err := sconf.WriteDocs(&sb, &sc); err != nil { + fatalf("generating static config: %v", err) + } + confstr := sb.String() + confstr = strings.ReplaceAll(confstr, "\nCheckUpdates: true\n", "\n#\n# RECOMMENDED: please enable to stay up to date\n#\n#CheckUpdates: true\n") + confstr = strings.ReplaceAll(confstr, "DNSBLs:\n", "#DNSBLs:\n") + for _, bl := range public.SMTP.DNSBLs { + confstr = strings.ReplaceAll(confstr, "- "+bl+"\n", "#- "+bl+"\n") + } + xwritefile("config/mox.conf", []byte(confstr), 0660) + + // Generate domains config, and add a commented out example for delivery to a mailing list. + var db bytes.Buffer + if err := sconf.WriteDocs(&db, &dc); err != nil { + fatalf("generating domains config: %v", err) + } + + // This approach is a bit horrible, but it generates a convenient + // example that includes the comments. 
Though it is gone by the first + // write of the file by mox. + odests := fmt.Sprintf("\t\tDestinations:\n\t\t\t%s: nil\n", addr.Localpart.String()) + var destsExample = struct { + Destinations map[string]config.Destination + }{ + Destinations: map[string]config.Destination{ + addr.Localpart.String(): { + Rulesets: []config.Ruleset{ + { + VerifiedDomain: "list.example.org", + HeadersRegexp: map[string]string{ + "^list-id$": ``, + }, + ListAllowDomain: "list.example.org", + Mailbox: "Lists/Example", + }, + }, + }, + }, + } + var destBuf strings.Builder + if err := sconf.Describe(&destBuf, destsExample); err != nil { + fatalf("describing destination example: %v", err) + } + ndests := odests + "#\t\t\tIf you receive email from mailing lists, you probably want to configure them like the example below.\n" + for _, line := range strings.Split(destBuf.String(), "\n")[1:] { + ndests += "#\t\t" + line + "\n" + } + dconfstr := strings.ReplaceAll(db.String(), odests, ndests) + xwritefile("config/domains.conf", []byte(dconfstr), 0660) + + // Verify config. + mc, errs := mox.ParseConfig(context.Background(), "config/mox.conf", true) + if len(errs) > 0 { + if len(errs) > 1 { + log.Printf("checking generated config, multiple errors:") + for _, err := range errs { + log.Println(err) + } + fatalf("aborting due to multiple config errors") + } + fatalf("checking generated config: %s", errs[0]) + } + mox.SetConfig(mc) + // NOTE: Now that we've prepared the config, we can open the account + // and set a passsword, and the public key for the DKIM private keys + // are available for generating the DKIM DNS records below. 
+ + confDomain, ok := mc.Domain(domain) + if !ok { + fatalf("cannot find domain in new config") + } + + acc, _, err := store.OpenEmail(args[0]) + if err != nil { + fatalf("open account: %s", err) + } + cleanupPaths = append(cleanupPaths, sc.DataDir, filepath.Join(sc.DataDir, "accounts"), filepath.Join(sc.DataDir, "accounts", username), filepath.Join(sc.DataDir, "accounts", username, "index.db")) + + password := pwgen() + if err := acc.SetPassword(password); err != nil { + fatalf("setting password: %s", err) + } + if err := acc.Close(); err != nil { + fatalf("closing account: %s", err) + } + fmt.Printf("IMAP and SMTP submission password for %s: %s\n\n", args[0], password) + fmt.Println(`When configuring your email client, use the email address as username. If +autoconfig/autodiscover does not work, use the settings below.`) + fmt.Println("") + printClientConfig(domain) + + fmt.Println("") + fmt.Println(`Configuration files have been written to config/mox.conf and +config/domains.sc. You should review them. Then create the DNS records below. +You can also skip creating the DNS records and start mox immediately. The admin +interface can show these same records, and has a page to check they have been +configured correctly.`) + + // We do not verify the records exist: If they don't exist, we would only be + // priming dns caches with negative/absent records, causing our "quick setup" to + // appear to fail or take longer than "quick". + + records, err := mox.DomainRecords(confDomain, domain) + if err != nil { + fatalf("making required DNS records") + } + fmt.Print("\n\n\n" + strings.Join(records, "\n") + "\n\n\n\n") + + fmt.Printf(`WARNING: The configuration and DNS records above assume you do not currently +have email configured for your domain. If you do already have email configured, +or if you are sending email for your domain from other machines/services, you +should understand the consequences of the DNS records above before +continuing! 
+ +You can now start mox with "mox serve", but see below for recommended ownership +and permissions. + +`) + + userName := "root" + groupName := "root" + if u, err := user.Current(); err != nil { + log.Printf("get current user: %v", err) + } else { + userName = u.Username + if g, err := user.LookupGroupId(u.Gid); err != nil { + log.Printf("get current group: %v", err) + } else { + groupName = g.Name + } + } + fmt.Printf(`Assuming the mox binary is in the current directory, and you will run mox under +user name "mox", and the admin user is the current user, the following command +sets the correct permissions: + + sudo useradd --no-create-home --home-dir $PWD mox + sudo chown %s:mox . mox + sudo chown -R mox:%s config data + sudo chmod 751 . + sudo chmod 750 mox + sudo chmod -R u=rwX,g=rwX,o= config data + sudo chmod g+s $(find . -type d) + +`, userName, groupName) + + // For now, we only give service config instructions for linux. + if runtime.GOOS == "linux" { + pwd, err := os.Getwd() + if err != nil { + log.Printf("current working directory: %v", err) + pwd = "/home/service/mox" + } + service := strings.ReplaceAll(moxService, "/home/service/mox", pwd) + xwritefile("mox.service", []byte(service), 0644) + cleanupPaths = append(cleanupPaths, "mox.service") + fmt.Printf(`See mox.service for a systemd service file. To enable and start: + + sudo chmod 644 mox.service + sudo systemctl enable $PWD/mox.service + sudo systemctl start mox.service + sudo journalctl -f -u mox.service # See logs + +`) + } + + fmt.Println(`For secure email exchange you should have a strictly validating DNSSEC +resolver. An easy and the recommended way is to install unbound. 
+ +Enjoy!`) + + cleanupPaths = nil +} diff --git a/rfc/Makefile b/rfc/Makefile new file mode 100644 index 0000000..7ee44a9 --- /dev/null +++ b/rfc/Makefile @@ -0,0 +1,7 @@ +default: fetch link + +fetch: + ./fetch.sh + +link: + go run -tags link link.go -- ../*.go ../*/*.go diff --git a/rfc/errata.go b/rfc/errata.go new file mode 100644 index 0000000..fb1506f --- /dev/null +++ b/rfc/errata.go @@ -0,0 +1,59 @@ +//go:build errata + +package main + +// Convert eid html file, e.g. https://www.rfc-editor.org/errata/eid3192 to text with leading blank line for references. +// See Makefile, run with "go run errata.go < eid.html >eid.txt" +// I could not find a source for the text version of errata. + +import ( + "bufio" + "fmt" + "log" + "os" + + "golang.org/x/net/html" +) + +func xcheckf(err error, format string, args ...any) { + if err != nil { + log.Fatalf("%s: %s", fmt.Sprintf(format, args...), err) + } +} + +func main() { + log.SetFlags(0) + doc, err := html.Parse(os.Stdin) + xcheckf(err, "parsing html") + out := bufio.NewWriter(os.Stdout) + _, err = out.WriteString("\n") // First line for references. + xcheckf(err, "write") + + // We will visit the html nodes. We skip
's. We turn on text + // output when we encounter an h3, and we stop again when we see a div + // or form. This works at the moment, but may break in the future. + output := false + var walk func(*html.Node) + walk = func(n *html.Node) { + if n.Type == html.ElementNode { + if n.Data == "form" { + return + } + if !output && n.Data == "h3" { + output = true + } else if output && (n.Data == "div" || n.Data == "form") { + output = false + } + } + if output && n.Type == html.TextNode { + _, err := out.WriteString(n.Data) + xcheckf(err, "write") + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + walk(c) + } + } + walk(doc) + err = out.Flush() + xcheckf(err, "flush") +} diff --git a/rfc/fetch.sh b/rfc/fetch.sh new file mode 100755 index 0000000..aa17b7a --- /dev/null +++ b/rfc/fetch.sh @@ -0,0 +1,14 @@ +#!/bin/sh +for number in $(sed -n 's/^\([0-9][0-9]*\)[ \t].*$/\1/p' index.md); do + if ! test -f "$number"; then + curl https://www.rfc-editor.org/rfc/rfc$number.txt >$number || rm $number + fi +done + +for name in $(sed -n 's/^\([0-9][0-9]*-eid[0-9][0-9]*\)[ \t].*$/\1/p' index.md); do + if ! test -f "$name"; then + rfc=$(echo $name | cut -f1 -d-) + eid=$(echo $name | cut -f2 -d-) + curl https://www.rfc-editor.org/errata/$eid | go run errata.go >$name || rm $name + fi +done diff --git a/rfc/index.md b/rfc/index.md new file mode 100644 index 0000000..48d98be --- /dev/null +++ b/rfc/index.md @@ -0,0 +1,301 @@ +This file lists RFC's by number and title. "make" fetches the RFC's and adds references back to the source code where they are referenced. 
+ +Also see IANA assignments, https://www.iana.org/protocols + +# Mail, message format, MIME +822 Standard for ARPA Internet Text Messages +2045 Multipurpose Internet Mail Extensions (MIME) Part One: Format of Internet Message Bodies +2046 Multipurpose Internet Mail Extensions (MIME) Part Two: Media Types +2047 MIME (Multipurpose Internet Mail Extensions) Part Three: Message Header Extensions for Non-ASCII Text +2049 Multipurpose Internet Mail Extensions (MIME) Part Five: Conformance Criteria and Examples +2231 MIME Parameter Value and Encoded Word Extensions: Character Sets, Languages, and Continuations +3629 UTF-8, a transformation format of ISO 10646 +3834 Recommendations for Automatic Responses to Electronic Mail +5234 Augmented BNF for Syntax Specifications: ABNF +5322 Internet Message Format +5598 Internet Mail Architecture +7405 Case-Sensitive String Support in ABNF + +# SMTP + +821 (obsoleted by RFC 2821) SIMPLE MAIL TRANSFER PROTOCOL +2821 (obsoleted by RFC 5321) Simple Mail Transfer Protocol +5321 Simple Mail Transfer Protocol + +1870 SMTP Service Extension for Message Size Declaration +1985 SMTP Service Extension for Remote Message Queue Starting +2034 SMTP Service Extension for Returning Enhanced Error Codes +2852 Deliver By SMTP Service Extension +2920 SMTP Service Extension for Command Pipelining +2505 Anti-Spam Recommendations for SMTP MTAs +2852 Deliver By SMTP Service Extension +3207 SMTP Service Extension for Secure SMTP over Transport Layer Security (STARTTLS) +3030 SMTP Service Extensions for Transmission of Large and Binary MIME Messages +3461 Simple Mail Transfer Protocol (SMTP) Service Extension for Delivery Status Notifications (DSNs) +3462 (obsoleted by RFC 6522) The Multipart/Report Content Type for the Reporting of Mail System Administrative Messages +3463 Enhanced Mail System Status Codes +3464 An Extensible Message Format for Delivery Status Notifications +3798 (obsoleted by RFC 8098) Message Disposition Notification +3848 ESMTP and 
LMTP Transmission Types Registration +3865 A No Soliciting Simple Mail Transfer Protocol (SMTP) Service Extension +3885 SMTP Service Extension for Message Tracking +3974 SMTP Operational Experience in Mixed IPv4/v6 Environments +4409 (obsoleted by RFC 6409) Message Submission for Mail +4865 SMTP Submission Service Extension for Future Message Release +4954 SMTP Service Extension for Authentication +5068 Email Submission Operations: Access and Accountability Requirements +5248 A Registry for SMTP Enhanced Mail System Status Codes +5335 (obsoleted by RFC 6532) Internationalized Email Headers +5336 (obsoleted by RFC 6531) SMTP Extension for Internationalized Email Addresses +5337 (obsoleted by RFC 6533) Internationalized Delivery Status and Disposition Notifications +6008 Authentication-Results Registration for Differentiating among Cryptographic Results +6152 SMTP Service Extension for 8-bit MIME Transport +6409 Message Submission for Mail +6522 The Multipart/Report Media Type for the Reporting of Mail System Administrative Messages +6530 Overview and Framework for Internationalized Email +6531 SMTP Extension for Internationalized Email +6532 Internationalized Email Headers +6533 Internationalized Delivery Status and Disposition Notifications +6729 Indicating Email Handling States in Trace Fields +7293 The Require-Recipient-Valid-Since Header Field and SMTP Service Extension +7372 Email Authentication Status Codes +7435 Opportunistic Security: Some Protection Most of the Time +7504 SMTP 521 and 556 Reply Codes +7505 A "Null MX" No Service Resource Record for Domains That Accept No Mail +8098 Message Disposition Notification +8601 Message Header Field for Indicating Message Authentication Status +8689 SMTP Require TLS Option + +# SPF +4408 (obsoleted by RFC 7208) Sender Policy Framework (SPF) for Authorizing Use of Domains in E-Mail, Version 1 +6652 Sender Policy Framework (SPF) Authentication Failure Reporting Using the Abuse Reporting Format +7208 Sender Policy 
Framework (SPF) for Authorizing Use of Domains in Email, Version 1 +7208-eid5436 errata: header-field FWS +7208-eid6721 errata: corrected smtp example response +7208-eid4751 errata (not verified): ptr mechanism +7208-eid5227 errata (not verified): ptr lookup order +7208-eid6595 errata (not verified): 2 void lookups vs exists +7208-eid6216 errata (not verified): ptr in multiple requirements example from appendix A.4 + +# DKIM +6376 DomainKeys Identified Mail (DKIM) Signatures +6376-eid4810 errata: q= qp-hdr-value +6376-eid5070 errata: tag-spec + +4686 Analysis of Threats Motivating DomainKeys Identified Mail (DKIM) +4871 (obsoleted by RFC 6376) DomainKeys Identified Mail (DKIM) Signatures +5016 Requirements for a DomainKeys Identified Mail (DKIM) Signing Practices Protocol +5585 DomainKeys Identified Mail (DKIM) Service Overview +5672 (obsoleted by RFC 6376) DomainKeys Identified Mail (DKIM) Signatures -- Update +5863 DomainKeys Identified Mail (DKIM) Development, Deployment, and Operations +6377 DomainKeys Identified Mail (DKIM) and Mailing Lists +8032 Edwards-Curve Digital Signature Algorithm (EdDSA) +8301 Cryptographic Algorithm and Key Usage Update to DomainKeys Identified Mail (DKIM) +8463 A New Cryptographic Signature Method for DomainKeys Identified Mail (DKIM) + +# DMARC +7489 Domain-based Message Authentication, Reporting, and Conformance (DMARC) +7489-eid5440 errata: valid dmarc records with(out) semicolon +7489-eid6729 errata (not verified): publicsuffix list only for ICANN DOMAINS +7960 Interoperability Issues between Domain-based Message Authentication, Reporting, and Conformance (DMARC) and Indirect Email Flows +9091 Experimental Domain-Based Message Authentication, Reporting, and Conformance (DMARC) Extension for Public Suffix Domains + +# DKIM/SPF/DMARC +8616 Email Authentication for Internationalized Mail + +# Greylisting +6647 Email Greylisting: An Applicability Statement for SMTP + +# DNSBL/DNSWL +5782 DNS Blacklists and Whitelists +8904 DNS 
Whitelist (DNSWL) Email Authentication Method Extension + +# DANE +6698 The DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) Protocol: TLSA +7218 Adding Acronyms to Simplify Conversations about DNS-Based Authentication of Named Entities (DANE) +7671 The DNS-Based Authentication of Named Entities (DANE) Protocol: Updates and Operational Guidance +7672 SMTP Security via Opportunistic DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) + +# TLS-RPT +8460 SMTP TLS Reporting +8460-eid6241 Wrong example for JSON field "mx-host". + +# MTA-STS +8461 SMTP MTA Strict Transport Security (MTA-STS) + +# ARC +8617 The Authenticated Received Chain (ARC) Protocol + +# ARF +5965 An Extensible Format for Email Feedback Reports +6650 Creation and Use of Email Feedback Reports: An Applicability Statement for the Abuse Reporting Format (ARF) +6591 Authentication Failure Reporting Using the Abuse Reporting Format +6692 Source Ports in Abuse Reporting Format (ARF) Reports + +# IMAP + +1730 (obsoleted by RFC 2060) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4 +2060 (obsoleted by RFC 3501) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 +3501 (obsoleted by RFC 9051) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 +9051 Internet Message Access Protocol (IMAP) - Version 4rev2 + +1733 DISTRIBUTED ELECTRONIC MAIL MODELS IN IMAP4 +2087 IMAP4 QUOTA extension +2088 (obsoleted by RFC 7888) IMAP4 non-synchronizing literals +2152 UTF-7 A Mail-Safe Transformation Format of Unicode +2177 IMAP4 IDLE command +2180 IMAP4 Multi-Accessed Mailbox Practice +2193 IMAP4 Mailbox Referrals +2342 IMAP4 Namespace +2683 IMAP4 Implementation Recommendations +2971 IMAP4 ID extension +3348 (obsoleted by RFC 5258) The Internet Message Action Protocol (IMAP4) Child Mailbox Extension +3502 Internet Message Access Protocol (IMAP) - MULTIAPPEND Extension +3503 Message Disposition Notification (MDN) profile for Internet Message Access Protocol (IMAP) +3516 IMAP4 
Binary Content Extension +3691 Internet Message Access Protocol (IMAP) UNSELECT command +4314 IMAP4 Access Control List (ACL) Extension +4315 Internet Message Access Protocol (IMAP) - UIDPLUS extension +4466 Collected Extensions to IMAP4 ABNF +4467 Internet Message Access Protocol (IMAP) - URLAUTH Extension +4469 Internet Message Access Protocol (IMAP) CATENATE Extension +4549 Synchronization Operations for Disconnected IMAP4 Clients +4550 (obsoleted by RFC 5550) Internet Email to Support Diverse Service Environments (Lemonade) Profile +4551 (obsoleted by RFC 7162) IMAP Extension for Conditional STORE Operation or Quick Flag Changes Resynchronization +4731 IMAP4 Extension to SEARCH Command for Controlling What Kind of Information Is Returned +4978 The IMAP COMPRESS Extension +4959 IMAP Extension for Simple Authentication and Security Layer (SASL) Initial Client Response +5032 WITHIN Search Extension to the IMAP Protocol +5092 IMAP URL Scheme +5161 The IMAP ENABLE Extension +5162 (obsoleted by RFC 7162) IMAP4 Extensions for Quick Mailbox Resynchronization +5182 IMAP Extension for Referencing the Last SEARCH Result +5255 Internet Message Access Protocol Internationalization +5256 Internet Message Access Protocol - SORT and THREAD Extensions +5257 Internet Message Access Protocol - ANNOTATE Extension +5258 Internet Message Access Protocol version 4 - LIST Command Extensions +5259 Internet Message Access Protocol - CONVERT Extension +5267 Contexts for IMAP4 +5464 The IMAP METADATA Extension +5465 The IMAP NOTIFY Extension +5466 IMAP4 Extension for Named Searches (Filters) +5530 IMAP Response Codes +5550 The Internet Email to Support Diverse Service Environments (Lemonade) Profile +5738 (obsoleted by RFC 6855) IMAP Support for UTF-8 +5788 IMAP4 Keyword Registry +5819 IMAP4 Extension for Returning STATUS Information in Extended LIST +5957 Display-Based Address Sorting for the IMAP4 SORT Extension +6154 IMAP LIST Extension for Special-Use Mailboxes +6203 IMAP4 Extension 
for Fuzzy Search +6237 (obsoleted by RFC 7377) IMAP4 Multimailbox SEARCH Extension +6851 Internet Message Access Protocol (IMAP) - MOVE Extension +6855 IMAP Support for UTF-8 +6858 Simplified POP and IMAP Downgrading for Internationalized Email +7162 IMAP Extensions: Quick Flag Changes Resynchronization (CONDSTORE) and Quick Mailbox Resynchronization (QRESYNC) +7377 IMAP4 Multimailbox SEARCH Extension +7888 IMAP4 Non-synchronizing Literals +7889 The IMAP APPENDLIMIT Extension +8437 IMAP UNAUTHENTICATE Extension for Connection Reuse +8474 IMAP Extension for Object Identifiers +8438 IMAP Extension for STATUS=SIZE +8457 IMAP "$Important" Keyword and "\Important" Special-Use Attribute +8508 IMAP REPLACE Extension +8514 Internet Message Access Protocol (IMAP) - SAVEDATE Extension +8970 IMAP4 Extension: Message Preview Generation + +5198 Unicode Format for Network Interchange + +# Mailing list +2369 The Use of URLs as Meta-Syntax for Core Mail List Commands and their Transport through Message Header Fields +2919 List-Id: A Structured Field and Namespace for the Identification of Mailing Lists + +# Sieve +5228 Sieve: An Email Filtering Language +and many more, see http://sieve.info/documents + + +# Vouch by reference +5518 Vouch By Reference + +# TLS +6125 Representation and Verification of Domain-Based Application Service Identity within Internet Public Key Infrastructure Using X.509 (PKIX) Certificates in the Context of Transport Layer Security (TLS) +7525 Recommendations for Secure Use of Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) +8314 Cleartext Considered Obsolete: Use of Transport Layer Security (TLS) for Email Submission and Access +8996 Deprecating TLS 1.0 and TLS 1.1 +8997 Deprecation of TLS 1.1 for Email Submission and Access + +# SASL + +4013 (obsoleted by RFC 7613) SASLprep: Stringprep Profile for User Names and Passwords +4422 Simple Authentication and Security Layer (SASL) +4505 Anonymous Simple Authentication and Security 
Layer (SASL) Mechanism +4616 The PLAIN Simple Authentication and Security Layer (SASL) Mechanism +5802 Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms +6331 Moving DIGEST-MD5 to Historic +7613 (obsoleted by RFC 8265) Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords +7677 SCRAM-SHA-256 and SCRAM-SHA-256-PLUS Simple Authentication and Security Layer (SASL) Mechanisms +8265 Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords + +# IDNA +3492 Punycode: A Bootstring encoding of Unicode for Internationalized Domain Names in Applications (IDNA) +5890 Internationalized Domain Names for Applications (IDNA): Definitions and Document Framework +5891 Internationalized Domain Names in Applications (IDNA): Protocol +5892 The Unicode Code Points and Internationalized Domain Names for Applications (IDNA) +5893 Right-to-Left Scripts for Internationalized Domain Names for Applications (IDNA) +5894 Internationalized Domain Names for Applications (IDNA): Background, Explanation, and Rationale + +# ACME +8555 Automatic Certificate Management Environment (ACME) +8737 Automated Certificate Management Environment (ACME) TLS Application-Layer Protocol Negotiation (ALPN) Challenge Extension + +# DNS +1034 DOMAIN NAMES - CONCEPTS AND FACILITIES +1035 DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION +1101 DNS Encoding of Network Names and Other Types +1536 Common DNS Implementation Errors and Suggested Fixes +2181 Clarifications to the DNS Specification +2308 Negative Caching of DNS Queries (DNS NCACHE) +3363 Representing Internet Protocol version 6 (IPv6) Addresses in the Domain Name System (DNS) +3596 DNS Extensions to Support IP Version 6 +3597 Handling of Unknown DNS Resource Record (RR) Types +4343 Domain Name System (DNS) Case Insensitivity Clarification +4592 The Role of Wildcards in the Domain Name System +5452 Measures for Making DNS 
More Resilient against Forged Answers +6604 xNAME RCODE and Status Bits Clarification +6672 DNAME Redirection in the DNS +6891 Extension Mechanisms for DNS (EDNS(0)) +6895 Domain Name System (DNS) IANA Considerations +7766 DNS Transport over TCP - Implementation Requirements +8020 NXDOMAIN: There Really Is Nothing Underneath +8482 Providing Minimal-Sized Responses to DNS Queries That Have QTYPE=ANY +8490 DNS Stateful Operations +8767 Serving Stale Data to Improve DNS Resiliency +9210 DNS Transport over TCP - Operational Requirements + +# DNSSEC +3225 Indicating Resolver Support of DNSSEC +3658 Delegation Signer (DS) Resource Record (RR) +4033 DNS Security Introduction and Requirements +4034 Resource Records for the DNS Security Extensions +4035 Protocol Modifications for the DNS Security Extensions +4470 Minimally Covering NSEC Records and DNSSEC On-line Signing +4956 DNS Security (DNSSEC) Opt-In +5155 DNS Security (DNSSEC) Hashed Authenticated Denial of Existence +5702 Use of SHA-2 Algorithms with RSA in DNSKEY and RRSIG Resource Records for DNSSEC +5933 Use of GOST Signature Algorithms in DNSKEY and RRSIG Resource Records for DNSSEC +6014 Cryptographic Algorithm Identifier Allocation for DNSSEC +6781 DNSSEC Operational Practices, Version 2 +6840 Clarifications and Implementation Notes for DNS Security (DNSSEC) +8198 Aggressive Use of DNSSEC-Validated Cache +8624 Algorithm Implementation Requirements and Usage Guidance for DNSSEC +8749 Moving DNSSEC Lookaside Validation (DLV) to Historic Status +9077 NSEC and NSEC3: TTLs and Aggressive Use +9157 Revised IANA Considerations for DNSSEC +9276 Guidance for NSEC3 Parameter Settings + +# More + +3986 Uniform Resource Identifier (URI): Generic Syntax +5617 (Historic) DomainKeys Identified Mail (DKIM) Author Domain Signing Practices (ADSP) +6186 (not used in practice) Use of SRV Records for Locating Email Submission/Access Services +7817 Updated Transport Layer Security (TLS) Server Identity Check Procedure for 
Email-Related Protocols diff --git a/rfc/link.go b/rfc/link.go new file mode 100644 index 0000000..2172b88 --- /dev/null +++ b/rfc/link.go @@ -0,0 +1,199 @@ +//go:build link + +package main + +// Read source files and RFC and errata files, and cross-link them. + +import ( + "bytes" + "flag" + "fmt" + "go/parser" + "go/token" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +func usage() { + log.Println("usage: link ../*.go ../*/*.go") + flag.PrintDefaults() + os.Exit(2) +} + +func main() { + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + usage() + } + + type ref struct { + srcpath string + srclineno int + dstpath string + dstlineno int + dstisrfc bool + dstrfc string // e.g. "5322" or "6376-eid4810" + } + + // RFC-file to RFC-line to references to list of file+line (possibly RFCs). + rfcLineSources := map[string]map[int][]ref{} + + // Source-file to source-line to references of RFCs. + sourceLineRFCs := map[string]map[int][]ref{} + + re := regexp.MustCompile(`((../)*)rfc/([0-9]{4,5})(-eid([1-9][0-9]*))?(:([1-9][0-9]*))?`) + + addRef := func(m map[string]map[int][]ref, rfc string, lineno int, r ref) { + lineRefs := m[rfc] + if lineRefs == nil { + lineRefs = map[int][]ref{} + m[rfc] = lineRefs + } + lineRefs[lineno] = append(lineRefs[lineno], r) + } + + // Parse all .go files on the cli, assumed to be relative to current dir. 
+ fset := token.NewFileSet() + for _, arg := range args { + f, err := parser.ParseFile(fset, arg, nil, parser.ParseComments|parser.SkipObjectResolution) + if err != nil { + log.Fatalf("parse file %q: %s", arg, err) + } + for _, cg := range f.Comments { + for _, c := range cg.List { + lines := strings.Split(c.Text, "\n") + for i, line := range lines { + matches := re.FindAllStringSubmatch(line, -1) + if len(matches) == 0 { + continue + } + + srcpath := arg + srclineno := fset.Position(c.Pos()).Line + i + dir := filepath.Dir(srcpath) + for _, m := range matches { + pre := m[1] + rfc := m[3] + eid := m[5] + lineStr := m[7] + if eid != "" && lineStr != "" { + log.Fatalf("%s:%d: cannot reference both errata (eid %q) to specified line number", srcpath, srclineno, eid) + } + var dstlineno int + if lineStr != "" { + v, err := strconv.ParseInt(lineStr, 10, 32) + if err != nil { + log.Fatalf("%s:%d: bad linenumber %q: %v", srcpath, srclineno, lineStr, err) + } + dstlineno = int(v) + } + if dstlineno <= 0 { + dstlineno = 1 + } + if eid != "" { + rfc += "-eid" + eid + } + dstpath := filepath.Join(dir, pre+"rfc", rfc) + if _, err := os.Stat(dstpath); err != nil { + log.Fatalf("%s:%d: references %s: %v", srcpath, srclineno, dstpath, err) + } + r := ref{srcpath, srclineno, dstpath, dstlineno, true, rfc} + addRef(sourceLineRFCs, r.srcpath, r.srclineno, r) + addRef(rfcLineSources, r.dstrfc, r.dstlineno, ref{r.dstrfc, r.dstlineno, r.srcpath, r.srclineno, false, ""}) + } + } + } + } + } + + files, err := os.ReadDir(".") + if err != nil { + log.Fatalf("readdir: %v", err) + } + for _, de := range files { + name := de.Name() + isrfc := isRFC(name) + iserrata := isErrata(name) + if !isrfc && !iserrata { + continue + } + oldBuf, err := os.ReadFile(name) + if err != nil { + log.Fatalf("readdir: %v", err) + } + old := string(oldBuf) + b := &bytes.Buffer{} + lineRefs := rfcLineSources[name] + lines := strings.Split(old, "\n") + if len(lines) > 0 && lines[len(lines)-1] == "" { + lines = 
lines[:len(lines)-1] + } + for i, line := range lines { + if !iserrata && len(line) > 80 { + line = strings.TrimRight(line[:80], " ") + } + refs := lineRefs[i+1] + if len(refs) > 0 { + if iserrata { + line = "" + } else { + line = fmt.Sprintf("%-80s", line) + } + + // Lookup source files for rfc:line, so we can cross-link the rfcs. + done := map[string]bool{} + for _, r := range refs { + for _, xr := range sourceLineRFCs[r.dstpath][r.dstlineno] { + sref := fmt.Sprintf(" %s:%d", xr.dstrfc, xr.dstlineno) + if xr.dstrfc == name && xr.dstlineno == i+1 || done[sref] { + continue + } + line += sref + done[sref] = true + } + } + + // Add link from rfc to source code. + for _, r := range refs { + line += fmt.Sprintf(" %s:%d", r.dstpath, r.dstlineno) + } + if iserrata { + line = line[1:] + } + } + line += "\n" + b.WriteString(line) + } + newBuf := b.Bytes() + if !bytes.Equal(oldBuf, newBuf) { + if err := os.WriteFile(name, newBuf, 0660); err != nil { + log.Printf("writefile %q: %s", name, err) + } + log.Print(name) + } + } +} + +func isRFC(name string) bool { + if len(name) < 4 || len(name) > 5 { + return false + } + for _, c := range name { + if c < '0' || c > '9' { + return false + } + } + return true +} + +func isErrata(name string) bool { + t := strings.Split(name, "-") + return len(t) == 2 && isRFC(t[0]) && strings.HasPrefix(t[1], "eid") +} diff --git a/scram/parse.go b/scram/parse.go new file mode 100644 index 0000000..28cd517 --- /dev/null +++ b/scram/parse.go @@ -0,0 +1,266 @@ +package scram + +import ( + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" +) + +type parser struct { + s string // Original casing. + lower string // Lower casing, for case-insensitive token consumption. + o int // Offset in s/lower. +} + +type parseError struct{ err error } + +func (e parseError) Error() string { + return e.err.Error() +} + +func (e parseError) Unwrap() error { + return e.err +} + +// toLower lower cases bytes that are A-Z. strings.ToLower does too much. 
and +// would replace invalid bytes with unicode replacement characters, which would +// break our requirement that offsets into the original and upper case strings +// point to the same character. +func toLower(s string) string { + r := []byte(s) + for i, c := range r { + if c >= 'A' && c <= 'Z' { + r[i] = c + 0x20 + } + } + return string(r) +} + +func newParser(buf []byte) *parser { + s := string(buf) + return &parser{s, toLower(s), 0} +} + +// Turn panics of parseError into a descriptive ErrInvalidEncoding. Called with +// defer by functions that parse. +func (p *parser) recover(rerr *error) { + x := recover() + if x == nil { + return + } + err, ok := x.(error) + if !ok { + panic(x) + } + var xerr Error + if errors.As(err, &xerr) { + *rerr = err + return + } + *rerr = fmt.Errorf("%w: %s", ErrInvalidEncoding, err) +} + +func (p *parser) xerrorf(format string, args ...any) { + panic(parseError{fmt.Errorf(format, args...)}) +} + +func (p *parser) xcheckf(err error, format string, args ...any) { + if err != nil { + panic(parseError{fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)}) + } +} + +func (p *parser) xempty() { + if p.o != len(p.s) { + p.xerrorf("leftover data") + } +} + +func (p *parser) xnonempty() { + if p.o >= len(p.s) { + p.xerrorf("unexpected end") + } +} + +func (p *parser) xbyte() byte { + p.xnonempty() + c := p.lower[p.o] + p.o++ + return c +} + +func (p *parser) peek(s string) bool { + return strings.HasPrefix(p.lower[p.o:], s) +} + +func (p *parser) take(s string) bool { + if p.peek(s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtake(s string) { + if !p.take(s) { + p.xerrorf("expected %q", s) + } +} + +func (p *parser) xauthzid() string { + p.xtake("a=") + return p.xsaslname() +} + +func (p *parser) xusername() string { + p.xtake("n=") + return p.xsaslname() +} + +func (p *parser) xnonce() string { + p.xtake("r=") + o := p.o + for ; o < len(p.s); o++ { + c := p.s[o] + if c <= ' ' || c >= 0x7f || c == ',' { + 
break + } + } + if o == p.o { + p.xerrorf("empty nonce") + } + r := p.s[p.o:o] + p.o = o + return r +} + +func (p *parser) xattrval() { + c := p.xbyte() + if !(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z') { + p.xerrorf("expected alpha for attr-val") + } + p.xtake("=") + p.xvalue() +} + +func (p *parser) xvalue() string { + for o, c := range p.s[p.o:] { + if c == 0 || c == ',' { + if o == 0 { + p.xerrorf("invalid empty value") + } + r := p.s[p.o : p.o+o] + p.o = o + return r + } + } + p.xnonempty() + r := p.s[p.o:] + p.o = len(p.s) + return r +} + +func (p *parser) xbase64() []byte { + o := p.o + for ; o < len(p.s); o++ { + c := p.s[o] + if !(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '/' || c == '+' || c == '=') { + break + } + } + buf, err := base64.StdEncoding.DecodeString(p.s[p.o:o]) + p.xcheckf(err, "decoding base64") + p.o = o + return buf +} + +func (p *parser) xsaslname() string { + var esc string + var is bool + var r string + for o, c := range p.s[p.o:] { + if c == 0 || c == ',' { + if is { + p.xerrorf("saslname unexpected end") + } + if o == 0 { + p.xerrorf("saslname cannot be empty") + } + p.o += o + return r + } + if is { + esc += string(c) + if len(esc) < 2 { + continue + } + switch esc { + case "2c", "2C": + r += "," + case "3d", "3D": + r += "=" + default: + p.xerrorf("bad escape %q in saslanem", esc) + } + is = false + esc = "" + continue + } else if c == '=' { + is = true + continue + } + r += string(c) + } + if is { + p.xerrorf("saslname unexpected end") + } + if r == "" { + p.xerrorf("saslname cannot be empty") + } + p.o = len(p.s) + return r +} + +func (p *parser) xchannelBinding() string { + p.xtake("c=") + return string(p.xbase64()) +} + +func (p *parser) xproof() []byte { + p.xtake("p=") + return p.xbase64() +} + +func (p *parser) xsalt() []byte { + p.xtake("s=") + return p.xbase64() +} + +func (p *parser) xtakefn1(fn func(rune, int) bool) string { + for o, c := range p.s[p.o:] { + if !fn(c, o) { + if o == 0 
{ + p.xerrorf("non-empty match required") + } + r := p.s[p.o : p.o+o] + p.o += o + return r + } + } + p.xnonempty() + r := p.s[p.o:] + p.o = len(p.s) + return r +} + +func (p *parser) xiterations() int { + p.xtake("i=") + digits := p.xtakefn1(func(c rune, i int) bool { + return c >= '1' && c <= '9' || i > 0 && c == '0' + }) + v, err := strconv.ParseInt(digits, 10, 32) + p.xcheckf(err, "parsing int") + return int(v) +} diff --git a/scram/scram.go b/scram/scram.go new file mode 100644 index 0000000..98dd1b9 --- /dev/null +++ b/scram/scram.go @@ -0,0 +1,368 @@ +// Package scram implements the SCRAM-SHA256 SASL authentication mechanism, RFC 7677. +// +// SCRAM-SHA256 allows a client to authenticate to a server using a password +// without handing plaintext password over to the server. The client also +// verifies the server knows (a derivative of) the password. +package scram + +// todo: test with messages that contains extensions +// todo: some tests for the parser +// todo: figure out how invalid parameters etc should be handled. just abort? perhaps mostly a problem for imap. + +import ( + "bytes" + "crypto/hmac" + cryptorand "crypto/rand" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "strings" + + "golang.org/x/crypto/pbkdf2" + "golang.org/x/text/unicode/norm" +) + +// Errors at scram protocol level. Can be exchanged between client and server. 
+var ( + ErrInvalidEncoding Error = "invalid-encoding" + ErrExtensionsNotSupported Error = "extensions-not-supported" + ErrInvalidProof Error = "invalid-proof" + ErrChannelBindingsDontMatch Error = "channel-bindings-dont-match" + ErrServerDoesSupportChannelBinding Error = "server-does-support-channel-binding" + ErrChannelBindingNotSupported Error = "channel-binding-not-supported" + ErrUnsupportedChannelBindingType Error = "unsupported-channel-binding-type" + ErrUnknownUser Error = "unknown-user" + ErrNoResources Error = "no-resources" + ErrOtherError Error = "other-error" +) + +var scramErrors = makeErrors() + +func makeErrors() map[string]Error { + l := []Error{ + ErrInvalidEncoding, + ErrExtensionsNotSupported, + ErrInvalidProof, + ErrChannelBindingsDontMatch, + ErrServerDoesSupportChannelBinding, + ErrChannelBindingNotSupported, + ErrUnsupportedChannelBindingType, + ErrUnknownUser, + ErrNoResources, + ErrOtherError, + } + m := map[string]Error{} + for _, e := range l { + m[string(e)] = e + } + return m +} + +var ( + ErrNorm = errors.New("parameter not unicode normalized") // E.g. if client sends non-normalized username or authzid. + ErrUnsafe = errors.New("unsafe parameter") // E.g. salt, nonce too short, or too few iterations. + ErrProtocol = errors.New("protocol error") // E.g. server responded with a nonce not prefixed by the client nonce. +) + +type Error string + +func (e Error) Error() string { + return string(e) +} + +// MakeRandom returns a cryptographically random buffer for use as salt or as +// nonce. +func MakeRandom() []byte { + buf := make([]byte, 12) + _, err := cryptorand.Read(buf) + if err != nil { + panic("generate random") + } + return buf +} + +// SaltPassword returns a salted password. +func SaltPassword(password string, salt []byte, iterations int) []byte { + password = norm.NFC.String(password) + return pbkdf2.Key([]byte(password), salt, iterations, sha256.Size, sha256.New) +} + +// HMAC returns the hmac with key over msg. 
+func HMAC(key []byte, msg string) []byte { + mac := hmac.New(sha256.New, key) + mac.Write([]byte(msg)) + return mac.Sum(nil) +} + +func xor(a, b []byte) { + for i := range a { + a[i] ^= b[i] + } +} + +// Server represents the server-side of a SCRAM-SHA-256 authentication. +type Server struct { + Authentication string // Username for authentication, "authc". Always set and non-empty. + Authorization string // If set, role of user to assume after authentication, "authz". + + // Messages used in hash calculations. + clientFirstBare string + serverFirst string + clientFinalWithoutProof string + + gs2header string + clientNonce string // Client-part of the nonce. + serverNonceOverride string // If set, server does not generate random nonce, but uses this. For tests with the test vector. + nonce string // Full client + server nonce. +} + +// NewServer returns a server given the first SCRAM message from a client. +// +// The sequence for data and calls on a server: +// +// - Read initial data from client, call NewServer (this call), then ServerFirst and write to the client. +// - Read response from client, call Finish or FinishFinal and write the resulting string. 
+func NewServer(clientFirst []byte) (server *Server, rerr error) { + p := newParser(clientFirst) + defer p.recover(&rerr) + + server = &Server{} + + // ../rfc/5802:949 ../rfc/5802:910 + gs2cbindFlag := p.xbyte() + switch gs2cbindFlag { + case 'n', 'y': + case 'p': + p.xerrorf("gs2 header with p: %w", ErrChannelBindingNotSupported) + } + p.xtake(",") + if !p.take(",") { + server.Authorization = p.xauthzid() + if norm.NFC.String(server.Authorization) != server.Authorization { + return nil, fmt.Errorf("%w: authzid", ErrNorm) + } + p.xtake(",") + } + server.gs2header = p.s[:p.o] + server.clientFirstBare = p.s[p.o:] + + // ../rfc/5802:945 + if p.take("m=") { + p.xerrorf("unexpected mandatory extension: %w", ErrExtensionsNotSupported) + } + server.Authentication = p.xusername() + if norm.NFC.String(server.Authentication) != server.Authentication { + return nil, fmt.Errorf("%w: username", ErrNorm) + } + p.xtake(",") + server.clientNonce = p.xnonce() + if len(server.clientNonce) < 8 { + return nil, fmt.Errorf("%w: client nonce too short", ErrUnsafe) + } + // Extensions, we don't recognize them. + for p.take(",") { + p.xattrval() + } + p.xempty() + return server, nil +} + +// ServerFirst returns the string to send back to the client. To be called after NewServer. +func (s *Server) ServerFirst(iterations int, salt []byte) (string, error) { + // ../rfc/5802:959 + serverNonce := s.serverNonceOverride + if serverNonce == "" { + serverNonce = base64.StdEncoding.EncodeToString(MakeRandom()) + } + s.nonce = s.clientNonce + serverNonce + s.serverFirst = fmt.Sprintf("r=%s,s=%s,i=%d", s.nonce, base64.StdEncoding.EncodeToString(salt), iterations) + return s.serverFirst, nil +} + +// Finish takes the final client message, and the salted password (probably +// from server storage), verifies the client, and returns a message to return +// to the client. If err is nil, authentication was successful. 
If the +// authorization requested is not acceptable, the server should call +// FinishError instead. +func (s *Server) Finish(clientFinal []byte, saltedPassword []byte) (serverFinal string, rerr error) { + p := newParser(clientFinal) + defer p.recover(&rerr) + + cbind := p.xchannelBinding() + if cbind != s.gs2header { + return "e=" + string(ErrChannelBindingsDontMatch), ErrChannelBindingsDontMatch + } + p.xtake(",") + nonce := p.xnonce() + if nonce != s.nonce { + return "e=" + string(ErrInvalidProof), ErrInvalidProof + } + for !p.peek(",p=") { + p.xtake(",") + p.xattrval() // Ignored. + } + s.clientFinalWithoutProof = p.s[:p.o] + p.xtake(",") + proof := p.xproof() + p.xempty() + + msg := s.clientFirstBare + "," + s.serverFirst + "," + s.clientFinalWithoutProof + + clientKey := HMAC(saltedPassword, "Client Key") + storedKey0 := sha256.Sum256(clientKey) + storedKey := storedKey0[:] + + clientSig := HMAC(storedKey, msg) + xor(clientSig, clientKey) // Now clientProof. + if !bytes.Equal(clientSig, proof) { + return "e=" + string(ErrInvalidProof), ErrInvalidProof + } + + serverKey := HMAC(saltedPassword, "Server Key") + serverSig := HMAC(serverKey, msg) + return fmt.Sprintf("v=%s", base64.StdEncoding.EncodeToString(serverSig)), nil +} + +// FinishError returns an error message to write to the client for the final +// server message. +func (s *Server) FinishError(err Error) string { + return "e=" + string(err) +} + +// Client represents the client-side of a SCRAM-SHA-256 authentication. +type Client struct { + authc string + authz string + + // Messages used in hash calculations. + clientFirstBare string + serverFirst string + clientFinalWithoutProof string + authMessage string + + gs2header string + clientNonce string + nonce string // Full client + server nonce. + saltedPassword []byte +} + +// NewClient returns a client for authentication authc, optionally for +// authorization with role authz. 
+// +// The sequence for data and calls on a client: +// +// - ClientFirst, write result to server. +// - Read response from server, feed to ServerFirst, write response to server. +// - Read response from server, feed to ServerFinal. +func NewClient(authc, authz string) *Client { + authc = norm.NFC.String(authc) + authz = norm.NFC.String(authz) + return &Client{authc: authc, authz: authz} +} + +// ClientFirst returns the first client message to write to the server. +// No channel binding is done/supported. +// A random nonce is generated. +func (c *Client) ClientFirst() (clientFirst string, rerr error) { + c.gs2header = fmt.Sprintf("n,%s,", saslname(c.authz)) + if c.clientNonce == "" { + c.clientNonce = base64.StdEncoding.EncodeToString(MakeRandom()) + } + c.clientFirstBare = fmt.Sprintf("n=%s,r=%s", saslname(c.authc), c.clientNonce) + return c.gs2header + c.clientFirstBare, nil +} + +// ServerFirst processes the first response message from the server. The +// provided nonce, salt and iterations are checked. If valid, a final client +// message is calculated and returned. This message must be written to the +// server. It includes proof that the client knows the password. +func (c *Client) ServerFirst(serverFirst []byte, password string) (clientFinal string, rerr error) { + c.serverFirst = string(serverFirst) + p := newParser(serverFirst) + defer p.recover(&rerr) + + // ../rfc/5802:959 + if p.take("m=") { + p.xerrorf("unsupported mandatory extension: %w", ErrExtensionsNotSupported) + } + + c.nonce = p.xnonce() + p.xtake(",") + salt := p.xsalt() + p.xtake(",") + iterations := p.xiterations() + // We ignore extensions that we don't know about. 
+ for p.take(",") { + p.xattrval() + } + p.xempty() + + if !strings.HasPrefix(c.nonce, c.clientNonce) { + return "", fmt.Errorf("%w: server dropped our nonce", ErrProtocol) + } + if len(c.nonce)-len(c.clientNonce) < 8 { + return "", fmt.Errorf("%w: server nonce too short", ErrUnsafe) + } + if len(salt) < 8 { + return "", fmt.Errorf("%w: salt too short", ErrUnsafe) + } + if iterations < 2048 { + return "", fmt.Errorf("%w: too few iterations", ErrUnsafe) + } + + c.clientFinalWithoutProof = fmt.Sprintf("c=%s,r=%s", base64.StdEncoding.EncodeToString([]byte(c.gs2header)), c.nonce) + + c.authMessage = c.clientFirstBare + "," + c.serverFirst + "," + c.clientFinalWithoutProof + + c.saltedPassword = SaltPassword(password, salt, iterations) + clientKey := HMAC(c.saltedPassword, "Client Key") + storedKey0 := sha256.Sum256(clientKey) + storedKey := storedKey0[:] + clientSig := HMAC(storedKey, c.authMessage) + xor(clientSig, clientKey) // Now clientProof. + clientProof := clientSig + + r := c.clientFinalWithoutProof + ",p=" + base64.StdEncoding.EncodeToString(clientProof) + return r, nil +} + +// ServerFinal processes the final message from the server, verifying that the +// server knows the password. +func (c *Client) ServerFinal(serverFinal []byte) (rerr error) { + p := newParser(serverFinal) + defer p.recover(&rerr) + + if p.take("e=") { + errstr := p.xvalue() + var err error = scramErrors[errstr] + if err == Error("") { + err = errors.New(errstr) + } + return fmt.Errorf("error from server: %w", err) + } + p.xtake("v=") + verifier := p.xbase64() + + serverKey := HMAC(c.saltedPassword, "Server Key") + serverSig := HMAC(serverKey, c.authMessage) + if !bytes.Equal(verifier, serverSig) { + return fmt.Errorf("incorrect server signature") + } + return nil +} + +// Convert "," to =2C and "=" to =3D. 
+func saslname(s string) string { + var r string + for _, c := range s { + if c == ',' { + r += "=2C" + } else if c == '=' { + r += "=3D" + } else { + r += string(c) + } + } + return r +} diff --git a/scram/scram_test.go b/scram/scram_test.go new file mode 100644 index 0000000..69334d3 --- /dev/null +++ b/scram/scram_test.go @@ -0,0 +1,169 @@ +package scram + +import ( + "encoding/base64" + "errors" + "testing" +) + +func base64Decode(s string) []byte { + buf, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic("bad base64") + } + return buf +} + +func tcheck(t *testing.T, err error, msg string) { + t.Helper() + if err != nil { + t.Fatalf("%s: %s", msg, err) + } +} + +func TestScramServer(t *testing.T) { + // Test vector from ../rfc/7677:122 + salt := base64Decode("W22ZaJ0SNY7soEsUEjb6gQ==") + saltedPassword := SaltPassword("pencil", salt, 4096) + + server, err := NewServer([]byte("n,,n=user,r=rOprNGfwEbeRWgbNEkqO")) + server.serverNonceOverride = "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0" + tcheck(t, err, "newserver") + resp, err := server.ServerFirst(4096, salt) + tcheck(t, err, "server first") + if resp != "r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096" { + t.Fatalf("bad server first") + } + serverFinal, err := server.Finish([]byte("c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ="), saltedPassword) + tcheck(t, err, "finish") + if serverFinal != "v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=" { + t.Fatalf("bad server final") + } +} + +// Bad attempt with wrong password. 
+func TestScramServerBadPassword(t *testing.T) { + salt := base64Decode("W22ZaJ0SNY7soEsUEjb6gQ==") + saltedPassword := SaltPassword("marker", salt, 4096) + + server, err := NewServer([]byte("n,,n=user,r=rOprNGfwEbeRWgbNEkqO")) + server.serverNonceOverride = "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0" + tcheck(t, err, "newserver") + _, err = server.ServerFirst(4096, salt) + tcheck(t, err, "server first") + _, err = server.Finish([]byte("c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ="), saltedPassword) + if !errors.Is(err, ErrInvalidProof) { + t.Fatalf("got %v, expected ErrInvalidProof", err) + } +} + +// Bad attempt with different number of rounds. +func TestScramServerBadIterations(t *testing.T) { + salt := base64Decode("W22ZaJ0SNY7soEsUEjb6gQ==") + saltedPassword := SaltPassword("pencil", salt, 2048) + + server, err := NewServer([]byte("n,,n=user,r=rOprNGfwEbeRWgbNEkqO")) + server.serverNonceOverride = "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0" + tcheck(t, err, "newserver") + _, err = server.ServerFirst(4096, salt) + tcheck(t, err, "server first") + _, err = server.Finish([]byte("c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ="), saltedPassword) + if !errors.Is(err, ErrInvalidProof) { + t.Fatalf("got %v, expected ErrInvalidProof", err) + } +} + +// Another attempt but with a randomly different nonce. 
+func TestScramServerBad(t *testing.T) { + salt := base64Decode("W22ZaJ0SNY7soEsUEjb6gQ==") + saltedPassword := SaltPassword("pencil", salt, 4096) + + server, err := NewServer([]byte("n,,n=user,r=rOprNGfwEbeRWgbNEkqO")) + tcheck(t, err, "newserver") + _, err = server.ServerFirst(4096, salt) + tcheck(t, err, "server first") + _, err = server.Finish([]byte("c=biws,r="+server.nonce+",p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ="), saltedPassword) + if !errors.Is(err, ErrInvalidProof) { + t.Fatalf("got %v, expected ErrInvalidProof", err) + } +} + +func TestScramClient(t *testing.T) { + c := NewClient("user", "") + c.clientNonce = "rOprNGfwEbeRWgbNEkqO" + clientFirst, err := c.ClientFirst() + tcheck(t, err, "ClientFirst") + if clientFirst != "n,,n=user,r=rOprNGfwEbeRWgbNEkqO" { + t.Fatalf("bad clientFirst") + } + clientFinal, err := c.ServerFirst([]byte("r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096"), "pencil") + tcheck(t, err, "ServerFirst") + if clientFinal != "c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ=" { + t.Fatalf("bad clientFinal") + } + err = c.ServerFinal([]byte("v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=")) + tcheck(t, err, "ServerFinal") +} + +func TestScram(t *testing.T) { + run := func(expErr error, username, authzid, password string, iterations int, clientNonce, serverNonce string) { + t.Helper() + + defer func() { + x := recover() + if x == nil || x == "" { + return + } + panic(x) + }() + + // check err is either nil or the expected error. if the expected error, panic to abort the authentication session. + xerr := func(err error, msg string) { + t.Helper() + if err != nil && !errors.Is(err, expErr) { + t.Fatalf("%s: got %v, expected %v", msg, err, expErr) + } + if err != nil { + panic("") // Abort test. 
+ } + } + + salt := MakeRandom() + saltedPassword := SaltPassword(password, salt, iterations) + + client := NewClient(username, "") + client.clientNonce = clientNonce + clientFirst, err := client.ClientFirst() + xerr(err, "client.ClientFirst") + + server, err := NewServer([]byte(clientFirst)) + xerr(err, "NewServer") + server.serverNonceOverride = serverNonce + + serverFirst, err := server.ServerFirst(iterations, salt) + xerr(err, "server.ServerFirst") + + clientFinal, err := client.ServerFirst([]byte(serverFirst), password) + xerr(err, "client.ServerFirst") + + serverFinal, err := server.Finish([]byte(clientFinal), saltedPassword) + xerr(err, "server.Finish") + + err = client.ServerFinal([]byte(serverFinal)) + xerr(err, "client.ServerFinal") + + if expErr != nil { + t.Fatalf("got no error, expected %v", expErr) + } + } + + run(nil, "user", "", "pencil", 4096, "", "") + run(nil, "mjl@mox.example", "", "testtest", 4096, "", "") + run(nil, "mjl@mox.example", "", "short", 4096, "", "") + run(nil, "mjl@mox.example", "", "short", 2048, "", "") + run(nil, "mjl@mox.example", "mjl@mox.example", "testtest", 4096, "", "") + run(nil, "mjl@mox.example", "other@mox.example", "testtest", 4096, "", "") + run(ErrUnsafe, "user", "", "pencil", 1, "", "") // Few iterations. + run(ErrUnsafe, "user", "", "pencil", 2048, "short", "") // Short client nonce. + run(ErrUnsafe, "user", "", "pencil", 2048, "test1234", "test") // Server added too few random data. 
+} diff --git a/serve.go b/serve.go new file mode 100644 index 0000000..d0a7c84 --- /dev/null +++ b/serve.go @@ -0,0 +1,351 @@ +package main + +import ( + "context" + cryptorand "crypto/rand" + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "runtime/debug" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dnsbl" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/store" + "github.com/mjl-/mox/updates" +) + +func monitorDNSBL(log *mlog.Log) { + defer func() { + // On error, don't bring down the entire server. + x := recover() + if x != nil { + log.Error("monitordnsbl panic", mlog.Field("panic", x)) + debug.PrintStack() + metrics.PanicInc("serve") + } + }() + + l, ok := mox.Conf.Static.Listeners["public"] + if !ok { + log.Info("no listener named public, not monitoring our ips at dnsbls") + return + } + + var zones []dns.Domain + for _, zone := range l.SMTP.DNSBLs { + d, err := dns.ParseDomain(zone) + if err != nil { + log.Fatalx("parsing dnsbls zone", err, mlog.Field("zone", zone)) + } + zones = append(zones, d) + } + if len(zones) == 0 { + return + } + + type key struct { + zone dns.Domain + ip string + } + metrics := map[key]prometheus.GaugeFunc{} + var statusMutex sync.Mutex + statuses := map[key]bool{} + + resolver := dns.StrictResolver{Pkg: "dnsblmonitor"} + var sleep time.Duration // No sleep on first iteration. 
+ for { + time.Sleep(sleep) + sleep = 3 * time.Hour + + ips, err := mox.IPs(mox.Context) + if err != nil { + log.Errorx("listing ips for dnsbl monitor", err) + continue + } + for _, ip := range ips { + if ip.IsLoopback() || ip.IsPrivate() { + continue + } + + for _, zone := range zones { + status, expl, err := dnsbl.Lookup(mox.Context, resolver, zone, ip) + if err != nil { + log.Errorx("dnsbl monitor lookup", err, mlog.Field("ip", ip), mlog.Field("zone", zone), mlog.Field("expl", expl), mlog.Field("status", status)) + } + k := key{zone, ip.String()} + + statusMutex.Lock() + statuses[k] = status == dnsbl.StatusPass + statusMutex.Unlock() + + if _, ok := metrics[k]; !ok { + metrics[k] = promauto.NewGaugeFunc( + prometheus.GaugeOpts{ + Name: "mox_dnsbl_ips_success", + Help: "DNSBL lookups to configured DNSBLs of our IPs.", + ConstLabels: prometheus.Labels{ + "zone": zone.String(), + "ip": k.ip, + }, + }, + func() float64 { + statusMutex.Lock() + defer statusMutex.Unlock() + if statuses[k] { + return 1 + } + return 0 + }, + ) + } + time.Sleep(time.Second) + } + } + } +} + +func cmdServe(c *cmd) { + c.help = `Start mox, serving SMTP/IMAP/HTTPS. + +Incoming email is accepted over SMTP. Email can be retrieved by users using +IMAP. HTTP listeners are started for the admin/account web interfaces, and for +automated TLS configuration. Missing essential TLS certificates are immediately +requested, other TLS certificates are requested on demand. 
+` + args := c.Parse() + if len(args) != 0 { + c.Usage() + } + mox.MustLoadConfig() + + mox.Shutdown = make(chan struct{}) + servectx, servecancel := context.WithCancel(context.Background()) + mox.Context = servectx + + mlog.Logfmt = true + log := mlog.New("serve") + + if os.Getuid() == 0 { + log.Fatal("refusing to run as root, please start mox as unprivileged user") + } + + if fds := os.Getenv("MOX_RESTART_CTL_SOCKET"); fds != "" { + log.Print("restarted") + + fd, err := strconv.ParseUint(fds, 10, 32) + if err != nil { + log.Fatalx("restart with invalid ctl socket", err, mlog.Field("fd", fds)) + } + f := os.NewFile(uintptr(fd), "restartctl") + if _, err := fmt.Fprint(f, "ok\n"); err != nil { + log.Infox("writing ok to restart ctl socket", err) + } + if err := f.Close(); err != nil { + log.Errorx("closing restart ctl socket", err) + } + } + log.Print("starting up", mlog.Field("version", moxvar.Version)) + + shutdown := func() { + // We indicate we are shutting down. Causes new connections and new SMTP commands to be rejected. Should stop active connections pretty quickly. + close(mox.Shutdown) + + // Now we are going to wait for all connections to be gone, up to a timeout. + done := mox.Connections.Done() + select { + case <-done: + log.Print("clean shutdown") + + case <-time.Tick(3 * time.Second): + // We now cancel all pending operations, and set an immediate deadline on sockets. Should get us a clean shutdown relatively quickly. + servecancel() + mox.Connections.Shutdown() + + select { + case <-done: + log.Print("no more connections, shutdown is clean") + case <-time.Tick(time.Second): + log.Print("shutting down with pending sockets") + } + } + servecancel() // Keep go vet happy. 
+ if err := os.Remove(mox.DataDirPath("ctl")); err != nil { + log.Errorx("removing ctl unix domain socket during shutdown", err) + } + } + + if err := moxio.CheckUmask(); err != nil { + log.Errorx("bad umask", err) + } + + if mox.Conf.Static.CheckUpdates { + checkUpdates := func() { + current, lastknown, mtime, err := mox.LastKnown() + if err != nil { + log.Infox("determining own version before checking for updates, trying again in 1h", err) + time.Sleep(time.Hour) + return + } + if !mtime.IsZero() && time.Since(mtime) < 24*time.Hour { + time.Sleep(24*time.Hour - time.Since(mtime)) + } + now := time.Now() + if err := os.Chtimes(mox.DataDirPath("lastknownversion"), now, now); err != nil { + log.Infox("setting mtime on lastknownversion file, for checking only once per 24h, trying again in 1h", err) + return + } + log.Debug("checking for updates", mlog.Field("lastknown", lastknown)) + updatesctx, updatescancel := context.WithTimeout(mox.Context, time.Minute) + latest, _, changelog, err := updates.Check(updatesctx, dns.StrictResolver{}, dns.Domain{ASCII: changelogDomain}, lastknown, changelogURL, changelogPubKey) + updatescancel() + if err != nil { + log.Infox("checking for updates", err, mlog.Field("latest", latest)) + return + } + if !latest.After(lastknown) { + log.Debug("no new version available") + return + } + if len(changelog.Changes) == 0 { + log.Info("new version available, but changelog is empty, ignoring", mlog.Field("latest", latest)) + return + } + + var cl string + for i := len(changelog.Changes) - 1; i >= 0; i-- { + cl += changelog.Changes[i].Text + "\n\n" + } + + a, err := store.OpenAccount(mox.Conf.Static.Postmaster.Account) + if err != nil { + log.Infox("open account for postmaster changelog delivery", err) + return + } + defer a.Close() + f, err := store.CreateMessageTemp("changelog") + if err != nil { + log.Infox("making temporary message file for changelog delivery", err) + return + } + m := &store.Message{Received: time.Now(), Flags: 
store.Flags{Flagged: true}} + n, err := fmt.Fprintf(f, "Date: %s\r\nSubject: mox update %s available, changelog\r\n\r\nHi!\r\n\r\nVersion %s of mox is available.\r\nThe changes compared to the previous update notification email:\r\n\r\n%s\r\n\r\nDon't forget to update, this install is at %s.\r\nPlease report any issues at https://github.com/mjl-/mox\r\n", time.Now().Format(message.RFC5322Z), latest, latest, strings.ReplaceAll(cl, "\n", "\r\n"), current) + if err != nil { + log.Infox("writing temporary message file for changelog delivery", err) + return + } + m.Size = int64(n) + if err := a.DeliverMailbox(log, mox.Conf.Static.Postmaster.Mailbox, m, f, true); err != nil { + log.Infox("changelog delivery", err) + if err := os.Remove(f.Name()); err != nil { + log.Infox("removing temporary changelog message after delivery failure", err) + } + } + log.Info("delivered changelog", mlog.Field("current", current), mlog.Field("lastknown", lastknown), mlog.Field("latest", latest)) + if err := mox.StoreLastKnown(latest); err != nil { + // This will be awkward, we'll keep notifying the postmaster once every 24h... + log.Infox("updating last known version", err) + } + } + + go func() { + for { + checkUpdates() + } + }() + } + + // Initialize key and random buffer for creating opaque SMTP + // transaction IDs based on "cid"s. + recvidpath := mox.DataDirPath("receivedid.key") + recvidbuf, err := os.ReadFile(recvidpath) + if err != nil || len(recvidbuf) != 16+8 { + recvidbuf = make([]byte, 16+8) + if _, err := cryptorand.Read(recvidbuf); err != nil { + log.Fatalx("reading random recvid data", err) + } + if err := os.WriteFile(recvidpath, recvidbuf, 0660); err != nil { + log.Fatalx("writing recvidpath", err, mlog.Field("path", recvidpath)) + } + } + if err := mox.ReceivedIDInit(recvidbuf[:16], recvidbuf[16:]); err != nil { + log.Fatalx("init receivedid", err) + } + + // We start the network listeners first. 
If an instance is already running, we'll + // get errors about address being in use. We listen to the unix domain socket + // afterwards, which we always remove before listening. We need to do that because + // we may not have cleaned up our control socket during unexpected shutdown. We + // don't want to remove and listen on the unix domain socket first. If we would, we + // would make the existing instance unreachable over its ctl socket, and then fail + // because the network addresses are taken. + mtastsdbRefresher := true + if err := start(mtastsdbRefresher); err != nil { + log.Fatalx("start", err) + } + + go monitorDNSBL(log) + + ctlpath := mox.DataDirPath("ctl") + os.Remove(ctlpath) + ctl, err := net.Listen("unix", ctlpath) + if err != nil { + log.Fatalx("listen on ctl unix domain socket", err) + } + go func() { + for { + conn, err := ctl.Accept() + if err != nil { + log.Printx("accept for ctl", err) + continue + } + cid := mox.Cid() + ctx := context.WithValue(mox.Context, mlog.CidKey, cid) + go servectl(ctx, log.WithCid(cid), conn, shutdown) + } + }() + + // Remove old temporary files that somehow haven't been cleaned up. + tmpdir := mox.DataDirPath("tmp") + os.MkdirAll(tmpdir, 0770) + tmps, err := os.ReadDir(tmpdir) + if err != nil { + log.Errorx("listing files in tmpdir", err) + } else { + now := time.Now() + for _, e := range tmps { + if fi, err := e.Info(); err != nil { + log.Errorx("stat tmp file", err, mlog.Field("filename", e.Name())) + } else if now.Sub(fi.ModTime()) > 7*24*time.Hour { + p := filepath.Join(tmpdir, e.Name()) + if err := os.Remove(p); err != nil { + log.Errorx("removing stale temporary file", err, mlog.Field("path", p)) + } else { + log.Info("removed stale temporary file", mlog.Field("path", p)) + } + } + } + } + + // Graceful shutdown. 
+ sigc := make(chan os.Signal, 1) + signal.Notify(sigc, os.Interrupt, syscall.SIGTERM) + sig := <-sigc + log.Print("shutting down, waiting max 3s for existing connections", mlog.Field("signal", sig)) + shutdown() +} diff --git a/smtp/address.go b/smtp/address.go new file mode 100644 index 0000000..f25f378 --- /dev/null +++ b/smtp/address.go @@ -0,0 +1,316 @@ +package smtp + +import ( + "errors" + "fmt" + "strings" + + "github.com/mjl-/mox/dns" +) + +var ErrBadAddress = errors.New("invalid email address") + +// Localpart is a decoded local part of an email address, before the "@". +// For quoted strings, values do not hold the double quote or escaping backslashes. +// An empty string can be a valid localpart. +type Localpart string + +// String returns a packed representation of an address, with proper escaping/quoting, for use in SMTP. +func (lp Localpart) String() string { + // See ../rfc/5321:2322 ../rfc/6531:414 + // First we try as dot-string. If not possible we make a quoted-string. + dotstr := true + t := strings.Split(string(lp), ".") + for _, e := range t { + for _, c := range e { + if c >= '0' && c <= '9' || c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c > 0x7f { + continue + } + switch c { + case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '/', '=', '?', '^', '_', '`', '{', '|', '}', '~': + continue + } + dotstr = false + break + } + dotstr = dotstr && len(e) > 0 + } + dotstr = dotstr && len(t) > 0 + if dotstr { + return string(lp) + } + + // Make quoted-string. + r := `"` + for _, b := range lp { + if b == '"' || b == '\\' { + r += "\\" + string(b) + } else { + r += string(b) + } + } + r += `"` + return r +} + +// DSNString returns the localpart as string for use in a DSN. +// utf8 indicates if the remote MTA supports utf8 messaging. If not, the 7bit DSN +// encoding for "utf-8-addr-xtext" from RFC 6533 is used. 
+func (lp Localpart) DSNString(utf8 bool) string { + if utf8 { + return lp.String() + } + // ../rfc/6533:259 + r := "" + for _, c := range lp { + if c > 0x20 && c < 0x7f && c != '\\' && c != '+' && c != '=' { + r += string(c) + } else { + r += fmt.Sprintf(`\x{%x}`, c) + } + } + return r +} + +// IsInternational returns if this is an internationalized local part, i.e. has +// non-ASCII characters. +func (lp Localpart) IsInternational() bool { + for _, c := range lp { + if c > 0x7f { + return true + } + } + return false +} + +// Address is a parsed email address. +type Address struct { + Localpart Localpart + Domain dns.Domain // todo: shouldn't we accept an ip address here too? and merge this type into smtp.Path. +} + +// NewAddress returns an address. +func NewAddress(localpart Localpart, domain dns.Domain) Address { + return Address{localpart, domain} +} + +func (a Address) IsZero() bool { + return a == Address{} +} + +// Pack returns the address in string form. If smtputf8 is true, the domain is +// formatted with non-ASCII characters. If localpart has non-ASCII characters, +// they are returned regardless of smtputf8. +func (a Address) Pack(smtputf8 bool) string { + return a.Localpart.String() + "@" + a.Domain.XName(smtputf8) +} + +// String returns the address in string form with non-ASCII characters. +func (a Address) String() string { + return a.Localpart.String() + "@" + a.Domain.Name() +} + +// ParseAddress parses an email address. UTF-8 is allowed. +// Returns ErrBadAddress for invalid addresses. 
+func ParseAddress(s string) (address Address, err error) { + lp, rem, err := parseLocalPart(s) + if err != nil { + return Address{}, fmt.Errorf("%w: %s", ErrBadAddress, err) + } + if !strings.HasPrefix(rem, "@") { + return Address{}, fmt.Errorf("%w: expected @", ErrBadAddress) + } + rem = rem[1:] + d, err := dns.ParseDomain(rem) + if err != nil { + return Address{}, fmt.Errorf("%w: %s", ErrBadAddress, err) + } + return Address{lp, d}, err +} + +var ErrBadLocalpart = errors.New("invalid localpart") + +// ParseLocalpart parses the local part. +// UTF-8 is allowed. +// Returns ErrBadAddress for invalid addresses. +func ParseLocalpart(s string) (localpart Localpart, err error) { + lp, rem, err := parseLocalPart(s) + if err != nil { + return "", err + } + if rem != "" { + return "", fmt.Errorf("%w: remaining after localpart: %q", ErrBadLocalpart, rem) + } + return lp, nil +} + +func parseLocalPart(s string) (localpart Localpart, remain string, err error) { + p := &parser{s, 0} + + defer func() { + x := recover() + if x == nil { + return + } + e, ok := x.(error) + if !ok { + panic(x) + } + err = fmt.Errorf("%w: %s", ErrBadLocalpart, e) + }() + + lp := p.xlocalpart() + return lp, p.remainder(), nil +} + +type parser struct { + s string + o int +} + +func (p *parser) xerrorf(format string, args ...any) { + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) hasPrefix(s string) bool { + return strings.HasPrefix(p.s[p.o:], s) +} + +func (p *parser) take(s string) bool { + if p.hasPrefix(s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtake(s string) { + if !p.take(s) { + p.xerrorf("expected %q", s) + } +} + +func (p *parser) empty() bool { + return p.o == len(p.s) +} + +func (p *parser) xtaken(n int) string { + r := p.s[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) remainder() string { + r := p.s[p.o:] + p.o = len(p.s) + return r +} + +// todo: reduce duplication between implementations: ../smtp/address.go:/xlocalpart 
../dkim/parser.go:/xlocalpart ../smtpserver/parse.go:/xlocalpart +func (p *parser) xlocalpart() Localpart { + // ../rfc/5321:2316 + var s string + if p.hasPrefix(`"`) { + s = p.xquotedString() + } else { + s = p.xatom() + for p.take(".") { + s += "." + p.xatom() + } + } + // todo: have a strict parser that only allows the actual max of 64 bytes. some services have large localparts because of generated (bounce) addresses. + if len(s) > 128 { + // ../rfc/5321:3486 + p.xerrorf("localpart longer than 64 octets") + } + return Localpart(s) +} + +func (p *parser) xquotedString() string { + p.xtake(`"`) + var s string + var esc bool + for { + c := p.xchar() + if esc { + if c >= ' ' && c < 0x7f { + s += string(c) + esc = false + continue + } + p.xerrorf("invalid localpart, bad escaped char %c", c) + } + if c == '\\' { + esc = true + continue + } + if c == '"' { + return s + } + // todo: should we be accepting utf8 for quoted strings? + if c >= ' ' && c < 0x7f && c != '\\' && c != '"' || c > 0x7f { + s += string(c) + continue + } + p.xerrorf("invalid localpart, invalid character %c", c) + } +} + +func (p *parser) xchar() rune { + // We are careful to track invalid utf-8 properly. 
+ if p.empty() { + p.xerrorf("need another character") + } + var r rune + var o int + for i, c := range p.s[p.o:] { + if i > 0 { + o = i + break + } + r = c + } + if o == 0 { + p.o = len(p.s) + } else { + p.o += o + } + return r +} + +func (p *parser) takefn1(what string, fn func(c rune, i int) bool) string { + if p.empty() { + p.xerrorf("need at least one char for %s", what) + } + for i, c := range p.s[p.o:] { + if !fn(c, i) { + if i == 0 { + p.xerrorf("expected at least one char for %s, got char %c", what, c) + } + return p.xtaken(i) + } + } + return p.remainder() +} + +func (p *parser) xatom() string { + return p.takefn1("atom", func(c rune, i int) bool { + switch c { + case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '/', '=', '?', '^', '_', '`', '{', '|', '}', '~': + return true + } + return isalphadigit(c) || c > 0x7f + }) +} + +func isalpha(c rune) bool { + return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' +} + +func isdigit(c rune) bool { + return c >= '0' && c <= '9' +} + +func isalphadigit(c rune) bool { + return isalpha(c) || isdigit(c) +} diff --git a/smtp/address_test.go b/smtp/address_test.go new file mode 100644 index 0000000..82fbab4 --- /dev/null +++ b/smtp/address_test.go @@ -0,0 +1,92 @@ +package smtp + +import ( + "errors" + "testing" +) + +func TestParseLocalpart(t *testing.T) { + good := func(s string) { + t.Helper() + _, err := ParseLocalpart(s) + if err != nil { + t.Fatalf("unexpected error for localpart %q: %v", s, err) + } + } + + bad := func(s string) { + t.Helper() + _, err := ParseLocalpart(s) + if err == nil { + t.Fatalf("did not see expected error for localpart %q", s) + } + if !errors.Is(err, ErrBadLocalpart) { + t.Fatalf("expected ErrBadLocalpart, got %v", err) + } + } + + good("user") + good("a") + good("a.b.c") + good(`""`) + good(`"ok"`) + good(`"a.bc"`) + bad("") + bad(`"`) // missing ending dquot + bad("\x00") // control not allowed + bad("\"\\") // ending with backslash + bad("\"\x01") // control not allowed in dquote + 
bad(`""leftover`) // leftover data after close dquote +} + +func TestParseAddress(t *testing.T) { + good := func(s string) { + t.Helper() + _, err := ParseAddress(s) + if err != nil { + t.Fatalf("unexpected error for localpart %q: %v", s, err) + } + } + + bad := func(s string) { + t.Helper() + _, err := ParseAddress(s) + if err == nil { + t.Fatalf("did not see expected error for localpart %q", s) + } + if !errors.Is(err, ErrBadAddress) { + t.Fatalf("expected ErrBadAddress, got %v", err) + } + } + + good("user@example.com") + bad("user@@example.com") + bad("user") // missing @domain + bad("@example.com") // missing localpart + bad(`"@example.com`) // missing ending dquot or domain + bad("\x00@example.com") // control not allowed + bad("\"\\@example.com") // missing @domain + bad("\"\x01@example.com") // control not allowed in dquote + bad(`""leftover@example.com`) // leftover data after close dquot +} + +func TestPackLocalpart(t *testing.T) { + var l = []struct { + input, expect string + }{ + {``, `""`}, // No atom. + {`a.`, `"a."`}, // Empty atom not allowed. + {`a.b`, `a.b`}, // Fine. + {"azAZ09!#$%&'*+-/=?^_`{|}~", "azAZ09!#$%&'*+-/=?^_`{|}~"}, // All ascii that are fine as atom. + {` `, `" "`}, + {"\x01", "\"\x01\""}, // todo: should probably return an error for control characters. + {"<>", `"<>"`}, + } + + for _, e := range l { + r := Localpart(e.input).String() + if r != e.expect { + t.Fatalf("PackLocalpart for %q, expect %q, got %q", e.input, e.expect, r) + } + } +} diff --git a/smtp/addrlit.go b/smtp/addrlit.go new file mode 100644 index 0000000..7883bb7 --- /dev/null +++ b/smtp/addrlit.go @@ -0,0 +1,16 @@ +package smtp + +import ( + "net" +) + +// AddressLiteral returns an IPv4 or IPv6 address literal for use in SMTP. 
+func AddressLiteral(ip net.IP) string { + // ../rfc/5321:2309 + s := "[" + if ip.To4() == nil { + s += "IPv6:" + } + s += ip.String() + "]" + return s +} diff --git a/smtp/codes.go b/smtp/codes.go new file mode 100644 index 0000000..81d7ab3 --- /dev/null +++ b/smtp/codes.go @@ -0,0 +1,145 @@ +package smtp + +// ../rfc/5321:2863 + +// Reply codes. +var ( + C211SystemStatus = 211 + C214Help = 214 + C220ServiceReady = 220 + C221Closing = 221 + C235AuthSuccess = 235 // ../rfc/4954:573 + + C250Completed = 250 + C251UserNotLocalWillForward = 251 + C252WithoutVrfy = 252 + + C334ContinueAuth = 334 // ../rfc/4954:187 + C354Continue = 354 + + C421ServiceUnavail = 421 + C432PasswdTransitionNeeded = 432 // ../rfc/4954:578 + C454TempAuthFail = 454 // ../rfc/4954:586 + C450MailboxUnavail = 450 + C451LocalErr = 451 + C452StorageFull = 452 // Also for "too many recipients", ../rfc/5321:3576 + C455BadParams = 455 + + C500BadSyntax = 500 + C501BadParamSyntax = 501 + C502CmdNotImpl = 502 + C503BadCmdSeq = 503 + C504ParamNotImpl = 504 + C521HostNoMail = 521 // ../rfc/7504:179 + C530SecurityRequired = 530 // ../rfc/3207:148 ../rfc/4954:623 + C538EncReqForAuth = 538 // ../rfc/4954:630 + C534AuthMechWeak = 534 // ../rfc/4954:593 + C535AuthBadCreds = 535 // ../rfc/4954:600 + C550MailboxUnavail = 550 + C551UserNotLocal = 551 + C552MailboxFull = 552 + C553BadMailbox = 553 + C554TransactionFailed = 554 + C555UnrecognizedAddrParams = 555 + C556DomainNoMail = 556 // ../rfc/7504:207 +) + +// Short enhanced reply codes, without leading number and first dot. +// +// See https://www.iana.org/assignments/smtp-enhanced-status-codes/smtp-enhanced-status-codes.xhtml +var ( + // 0.x - Other or Undefined Status. + // ../rfc/3463:287 + SeOther00 = "0.0" + + // 1.x - Address. 
+ // ../rfc/3463:295 + SeAddr1Other0 = "1.0" + SeAddr1UnknownDestMailbox1 = "1.1" + SeAddr1UnknownSystem2 = "1.2" + SeAddr1MailboxSyntax3 = "1.3" + SeAddr1MailboxAmbiguous4 = "1.4" + SeAddr1DestValid5 = "1.5" // For success responses. + SeAddr1DestMailboxMoved6 = "1.6" + SeAddr1SenderSyntax7 = "1.7" + SeAddr1BadSenderSystemAddress8 = "1.8" + SeAddr1NullMX = "1.10" // ../rfc/7505:237 + + // 2.x - Mailbox. + // ../rfc/3463:361 + SeMailbox2Other0 = "2.0" + SeMailbox2Disabled1 = "2.1" + SeMailbox2Full2 = "2.2" + SeMailbox2MsgLimitExceeded3 = "2.3" + SeMailbox2MailListExpansion4 = "2.4" + + // 3.x - Mail system. + // ../rfc/3463:405 + SeSys3Other0 = "3.0" + SeSys3StorageFull1 = "3.1" + SeSys3NotAccepting2 = "3.2" + SeSys3NotSupported3 = "3.3" + SeSys3MsgLimitExceeded4 = "3.4" + SeSys3Misconfigured5 = "3.5" + + // 4.x - Network and routing. + // ../rfc/3463:455 + SeNet4Other0 = "4.0" + SeNet4NoAnswer1 = "4.1" + SeNet4BadConn2 = "4.2" + SeNet4Name3 = "4.3" + SeNet4Routing4 = "4.4" + SeNet4Congestion5 = "4.5" + SeNet4Loop6 = "4.6" + SeNet4DeliveryExpired7 = "4.7" + + // 5.x - Mail delivery protocol. + // ../rfc/3463:527 + SeProto5Other0 = "5.0" + SeProto5BadCmdOrSeq1 = "5.1" + SeProto5Syntax2 = "5.2" + SeProto5TooManyRcpts3 = "5.3" + SeProto5BadParams4 = "5.4" + SeProto5ProtocolMismatch5 = "5.5" + SeProto5AuthExchangeTooLong = "5.6" // ../rfc/4954:650 + + // 6.x - Message content/media. + // ../rfc/3463:579 + SeMsg6Other0 = "6.0" + SeMsg6MediaUnsupported1 = "6.1" + SeMsg6ConversionProhibited2 = "6.2" + SeMsg6ConversoinUnsupported3 = "6.3" + SeMsg6ConversionWithLoss4 = "6.4" + SeMsg6ConversionFailed5 = "6.5" + SeMsg6NonASCIIAddrNotPermitted7 = "6.7" // ../rfc/6531:735 + SeMsg6UTF8ReplyRequired8 = "6.8" // ../rfc/6531:746 + SeMsg6UTF8CannotTransfer9 = "6.9" // ../rfc/6531:758 + + // 7.x - Security/policy. 
+ // ../rfc/3463:628 + SePol7Other0 = "7.0" + SePol7DeliveryUnauth1 = "7.1" + SePol7ExpnProhibited2 = "7.2" + SePol7ConversionImpossible3 = "7.3" + SePol7Unsupported4 = "7.4" + SePol7CryptoFailure5 = "7.5" + SePol7CryptoUnsupported6 = "7.6" + SePol7MsgIntegrity7 = "7.7" + SePol7AuthBadCreds8 = "7.8" // ../rfc/4954:600 + SePol7AuthWeakMech9 = "7.9" // ../rfc/4954:593 + SePol7EncNeeded10 = "7.10" // ../rfc/5248:359 + SePol7EncReqForAuth11 = "7.11" // ../rfc/4954:630 + SePol7PasswdTransitionReq12 = "7.12" // ../rfc/4954:578 + SePol7AccountDisabled13 = "7.13" // ../rfc/5248:399 + SePol7TrustReq14 = "7.14" // ../rfc/5248:418 + SePol7NoDKIMPass20 = "7.20" // ../rfc/7372:137 + SePol7NoDKIMAccept21 = "7.21" // ../rfc/7372:148 + SePol7NoDKIMAuthorMatch22 = "7.22" // ../rfc/7372:175 + SePol7SPFResultFail23 = "7.23" // ../rfc/7372:192 + SePol7SPFError24 = "7.24" // ../rfc/7372:204 + SePol7RevDNSFail25 = "7.25" // ../rfc/7372:233 + SePol7MultiAuthFails26 = "7.26" // ../rfc/7372:246 + SePol7SenderHasNullMX27 = "7.27" // ../rfc/7505:246 + SePol7ARCFail = "7.29" // ../rfc/8617:1438 + SePol7MissingReqTLS = "7.30" // ../rfc/8689:448 +) diff --git a/smtp/data.go b/smtp/data.go new file mode 100644 index 0000000..bc008f0 --- /dev/null +++ b/smtp/data.go @@ -0,0 +1,138 @@ +package smtp + +import ( + "bufio" + "bytes" + "errors" + "io" +) + +var errMissingCRLF = errors.New("missing crlf at end of message") + +// DataWrite reads data (a mail message) from r, and writes it to smtp +// connection w with dot stuffing, as required by the SMTP data command. +func DataWrite(w io.Writer, r io.Reader) error { + // ../rfc/5321:2003 + + var prevlast, last byte = '\r', '\n' // Start on a new line, so we insert a dot if the first byte is a dot. + // todo: at least for smtp submission we should probably set a max line length, eg 1000 octets including crlf. ../rfc/5321:3512 + // todo: at least for smtp submission or a pedantic mode, we should refuse messages with bare \r or bare \n. 
+ buf := make([]byte, 8*1024) + for { + nr, err := r.Read(buf) + if nr > 0 { + // Process buf by writing a line at a time, and checking if the next character + // after the line starts with a dot. Insert an extra dot if so. + p := buf[:nr] + for len(p) > 0 { + if p[0] == '.' && prevlast == '\r' && last == '\n' { + if _, err := w.Write([]byte{'.'}); err != nil { + return err + } + } + // Look for the next newline, or end of buffer. + n := 0 + for n < len(p) { + c := p[n] + n++ + if c == '\n' { + break + } + } + if _, err := w.Write(p[:n]); err != nil { + return err + } + // Keep track of the last two bytes we've written. + if n == 1 { + prevlast, last = last, p[0] + } else { + prevlast, last = p[n-2], p[n-1] + } + p = p[n:] + } + } + if err == io.EOF { + break + } else if err != nil { + return err + } + } + if prevlast != '\r' || last != '\n' { + return errMissingCRLF + } + if _, err := w.Write(dotcrlf); err != nil { + return err + } + return nil +} + +var dotcrlf = []byte(".\r\n") + +// DataReader is an io.Reader that reads data from an SMTP DATA command, doing dot +// unstuffing and returning io.EOF when a bare dot is received. Use NewDataReader. +type DataReader struct { + // ../rfc/5321:2003 + r *bufio.Reader + plast, last byte + buf []byte // From previous read. + err error // Read error, for after r.buf is exhausted. +} + +// NewDataReader returns an initialized DataReader. +func NewDataReader(r *bufio.Reader) *DataReader { + return &DataReader{ + r: r, + // Set up initial state to accept a message that is only "." and CRLF. + plast: '\r', + last: '\n', + } +} + +// Read implements io.Reader. +func (r *DataReader) Read(p []byte) (int, error) { + wrote := 0 + for len(p) > 0 { + // Read until newline as long as it fits in the buffer. + if len(r.buf) == 0 { + if r.err != nil { + break + } + // todo: set a max length, eg 1000 octets including crlf excluding potential leading dot. 
../rfc/5321:3512 + r.buf, r.err = r.r.ReadSlice('\n') + if r.err == bufio.ErrBufferFull { + r.err = nil + } else if r.err == io.EOF { + // Mark EOF as bad for now. If we see the ending dotcrlf below, err becomes regular + // io.EOF again. + r.err = io.ErrUnexpectedEOF + } + } + if len(r.buf) > 0 { + // We require crlf. A bare LF is not a line ending. ../rfc/5321:2032 + // todo: we could return an error for a bare \n. + if r.plast == '\r' && r.last == '\n' { + if bytes.Equal(r.buf, dotcrlf) { + r.buf = nil + r.err = io.EOF + break + } else if r.buf[0] == '.' { + r.buf = r.buf[1:] + } + } + n := len(r.buf) + if n > len(p) { + n = len(p) + } + copy(p, r.buf[:n]) + if n == 1 { + r.plast, r.last = r.last, r.buf[0] + } else if n > 1 { + r.plast, r.last = r.buf[n-2], r.buf[n-1] + } + p = p[n:] + r.buf = r.buf[n:] + wrote += n + } + } + return wrote, r.err +} diff --git a/smtp/data_test.go b/smtp/data_test.go new file mode 100644 index 0000000..085a1d2 --- /dev/null +++ b/smtp/data_test.go @@ -0,0 +1,91 @@ +package smtp + +import ( + "bufio" + "errors" + "io" + "strings" + "testing" +) + +func TestDataWrite(t *testing.T) { + if err := DataWrite(io.Discard, strings.NewReader("bad")); err == nil || !errors.Is(err, errMissingCRLF) { + t.Fatalf("got err %v, expected errMissingCRLF", err) + } + if err := DataWrite(io.Discard, strings.NewReader(".")); err == nil || !errors.Is(err, errMissingCRLF) { + t.Fatalf("got err %v, expected errMissingCRLF", err) + } + + check := func(msg, want string) { + t.Helper() + w := &strings.Builder{} + if err := DataWrite(w, strings.NewReader(msg)); err != nil { + t.Fatalf("writing smtp data: %s", err) + } + got := w.String() + if got != want { + t.Fatalf("got %q, expected %q, for msg %q", got, want, msg) + } + } + + check("", ".\r\n") + check(".\r\n", "..\r\n.\r\n") + check("header: abc\r\n\r\nmessage\r\n", "header: abc\r\n\r\nmessage\r\n.\r\n") +} + +func TestDataReader(t *testing.T) { + // Copy with a 1 byte buffer for reading. 
+ smallCopy := func(d io.Writer, r io.Reader) (int, error) { + var wrote int + buf := make([]byte, 1) + for { + n, err := r.Read(buf) + if n > 0 { + nn, err := d.Write(buf) + if nn > 0 { + wrote += nn + } + if err != nil { + return wrote, err + } + } + if err == io.EOF { + break + } else if err != nil { + return wrote, err + } + } + return wrote, nil + } + + check := func(data, want string) { + t.Helper() + + s := &strings.Builder{} + dr := NewDataReader(bufio.NewReader(strings.NewReader(data))) + if _, err := io.Copy(s, dr); err != nil { + t.Fatalf("got err %v", err) + } else if got := s.String(); got != want { + t.Fatalf("got %q, expected %q, for %q", got, want, data) + } + + s = &strings.Builder{} + dr = NewDataReader(bufio.NewReader(strings.NewReader(data))) + if _, err := smallCopy(s, dr); err != nil { + t.Fatalf("got err %v", err) + } else if got := s.String(); got != want { + t.Fatalf("got %q, expected %q, for %q", got, want, data) + } + } + + check("test\r\n.\r\n", "test\r\n") + check(".\r\n", "") + check(".test\r\n.\r\n", "test\r\n") // Unnecessary dot, but valid in SMTP. + check("..test\r\n.\r\n", ".test\r\n") + + s := &strings.Builder{} + dr := NewDataReader(bufio.NewReader(strings.NewReader("no end"))) + if _, err := io.Copy(s, dr); err != io.ErrUnexpectedEOF { + t.Fatalf("got err %v, expected io.ErrUnexpectedEOF", err) + } +} diff --git a/smtp/doc.go b/smtp/doc.go new file mode 100644 index 0000000..b46c104 --- /dev/null +++ b/smtp/doc.go @@ -0,0 +1,2 @@ +// Package smtp provides SMTP definitions and functions shared between smtpserver and smtpclient. +package smtp diff --git a/smtp/ehlo.go b/smtp/ehlo.go new file mode 100644 index 0000000..98a4c1c --- /dev/null +++ b/smtp/ehlo.go @@ -0,0 +1,17 @@ +package smtp + +import ( + "net" + + "github.com/mjl-/mox/dns" +) + +// Ehlo is the remote identification of an incoming SMTP connection. +type Ehlo struct { + Name dns.IPDomain // Name from EHLO/HELO line. Can be an IP or host name. 
+ ConnIP net.IP // Address of connection. +} + +func (e Ehlo) IsZero() bool { + return e.Name.IsZero() && e.ConnIP == nil +} diff --git a/smtp/path.go b/smtp/path.go new file mode 100644 index 0000000..ae98cd6 --- /dev/null +++ b/smtp/path.go @@ -0,0 +1,67 @@ +package smtp + +import ( + "strings" + + "github.com/mjl-/mox/dns" +) + +// Path is an SMTP forward/reverse path, as used in MAIL FROM and RCPT TO +// commands. +type Path struct { + Localpart Localpart + IPDomain dns.IPDomain +} + +func (p Path) IsZero() bool { + return p.Localpart == "" && p.IPDomain.IsZero() +} + +// String returns a string representation with ASCII-only domain name. +func (p Path) String() string { + return p.XString(false) +} + +// XString is like String, but returns unicode UTF-8 domain names if utf8 is +// true. +func (p Path) XString(utf8 bool) string { + if p.Localpart == "" && p.IPDomain.IsZero() { + return "" + } + return p.Localpart.String() + "@" + p.IPDomain.XString(utf8) +} + +// ASCIIExtra returns an ascii-only path if utf8 is true and the ipdomain is a +// unicode domain. Otherwise returns an empty string. +// +// For use in comments in message headers added during SMTP. +func (p Path) ASCIIExtra(utf8 bool) string { + if utf8 && p.IPDomain.Domain.Unicode != "" { + return p.XString(false) + } + return "" +} + +// DSNString returns a string representation as used with DSN with/without +// UTF-8 support. +// +// If utf8 is false, the domain is represented as US-ASCII (IDNA), and the +// localpart is encoded in 7bit according to RFC 6533. 
+func (p Path) DSNString(utf8 bool) string { + if utf8 { + return p.XString(utf8) + } + return p.Localpart.DSNString(utf8) + "@" + p.IPDomain.XString(utf8) +} + +func (p Path) Equal(o Path) bool { + if p.Localpart != o.Localpart { + return false + } + d0 := p.IPDomain + d1 := o.IPDomain + if len(d0.IP) > 0 || len(d1.IP) > 0 { + return d0.IP.Equal(d1.IP) + } + return strings.EqualFold(d0.Domain.ASCII, d1.Domain.ASCII) +} diff --git a/smtpclient/client.go b/smtpclient/client.go new file mode 100644 index 0000000..884c66f --- /dev/null +++ b/smtpclient/client.go @@ -0,0 +1,737 @@ +// Package smtpclient is an SMTP client, used by the queue for sending outgoing messages. +package smtpclient + +import ( + "bufio" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/smtp" +) + +// todo future: add function to deliver message to multiple recipients. requires more elaborate return value, indicating success per message: some recipients may succeed, others may fail, and we should still deliver. to prevent backscatter, we also sometimes don't allow multiple recipients. ../rfc/5321:1144 + +var ( + metricCommands = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_smtpclient_command_duration_seconds", + Help: "SMTP client command duration and result codes in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30, 60, 120}, + }, + []string{ + "cmd", + "code", + "secode", + }, + ) +) + +var ( + ErrSize = errors.New("message too large for remote smtp server") // SMTP server announced a maximum message size and the message to be delivered exceeds it. 
+ Err8bitmimeUnsupported = errors.New("remote smtp server does not implement 8bitmime extension, required by message") + ErrSMTPUTF8Unsupported = errors.New("remote smtp server does not implement smtputf8 extension, required by message") + ErrStatus = errors.New("remote smtp server sent unexpected response status code") // Relatively common, e.g. when a 250 OK was expected and server sent 451 temporary error. + ErrProtocol = errors.New("smtp protocol error") // After a malformed SMTP response or inconsistent multi-line response. + ErrTLS = errors.New("tls error") // E.g. handshake failure, or hostname validation was required and failed. + ErrBotched = errors.New("smtp connection is botched") // Set on a client, and returned for new operations, after an i/o error or malformed SMTP response. + ErrClosed = errors.New("client is closed") +) + +// TLSMode indicates if TLS must, should or must not be used. +type TLSMode string + +const ( + // TLS with validated certificate is required: matching name, not expired, trusted by CA. + TLSStrict TLSMode = "strict" + + // Use TLS if remote claims to support it, but do not validate the certificate + // (not trusted by CA, different host name or expired certificate is accepted). + TLSOpportunistic TLSMode = "opportunistic" + + // TLS must not be attempted, e.g. due to earlier TLS handshake error. + TLSSkip TLSMode = "skip" +) + +// Client is an SMTP client that can deliver messages to a mail server. +// +// Use New to make a new client. +type Client struct { + // OrigConn is the original (TCP) connection. We'll read from/write to conn, which + // can be wrapped in a tls.Client. We close origConn instead of conn because + // closing the TLS connection would send a TLS close notification, which may block + // for 5s if the server isn't reading it (because it is also sending it). 
+ origConn net.Conn + conn net.Conn + + r *bufio.Reader + w *bufio.Writer + log *mlog.Log + lastlog time.Time // For adding delta timestamps between log lines. + cmds []string // Last or active command, for generating errors and metrics. + cmdStart time.Time // Start of command. + + botched bool // If set, protocol is out of sync and no further commands can be sent. + needRset bool // If set, a new delivery requires an RSET command. + + extEcodes bool // Remote server supports sending extended error codes. + extStartTLS bool // Remote server supports STARTTLS. + ext8bitmime bool + extSize bool // Remote server supports SIZE parameter. + maxSize int64 // Max size of email message. + extPipelining bool // Remote server supports command pipelining. + extSMTPUTF8 bool // Remote server supports SMTPUTF8 extension. +} + +// Error represents a failure to deliver a message. +// +// Code, Secode, Command and Line are only set for SMTP-level errors, and are zero +// values otherwise. +type Error struct { + // Whether failure is permanent, typically because of 5xx response. + Permanent bool + // SMTP response status, e.g. 2xx for success, 4xx for transient error and 5xx for + // permanent failure. + Code int + // Short enhanced status, minus first digit and dot. Can be empty, e.g. for io + // errors or if remote does not send enhanced status codes. If remote responds with + // "550 5.7.1 ...", the Secode will be "7.1". + Secode string + // SMTP command causing failure. + Command string + // For errors due to SMTP responses, the full SMTP line excluding CRLF that caused + // the error. Typically the last line read. + Line string + // Underlying error, e.g. one of the Err variables in this package, or io errors. + Err error +} + +// Unwrap returns the underlying Err. +func (e Error) Unwrap() error { + return e.Err +} + +// Error returns a readable error string. 
+func (e Error) Error() string { + s := "" + if e.Err != nil { + s = e.Err.Error() + ", " + } + if e.Permanent { + s += "permanent" + } else { + s += "transient" + } + if e.Line != "" { + s += ": " + e.Line + } + return s +} + +// New initializes an SMTP session on the given connection, returning a client that +// can be used to deliver messages. +// +// New reads the server greeting, identifies itself with a HELO or EHLO command, +// initializes TLS if remote supports it and optionally authenticates. If +// successful, a client is returned on which eventually Close must be called. +// Otherwise an error is returned and the caller is responsible for closing the +// connection. +// +// Connecting to the correct host is outside the scope of the client. The queue +// managing outgoing messages decides which host to deliver to, taking multiple MX +// records with preferences, other DNS records, MTA-STS, retries and special +// cases into account. +// +// tlsMode indicates if TLS is required, optional or should not be used. A +// certificate is only validated (trusted, match remoteHostname and not expired) +// for tls mode TLSStrict. By default, SMTP does not verify TLS for interoperability +// reasons, but MTA-STS or DANE can require it. If opportunistic TLS is used, and a +// TLS error is encountered, the caller may want to try again (on a new connection) +// without TLS. +// +// If auth is non-empty, it is executed as a command after SMTP greeting/EHLO +// initialization, before starting delivery. For authenticating to a submission +// service with AUTH PLAIN, only meant for testing. 
+func New(ctx context.Context, log *mlog.Log, conn net.Conn, tlsMode TLSMode, remoteHostname, auth string) (*Client, error) { + c := &Client{ + origConn: conn, + conn: conn, + lastlog: time.Now(), + cmds: []string{"(none)"}, + } + c.log = log.Fields(mlog.Field("smtpclient", "")).MoreFields(func() []mlog.Pair { + now := time.Now() + l := []mlog.Pair{ + mlog.Field("delta", now.Sub(c.lastlog)), + } + c.lastlog = now + return l + }) + // We don't wrap reads in a timeoutReader for fear of an optional TLS wrapper doing + // reads without the client asking for it. Such reads could result in a timeout + // error. + c.r = bufio.NewReader(moxio.NewTraceReader(c.log, "RS: ", c.conn)) + // We use a single write timeout of 30 seconds. + // todo future: use different timeouts ../rfc/5321:3610 + c.w = bufio.NewWriter(moxio.NewTraceWriter(c.log, "LC: ", timeoutWriter{c.conn, 30 * time.Second, c.log})) + + if err := c.hello(ctx, tlsMode, remoteHostname, auth); err != nil { + return nil, err + } + return c, nil +} + +// xbotchf generates a temporary error and marks the client as botched. e.g. for +// i/o errors or invalid protocol messages. +func (c *Client) xbotchf(code int, secode string, lastLine, format string, args ...any) { + c.botched = true + c.xerrorf(false, code, secode, lastLine, format, args...) +} + +func (c *Client) xerrorf(permanent bool, code int, secode, lastLine, format string, args ...any) { + var cmd string + if len(c.cmds) > 0 { + cmd = c.cmds[0] + } + panic(Error{permanent, code, secode, cmd, lastLine, fmt.Errorf(format, args...)}) +} + +// timeoutWriter passes each Write on to conn after setting a write deadline on conn based on +// timeout. 
+type timeoutWriter struct { + conn net.Conn + timeout time.Duration + log *mlog.Log +} + +func (w timeoutWriter) Write(buf []byte) (int, error) { + if err := w.conn.SetWriteDeadline(time.Now().Add(w.timeout)); err != nil { + w.log.Errorx("setting write deadline", err) + } + + return w.conn.Write(buf) +} + +var bufs = moxio.NewBufpool(8, 2*1024) + +func (c *Client) xreadline() string { + // todo: could have per-operation timeouts. and rfc suggests higher minimum timeouts. ../rfc/5321:3610 + if err := c.conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil { + c.log.Errorx("setting read deadline", err) + } + + line, err := bufs.Readline(c.r) + if err != nil { + c.xbotchf(0, "", "", "%s: %w", strings.Join(c.cmds, ","), err) + } + return line +} + +func (c *Client) xwritelinef(format string, args ...any) { + c.xbwritelinef(format, args...) + c.xflush() +} + +func (c *Client) xwriteline(line string) { + c.xbwriteline(line) + c.xflush() +} + +func (c *Client) xbwritelinef(format string, args ...any) { + c.xbwriteline(fmt.Sprintf(format, args...)) +} + +func (c *Client) xbwriteline(line string) { + _, err := fmt.Fprintf(c.w, "%s\r\n", line) + if err != nil { + c.xbotchf(0, "", "", "write: %w", err) + } +} + +func (c *Client) xflush() { + err := c.w.Flush() + if err != nil { + c.xbotchf(0, "", "", "writes: %w", err) + } +} + +// read response, possibly multiline, with supporting extended codes based on configuration in client. +func (c *Client) xread() (code int, secode, lastLine string, texts []string) { + return c.xreadecode(c.extEcodes) +} + +// read response, possibly multiline. +// if ecodes, extended codes are parsed. 
+func (c *Client) xreadecode(ecodes bool) (code int, secode, lastLine string, texts []string) { + for { + co, sec, text, line, last := c.xread1(ecodes) + texts = append(texts, text) + if code != 0 && co != code { + // ../rfc/5321:2771 + c.xbotchf(0, "", line, "%w: multiline response with different codes, previous %d, last %d", ErrProtocol, code, co) + } + code = co + if last { + cmd := "" + if len(c.cmds) > 0 { + cmd = c.cmds[0] + // We only keep the last, so we're not creating new slices all the time. + if len(c.cmds) > 1 { + c.cmds = c.cmds[1:] + } + } + metricCommands.WithLabelValues(cmd, fmt.Sprintf("%d", co), sec).Observe(float64(time.Since(c.cmdStart)) / float64(time.Second)) + c.log.Debug("smtpclient command result", mlog.Field("cmd", cmd), mlog.Field("code", co), mlog.Field("secode", sec), mlog.Field("duration", time.Since(c.cmdStart))) + return co, sec, line, texts + } + } +} + +// read single response line. +// if ecodes, extended codes are parsed. +func (c *Client) xread1(ecodes bool) (code int, secode, text, line string, last bool) { + line = c.xreadline() + i := 0 + for ; i < len(line) && line[i] >= '0' && line[i] <= '9'; i++ { + } + if i != 3 { + c.xbotchf(0, "", line, "%w: expected response code: %s", ErrProtocol, line) + } + v, err := strconv.ParseInt(line[:i], 10, 32) + if err != nil { + c.xbotchf(0, "", line, "%w: bad response code (%s): %s", ErrProtocol, err, line) + } + code = int(v) + major := code / 100 + s := line[3:] + if strings.HasPrefix(s, "-") || strings.HasPrefix(s, " ") { + last = s[0] == ' ' + s = s[1:] + } else if s == "" { + // Allow missing space. 
../rfc/5321:2570 ../rfc/5321:2612 + last = true + } else { + c.xbotchf(0, "", line, "%w: expected space or dash after response code: %s", ErrProtocol, line) + } + + if ecodes { + secode, s = parseEcode(major, s) + } + + return code, secode, s, line, last +} + +func parseEcode(major int, s string) (secode string, remain string) { + o := 0 + bad := false + take := func(need bool, a, b byte) bool { + if !bad && o < len(s) && s[o] >= a && s[o] <= b { + o++ + return true + } + bad = bad || need + return false + } + digit := func(need bool) bool { + return take(need, '0', '9') + } + dot := func() bool { + return take(true, '.', '.') + } + + digit(true) + dot() + xo := o + digit(true) + for digit(false) { + } + dot() + digit(true) + for digit(false) { + } + secode = s[xo:o] + take(false, ' ', ' ') + if bad || int(s[0])-int('0') != major { + return "", s + } + return secode, s[o:] +} + +func (c *Client) recover(rerr *error) { + x := recover() + if x == nil { + return + } + cerr, ok := x.(Error) + if !ok { + metrics.PanicInc("smtpclient") + panic(x) + } + *rerr = cerr +} + +func (c *Client) hello(ctx context.Context, tlsMode TLSMode, remoteHostname, auth string) (rerr error) { + defer c.recover(&rerr) + + // perform EHLO handshake, falling back to HELO if server does not appear to + // implement EHLO. + hello := func(heloOK bool) { + // Write EHLO and parse the supported extensions. 
+ // ../rfc/5321:987 + c.cmds[0] = "ehlo" + c.cmdStart = time.Now() + // Syntax: ../rfc/5321:1827 + c.xwritelinef("EHLO %s", mox.Conf.Static.HostnameDomain.ASCII) + code, _, lastLine, remains := c.xreadecode(false) + switch code { + // ../rfc/5321:997 + // ../rfc/5321:3098 + case smtp.C500BadSyntax, smtp.C501BadParamSyntax, smtp.C502CmdNotImpl, smtp.C503BadCmdSeq, smtp.C504ParamNotImpl: + if !heloOK { + c.xerrorf(true, code, "", lastLine, "%w: remote claims ehlo is not supported", ErrProtocol) + } + // ../rfc/5321:996 + c.cmds[0] = "helo" + c.cmdStart = time.Now() + c.xwritelinef("HELO %s", mox.Conf.Static.HostnameDomain.ASCII) + code, _, lastLine, _ = c.xreadecode(false) + if code != smtp.C250Completed { + c.xerrorf(code/100 == 5, code, "", lastLine, "%w: expected 250 to HELO, got %d", ErrStatus, code) + } + return + case smtp.C250Completed: + default: + c.xerrorf(code/100 == 5, code, "", lastLine, "%w: expected 250, got %d", ErrStatus, code) + } + for _, s := range remains[1:] { + // ../rfc/5321:1869 + s = strings.ToUpper(strings.TrimSpace(s)) + switch s { + case "STARTTLS": + c.extStartTLS = true + case "ENHANCEDSTATUSCODES": + c.extEcodes = true + case "8BITMIME": + c.ext8bitmime = true + case "PIPELINING": + c.extPipelining = true + default: + // For SMTPUTF8 we must ignore any parameter. ../rfc/6531:207 + if s == "SMTPUTF8" || strings.HasPrefix(s, "SMTPUTF8 ") { + c.extSMTPUTF8 = true + } else if strings.HasPrefix(s, "SIZE ") { + c.extSize = true + if v, err := strconv.ParseInt(s[len("SIZE "):], 10, 64); err == nil { + c.maxSize = v + } + } + } + } + } + + // Read greeting. + c.cmds = []string{"(greeting)"} + c.cmdStart = time.Now() + code, _, lastLine, _ := c.xreadecode(false) + if code != smtp.C220ServiceReady { + c.xerrorf(code/100 == 5, code, "", lastLine, "%w: expected 220, got %d", ErrStatus, code) + } + + // Write EHLO, falling back to HELO if server doesn't appear to support it. 
+ hello(true) + + // Attempt TLS if remote understands STARTTLS or if caller requires it. + if c.extStartTLS && tlsMode != TLSSkip || tlsMode == TLSStrict { + c.log.Debug("starting tls client") + c.cmds[0] = "starttls" + c.cmdStart = time.Now() + c.xwritelinef("STARTTLS") + code, secode, lastLine, _ := c.xread() + // ../rfc/3207:107 + if code != smtp.C220ServiceReady { + c.xerrorf(code/100 == 5, code, secode, lastLine, "%w: STARTTLS: got %d, expected 220", ErrTLS, code) + } + + // We don't want to do TLS on top of c.r because it also prints protocol traces: We + // don't want to log the TLS stream. So we'll do TLS on the underlying connection, + // but make sure any bytes already read and in the buffer are used for the TLS + // handshake. + conn := c.conn + if n := c.r.Buffered(); n > 0 { + conn = &moxio.PrefixConn{ + PrefixReader: io.LimitReader(c.r, int64(n)), + Conn: conn, + } + } + + // For TLSStrict, the Go TLS library performs the checks needed for MTA-STS. + // ../rfc/8461:646 + // todo: possibly accept older TLS versions for TLSOpportunistic? + tlsConfig := &tls.Config{ + ServerName: remoteHostname, + RootCAs: mox.Conf.Static.TLS.CertPool, + InsecureSkipVerify: tlsMode != TLSStrict, + MinVersion: tls.VersionTLS12, // ../rfc/8996:31 ../rfc/8997:66 + } + nconn := tls.Client(conn, tlsConfig) + c.conn = nconn + + nctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + err := nconn.HandshakeContext(nctx) + if err != nil { + c.xerrorf(false, 0, "", "", "%w: STARTTLS TLS handshake: %s", ErrTLS, err) + } + cancel() + c.r = bufio.NewReader(moxio.NewTraceReader(c.log, "RS: ", c.conn)) + c.w = bufio.NewWriter(moxio.NewTraceWriter(c.log, "LC: ", c.conn)) // No need to wrap in timeoutWriter, it would just set the timeout on the underlying connection, which is still active. 

		tlsversion, ciphersuite := mox.TLSInfo(nconn)
		c.log.Debug("tls client handshake done", mlog.Field("tls", tlsversion), mlog.Field("ciphersuite", ciphersuite))

		// Redo the EHLO/HELO exchange: the server may announce a different set of
		// extensions on the now-encrypted channel.
		hello(false)
	}

	if auth != "" {
		// No metrics, only used for tests.
		c.cmds[0] = "auth"
		c.cmdStart = time.Now()
		c.xwriteline(auth)
		code, secode, lastLine, _ := c.xread()
		if code != smtp.C235AuthSuccess {
			c.xerrorf(code/100 == 5, code, secode, lastLine, "%w: auth: got %d, expected 2xx", ErrStatus, code)
		}
	}

	return
}

// Supports8BITMIME returns whether the SMTP server supports the 8BITMIME
// extension, needed for sending data with non-ASCII bytes.
func (c *Client) Supports8BITMIME() bool {
	return c.ext8bitmime
}

// SupportsSMTPUTF8 returns whether the SMTP server supports the SMTPUTF8
// extension, needed for sending messages with UTF-8 in headers or in an (SMTP)
// address.
func (c *Client) SupportsSMTPUTF8() bool {
	return c.extSMTPUTF8
}

// Deliver attempts to deliver a message to a mail server.
//
// mailFrom must be an email address, or empty in case of a DSN. rcptTo must be
// an email address.
//
// If the message contains bytes with the high bit set, req8bitmime must be true. If
// set, the remote server must support the 8BITMIME extension or delivery will
// fail.
//
// If the message is internationalized, e.g. when headers contain non-ASCII
// character, or when UTF-8 is used in a localpart, reqSMTPUTF8 must be true. If set,
// the remote server must support the SMTPUTF8 extension or delivery will fail.
//
// Deliver uses the following SMTP extensions if the remote server supports them:
// 8BITMIME, SMTPUTF8, SIZE, PIPELINING, ENHANCEDSTATUSCODES, STARTTLS.
//
// Returned errors can be of type Error, one of the Err-variables in this package
// or other underlying errors, e.g. for i/o. Use errors.Is to check.
func (c *Client) Deliver(ctx context.Context, mailFrom string, rcptTo string, msgSize int64, msg io.Reader, req8bitmime, reqSMTPUTF8 bool) (rerr error) {
	defer c.recover(&rerr)

	// Refuse work on a connection that is closed, botched, or mid-transaction
	// (in which case we first reset the transaction state).
	if c.origConn == nil {
		return ErrClosed
	} else if c.botched {
		return ErrBotched
	} else if c.needRset {
		if err := c.Reset(); err != nil {
			return err
		}
	}

	// Caller-required extensions that the server did not announce are permanent errors.
	if !c.ext8bitmime && req8bitmime {
		c.xerrorf(true, 0, "", "", "%w", Err8bitmimeUnsupported)
	}
	if !c.extSMTPUTF8 && reqSMTPUTF8 {
		// ../rfc/6531:313
		c.xerrorf(true, 0, "", "", "%w", ErrSMTPUTF8Unsupported)
	}

	// Only enforced when the server advertised SIZE with a limit.
	if c.extSize && msgSize > c.maxSize {
		c.xerrorf(true, 0, "", "", "%w: message is %d bytes, remote has a %d bytes maximum size", ErrSize, msgSize, c.maxSize)
	}

	// Build MAIL FROM parameters for the extensions that apply.
	var mailSize, bodyType string
	if c.extSize {
		mailSize = fmt.Sprintf(" SIZE=%d", msgSize)
	}
	if c.ext8bitmime {
		if req8bitmime {
			bodyType = " BODY=8BITMIME"
		} else {
			bodyType = " BODY=7BIT"
		}
	}
	var smtputf8Arg string
	if reqSMTPUTF8 {
		// ../rfc/6531:213
		smtputf8Arg = " SMTPUTF8"
	}

	// Transaction overview: ../rfc/5321:1015
	// MAIL FROM: ../rfc/5321:1879
	// RCPT TO: ../rfc/5321:1916
	// DATA: ../rfc/5321:1992
	lineMailFrom := fmt.Sprintf("MAIL FROM:<%s>%s%s%s", mailFrom, mailSize, bodyType, smtputf8Arg)
	lineRcptTo := fmt.Sprintf("RCPT TO:<%s>", rcptTo)

	// We are going into a transaction. We'll clear this when done.
	c.needRset = true

	if c.extPipelining {
		// Batch MAIL FROM, RCPT TO and DATA in one write, then read the three
		// responses; saves round trips on servers that announced PIPELINING.
		c.cmds = []string{"mailfrom", "rcptto", "data"}
		c.cmdStart = time.Now()
		// todo future: write in a goroutine to prevent potential deadlock if remote does not consume our writes before expecting us to read. could potentially happen with greylisting and a small tcp send window?
		c.xbwriteline(lineMailFrom)
		c.xbwriteline(lineRcptTo)
		c.xbwriteline("DATA")
		c.xflush()

		mfcode, mfsecode, mflastline, _ := c.xread()
		rtcode, rtsecode, rtlastline, _ := c.xread()
		datacode, datasecode, datalastline, _ := c.xread()

		if mfcode != smtp.C250Completed {
			c.xerrorf(mfcode/100 == 5, mfcode, mfsecode, mflastline, "%w: got %d, expected 2xx", ErrStatus, mfcode)
		}
		if rtcode != smtp.C250Completed {
			c.xerrorf(rtcode/100 == 5, rtcode, rtsecode, rtlastline, "%w: got %d, expected 2xx", ErrStatus, rtcode)
		}
		if datacode != smtp.C354Continue {
			c.xerrorf(datacode/100 == 5, datacode, datasecode, datalastline, "%w: got %d, expected 354", ErrStatus, datacode)
		}
	} else {
		// Sequential: one command and one response at a time.
		c.cmds[0] = "mailfrom"
		c.cmdStart = time.Now()
		c.xwriteline(lineMailFrom)
		code, secode, lastline, _ := c.xread()
		if code != smtp.C250Completed {
			c.xerrorf(code/100 == 5, code, secode, lastline, "%w: got %d, expected 2xx", ErrStatus, code)
		}

		c.cmds[0] = "rcptto"
		c.cmdStart = time.Now()
		c.xwriteline(lineRcptTo)
		code, secode, lastline, _ = c.xread()
		if code != smtp.C250Completed {
			c.xerrorf(code/100 == 5, code, secode, lastline, "%w: got %d, expected 2xx", ErrStatus, code)
		}

		c.cmds[0] = "data"
		c.cmdStart = time.Now()
		c.xwriteline("DATA")
		code, secode, lastline, _ = c.xread()
		if code != smtp.C354Continue {
			c.xerrorf(code/100 == 5, code, secode, lastline, "%w: got %d, expected 354", ErrStatus, code)
		}
	}

	// For a DATA write, the suggested timeout is 3 minutes, we use 30 seconds for all
	// writes through timeoutWriter. ../rfc/5321:3651
	err := smtp.DataWrite(c.w, msg)
	if err != nil {
		// A partial DATA write leaves the connection in an unknown state: botch it.
		c.xbotchf(0, "", "", "writing message as smtp data: %w", err)
	}
	c.xflush()
	code, secode, lastline, _ := c.xread()
	if code != smtp.C250Completed {
		c.xerrorf(code/100 == 5, code, secode, lastline, "%w: got %d, expected 2xx", ErrStatus, code)
	}

	c.needRset = false
	return
}

// Reset sends an SMTP RSET command to reset the message transaction state. Deliver
// automatically sends it if needed.
func (c *Client) Reset() (rerr error) {
	if c.origConn == nil {
		return ErrClosed
	} else if c.botched {
		return ErrBotched
	}

	defer c.recover(&rerr)

	// ../rfc/5321:2079
	c.cmds[0] = "rset"
	c.cmdStart = time.Now()
	c.xwriteline("RSET")
	code, secode, lastline, _ := c.xread()
	if code != smtp.C250Completed {
		c.xerrorf(code/100 == 5, code, secode, lastline, "%w: got %d, expected 2xx", ErrStatus, code)
	}
	c.needRset = false
	return
}

// Botched returns whether this connection is botched, e.g. a protocol error
// occurred and the connection is in unknown state, and cannot be used for message
// delivery.
func (c *Client) Botched() bool {
	return c.botched || c.origConn == nil
}

// Close cleans up the client, closing the underlying connection.
//
// If the connection is in initialized and not botched, a QUIT command is sent and
// the response read with a short timeout before closing the underlying connection.
//
// Close returns any error encountered during QUIT and closing.
+func (c *Client) Close() (rerr error) { + if c.origConn == nil { + return ErrClosed + } + + defer c.recover(&rerr) + + if !c.botched { + // ../rfc/5321:2205 + c.cmds[0] = "quit" + c.cmdStart = time.Now() + c.xwriteline("QUIT") + if err := c.conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil { + c.log.Infox("setting read deadline for reading quit response", err) + } else if _, err := bufs.Readline(c.r); err != nil { + rerr = fmt.Errorf("reading response to quit command: %v", err) + c.log.Debugx("reading quit response", err) + } + } + + err := c.origConn.Close() + if c.conn != c.origConn { + // This is the TLS connection. Close will attempt to write a close notification. + // But it will fail quickly because the underlying socket was closed. + c.conn.Close() + } + c.origConn = nil + c.conn = nil + if rerr != nil { + rerr = err + } + return +} diff --git a/smtpclient/client_test.go b/smtpclient/client_test.go new file mode 100644 index 0000000..51d38c7 --- /dev/null +++ b/smtpclient/client_test.go @@ -0,0 +1,616 @@ +package smtpclient + +import ( + "bufio" + "context" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "math/big" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/smtp" +) + +func TestClient(t *testing.T) { + ctx := context.Background() + log := mlog.New("smtpclient") + + type options struct { + pipelining bool + ecodes bool + maxSize int + starttls bool + eightbitmime bool + smtputf8 bool + ehlo bool + + tlsMode TLSMode + tlsHostname string + need8bitmime bool + needsmtputf8 bool + + nodeliver bool // For server, whether client will attempt a delivery. + } + + // Make fake cert, and make it trusted. 
	cert := fakeCert(t, false)
	mox.Conf.Static.TLS.CertPool = x509.NewCertPool()
	mox.Conf.Static.TLS.CertPool.AddCert(cert.Leaf)
	tlsConfig := tls.Config{
		Certificates: []tls.Certificate{cert},
	}

	// test runs a scripted server goroutine and a client goroutine against the two
	// ends of a net.Pipe and checks the expected errors on each side.
	test := func(msg string, opts options, expClientErr, expDeliverErr, expServerErr error) {
		t.Helper()

		if opts.tlsMode == "" {
			opts.tlsMode = TLSOpportunistic
		}

		clientConn, serverConn := net.Pipe()
		defer serverConn.Close()

		result := make(chan error, 2)

		go func() {
			defer func() {
				x := recover()
				if x != nil && x != "stop" {
					panic(x)
				}
			}()
			// fail aborts the server script; an expected server error is treated as success.
			fail := func(format string, args ...any) {
				err := fmt.Errorf("server: %w", fmt.Errorf(format, args...))
				if err != nil && expServerErr != nil && (errors.Is(err, expServerErr) || errors.As(err, reflect.New(reflect.ValueOf(expServerErr).Type()).Interface())) {
					err = nil
				}
				result <- err
				panic("stop")
			}

			br := bufio.NewReader(serverConn)
			readline := func(prefix string) {
				s, err := br.ReadString('\n')
				if err != nil {
					fail("expected command: %v", err)
				}
				if !strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)) {
					fail("expected command %q, got: %s", prefix, s)
				}
			}
			writeline := func(s string) {
				fmt.Fprintf(serverConn, "%s\r\n", s)
			}

			haveTLS := false

			ehlo := true // Initially we expect EHLO.
			var hello func()
			hello = func() {
				if !ehlo {
					readline("HELO")
					writeline("250 mox.example")
					return
				}

				readline("EHLO")

				if !opts.ehlo {
					// Client will try again with HELO.
					writeline("500 bad syntax")
					ehlo = false
					hello()
					return
				}

				// Announce only the extensions enabled in opts.
				writeline("250-mox.example")
				if opts.pipelining {
					writeline("250-PIPELINING")
				}
				if opts.maxSize > 0 {
					writeline(fmt.Sprintf("250-SIZE %d", opts.maxSize))
				}
				if opts.ecodes {
					writeline("250-ENHANCEDSTATUSCODES")
				}
				if opts.starttls && !haveTLS {
					writeline("250-STARTTLS")
				}
				if opts.eightbitmime {
					writeline("250-8BITMIME")
				}
				if opts.smtputf8 {
					writeline("250-SMTPUTF8")
				}
				writeline("250 UNKNOWN") // To be ignored.
			}

			writeline("220 mox.example ESMTP test")

			hello()

			if opts.starttls {
				readline("STARTTLS")
				writeline("220 go")
				tlsConn := tls.Server(serverConn, &tlsConfig)
				nctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
				defer cancel()
				err := tlsConn.HandshakeContext(nctx)
				if err != nil {
					fail("tls handshake: %w", err)
				}
				serverConn = tlsConn
				br = bufio.NewReader(serverConn)

				haveTLS = true
				hello()
			}

			if expClientErr == nil && !opts.nodeliver {
				readline("MAIL FROM:")
				writeline("250 ok")
				readline("RCPT TO:")
				writeline("250 ok")
				readline("DATA")
				writeline("354 continue")
				reader := smtp.NewDataReader(br)
				io.Copy(io.Discard, reader)
				writeline("250 ok")

				if expDeliverErr == nil {
					// Client does a second delivery after an explicit Reset.
					readline("RSET")
					writeline("250 ok")

					readline("MAIL FROM:")
					writeline("250 ok")
					readline("RCPT TO:")
					writeline("250 ok")
					readline("DATA")
					writeline("354 continue")
					reader = smtp.NewDataReader(br)
					io.Copy(io.Discard, reader)
					writeline("250 ok")
				}
			}

			readline("QUIT")
			writeline("221 ok")
			result <- nil
		}()

		go func() {
			defer func() {
				x := recover()
				if x != nil && x != "stop" {
					panic(x)
				}
			}()
			fail := func(format string, args ...any) {
				result <- fmt.Errorf("client: %w", fmt.Errorf(format, args...))
				panic("stop")
			}
			c, err := New(ctx, log, clientConn, opts.tlsMode, opts.tlsHostname, "")
			if (err == nil) != (expClientErr == nil) || err != nil && !errors.As(err, reflect.New(reflect.ValueOf(expClientErr).Type()).Interface()) && !errors.Is(err, expClientErr) {
				fail("new client: got err %v, expected %#v", err, expClientErr)
			}
			if err != nil {
				result <- nil
				return
			}
			err = c.Deliver(ctx, "postmaster@mox.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), opts.need8bitmime, opts.needsmtputf8)
			if (err == nil) != (expDeliverErr == nil) || err != nil && !errors.Is(err, expDeliverErr) {
				fail("first deliver: got err %v, expected %v", err, expDeliverErr)
			}
			if err == nil {
				err = c.Reset()
				if err != nil {
					fail("reset: %v", err)
				}
				err = c.Deliver(ctx, "postmaster@mox.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), opts.need8bitmime, opts.needsmtputf8)
				if (err == nil) != (expDeliverErr == nil) || err != nil && !errors.Is(err, expDeliverErr) {
					fail("second deliver: got err %v, expected %v", err, expDeliverErr)
				}
			}
			err = c.Close()
			if err != nil {
				fail("close client: %v", err)
			}
			result <- nil
		}()

		var errs []error
		for i := 0; i < 2; i++ {
			err := <-result
			if err != nil {
				errs = append(errs, err)
			}
		}
		if errs != nil {
			t.Fatalf("%v", errs)
		}
	}

	msg := strings.ReplaceAll(`From:
To:
Subject: test

test
`, "\n", "\r\n")

	allopts := options{
		pipelining:   true,
		ecodes:       true,
		maxSize:      512,
		eightbitmime: true,
		smtputf8:     true,
		starttls:     true,
		ehlo:         true,

		tlsMode:      TLSStrict,
		tlsHostname:  "mox.example",
		need8bitmime: true,
		needsmtputf8: true,
	}

	test(msg, options{}, nil, nil, nil)
	test(msg, allopts, nil, nil, nil)
	test(msg, options{ehlo: true, eightbitmime: true}, nil, nil, nil)
	test(msg, options{ehlo: true, eightbitmime: false, need8bitmime: true, nodeliver: true}, nil, Err8bitmimeUnsupported, nil)
	test(msg, options{ehlo: true, smtputf8: false, needsmtputf8: true, nodeliver: true}, nil, ErrSMTPUTF8Unsupported, nil)
	test(msg, options{ehlo: true, starttls: true, tlsMode: TLSStrict, tlsHostname: "mismatch.example", nodeliver: true}, ErrTLS, nil, &net.OpError{}) // Server TLS handshake is a net.OpError with "remote error" as text.
	test(msg, options{ehlo: true, maxSize: len(msg) - 1, nodeliver: true}, nil, ErrSize, nil)

	// Set an expired certificate. For non-strict TLS, we should still accept it.
	// ../rfc/7435:424
	cert = fakeCert(t, true)
	mox.Conf.Static.TLS.CertPool = x509.NewCertPool()
	mox.Conf.Static.TLS.CertPool.AddCert(cert.Leaf)
	tlsConfig = tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	test(msg, options{ehlo: true, starttls: true}, nil, nil, nil)

	// Again with empty cert pool so it isn't trusted in any way.
	mox.Conf.Static.TLS.CertPool = x509.NewCertPool()
	tlsConfig = tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	test(msg, options{ehlo: true, starttls: true}, nil, nil, nil)
}

func TestErrors(t *testing.T) {
	ctx := context.Background()
	log := mlog.New("")

	// Invalid greeting.
	run(t, func(s xserver) {
		s.writeline("bogus") // Invalid greeting, must start with a "220" status line.
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		var xerr Error
		if err == nil || !errors.Is(err, ErrProtocol) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrProtocol without Permanent", err))
		}
	})

	// Server just closes connection.
	run(t, func(s xserver) {
		s.conn.Close()
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		var xerr Error
		if err == nil || !errors.Is(err, io.ErrUnexpectedEOF) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v (%v), expected ErrUnexpectedEOF without Permanent", err, err))
		}
	})

	// Server does not want to speak SMTP.
	run(t, func(s xserver) {
		s.writeline("521 not accepting connections")
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || !xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with Permanent", err))
		}
	})

	// Server has invalid code in greeting.
	run(t, func(s xserver) {
		s.writeline("2200 mox.example") // Invalid, too many digits.
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		var xerr Error
		if err == nil || !errors.Is(err, ErrProtocol) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrProtocol without Permanent", err))
		}
	})

	// Server sends multiline response, but with different codes.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250-mox.example")
		s.writeline("500 different code") // Invalid.
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		var xerr Error
		if err == nil || !errors.Is(err, ErrProtocol) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrProtocol without Permanent", err))
		}
	})

	// Server permanently refuses MAIL FROM.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250-mox.example")
		s.writeline("250 ENHANCEDSTATUSCODES")
		s.readline("MAIL FROM:")
		s.writeline("550 5.7.0 not allowed")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		if err != nil {
			panic(err)
		}
		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || !xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with Permanent", err))
		}
	})

	// Server temporarily refuses MAIL FROM.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250 mox.example")
		s.readline("MAIL FROM:")
		s.writeline("451 bad sender")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		if err != nil {
			panic(err)
		}
		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with not-Permanent", err))
		}
	})

	// Server temporarily refuses RCPT TO.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250 mox.example")
		s.readline("MAIL FROM:")
		s.writeline("250 ok")
		s.readline("RCPT TO:")
		s.writeline("451")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		if err != nil {
			panic(err)
		}
		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with not-Permanent", err))
		}
	})

	// Server permanently refuses DATA.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250 mox.example")
		s.readline("MAIL FROM:")
		s.writeline("250 ok")
		s.readline("RCPT TO:")
		s.writeline("250 ok")
		s.readline("DATA")
		s.writeline("550 no!")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		if err != nil {
			panic(err)
		}
		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || !xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with Permanent", err))
		}
	})

	// TLS is required, so we attempt it regardless of whether it is advertised.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250 mox.example")
		s.readline("STARTTLS")
		s.writeline("502 command not implemented")
	}, func(conn net.Conn) {
		_, err := New(ctx, log, conn, TLSStrict, "mox.example", "")
		var xerr Error
		if err == nil || !errors.Is(err, ErrTLS) || !errors.As(err, &xerr) || !xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrTLS with Permanent", err))
		}
	})

	// If TLS is available, but we don't want to use it, client should skip it.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250-mox.example")
		s.writeline("250 STARTTLS")
		s.readline("MAIL FROM:")
		s.writeline("451 enough")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSSkip, "mox.example", "")
		if err != nil {
			panic(err)
		}
		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with non-Permanent", err))
		}
	})

	// A transaction is aborted. If we try another one, we should send a RSET.
	run(t, func(s xserver) {
		s.writeline("220 mox.example")
		s.readline("EHLO")
		s.writeline("250 mox.example")
		s.readline("MAIL FROM:")
		s.writeline("250 ok")
		s.readline("RCPT TO:")
		s.writeline("451 not now")
		s.readline("RSET")
		s.writeline("250 ok")
		s.readline("MAIL FROM:")
		s.writeline("250 ok")
		s.readline("RCPT TO:")
		s.writeline("250 ok")
		s.readline("DATA")
		s.writeline("550 not now")
	}, func(conn net.Conn) {
		c, err := New(ctx, log, conn, TLSOpportunistic, "", "")
		if err != nil {
			panic(err)
		}

		msg := ""
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		var xerr Error
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with non-Permanent", err))
		}

		// Another delivery.
		err = c.Deliver(ctx, "postmaster@other.example", "mjl@mox.example", int64(len(msg)), strings.NewReader(msg), false, false)
		if err == nil || !errors.Is(err, ErrStatus) || !errors.As(err, &xerr) || !xerr.Permanent {
			panic(fmt.Errorf("got %#v, expected ErrStatus with Permanent", err))
		}
	})
}

// xserver is a scripted test server reading/writing fixed protocol lines.
type xserver struct {
	conn net.Conn
	br   *bufio.Reader
}

// check panics on err, aborting the script.
func (s xserver) check(err error, msg string) {
	if err != nil {
		panic(fmt.Errorf("%s: %w", msg, err))
	}
}

// errorf panics with a formatted error.
func (s xserver) errorf(format string, args ...any) {
	panic(fmt.Errorf(format, args...))
}

// writeline sends a single CRLF-terminated line.
func (s xserver) writeline(line string) {
	_, err := fmt.Fprintf(s.conn, "%s\r\n", line)
	s.check(err, "write")
}

// readline reads one line and checks its prefix, case-insensitively.
func (s xserver) readline(prefix string) {
	line, err := s.br.ReadString('\n')
	s.check(err, "reading command")
	if !strings.HasPrefix(strings.ToLower(line), strings.ToLower(prefix)) {
		s.errorf("expected command %q, got: %s", prefix, line)
	}
}

// run executes a scripted server and a client against the ends of a pipe,
// failing the test if either side reports an error.
func run(t *testing.T, server func(s xserver), client func(conn net.Conn)) {
	t.Helper()

	result := make(chan error, 2)
	clientConn, serverConn :=
		net.Pipe()
	go func() {
		defer func() {
			serverConn.Close()
			x := recover()
			if x != nil {
				result <- fmt.Errorf("server: %v", x)
			} else {
				result <- nil
			}
		}()
		server(xserver{serverConn, bufio.NewReader(serverConn)})
	}()
	go func() {
		defer func() {
			clientConn.Close()
			x := recover()
			if x != nil {
				result <- fmt.Errorf("client: %v", x)
			} else {
				result <- nil
			}
		}()
		client(clientConn)
	}()
	var errs []error
	for i := 0; i < 2; i++ {
		err := <-result
		if err != nil {
			errs = append(errs, err)
		}
	}
	if errs != nil {
		t.Fatalf("errors: %v", errs)
	}
}

// Just a cert that appears valid. SMTP client will not verify anything about it
// (that is opportunistic TLS for you, "better some than none"). Let's enjoy this
// one moment where it makes life easier.
func fakeCert(t *testing.T, expired bool) tls.Certificate {
	notAfter := time.Now()
	if expired {
		notAfter = notAfter.Add(-time.Hour)
	} else {
		notAfter = notAfter.Add(time.Hour)
	}

	privKey := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real!
	template := &x509.Certificate{
		SerialNumber: big.NewInt(1), // Required field...
		DNSNames:     []string{"mox.example"},
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     notAfter,
	}
	localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey)
	if err != nil {
		t.Fatalf("making certificate: %s", err)
	}
	cert, err := x509.ParseCertificate(localCertBuf)
	if err != nil {
		t.Fatalf("parsing generated certificate: %s", err)
	}
	c := tls.Certificate{
		Certificate: [][]byte{localCertBuf},
		PrivateKey:  privKey,
		Leaf:        cert,
	}
	return c
}
diff --git a/smtpserver/alignment.go b/smtpserver/alignment.go
new file mode 100644
index 0000000..cbb6b1d
--- /dev/null
+++ b/smtpserver/alignment.go
@@ -0,0 +1,42 @@
package smtpserver

import (
	"context"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/publicsuffix"
	"github.com/mjl-/mox/spf"
	"github.com/mjl-/mox/store"
)

// Alignment compares the msgFromDomain with the dkim and spf results, and returns
// a validation, one of: Strict, Relaxed, None.
func alignment(ctx context.Context, msgFromDomain dns.Domain, dkimResults []dkim.Result, spfStatus spf.Status, spfIdentity *dns.Domain) store.Validation {
	var strict, relaxed bool
	msgFromOrgDomain := publicsuffix.Lookup(ctx, msgFromDomain)

	// todo: should take temperror and permerror into account.
	for _, dr := range dkimResults {
		if dr.Status != dkim.StatusPass || dr.Sig == nil {
			continue
		}
		// Exact domain match is strict alignment; a shared organizational domain
		// (per the public suffix list) is relaxed alignment.
		if dr.Sig.Domain == msgFromDomain {
			strict = true
			break
		} else {
			relaxed = relaxed || msgFromOrgDomain == publicsuffix.Lookup(ctx, dr.Sig.Domain)
		}
	}
	if !strict && spfStatus == spf.StatusPass {
		// NOTE(review): spfIdentity is dereferenced without a nil check — presumably a
		// pass guarantees a non-nil identity; confirm against the spf package.
		strict = msgFromDomain == *spfIdentity
		relaxed = relaxed || msgFromOrgDomain == publicsuffix.Lookup(ctx, *spfIdentity)
	}
	if strict {
		return store.ValidationStrict
	}
	if relaxed {
		return store.ValidationRelaxed
	}
	return store.ValidationNone
}
diff --git a/smtpserver/analyze.go b/smtpserver/analyze.go
new file mode 100644
index 0000000..34ee5b6
--- /dev/null
+++ b/smtpserver/analyze.go
@@ -0,0 +1,327 @@
package smtpserver

import (
	"context"
	"net"
	"os"
	"time"

	"github.com/mjl-/bstore"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dmarcrpt"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/dnsbl"
	"github.com/mjl-/mox/iprev"
	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/mox-"
	"github.com/mjl-/mox/smtp"
	"github.com/mjl-/mox/store"
	"github.com/mjl-/mox/subjectpass"
	"github.com/mjl-/mox/tlsrpt"
)

// delivery bundles an incoming message and the verification results gathered
// during the SMTP transaction, as input for analysis.
type delivery struct {
	m           *store.Message
	dataFile    *os.File
	rcptAcc     rcptAccount
	acc         *store.Account
	msgFrom     smtp.Address
	dnsBLs      []dns.Domain
	dmarcUse    bool
	dmarcResult dmarc.Result
	dkimResults []dkim.Result
	iprevStatus iprev.Status
}

// analysis is the decision about an incoming delivery: accept it, or reject it
// with the given SMTP code/enhanced code and message.
type analysis struct {
	accept      bool
	code        int
	secode      string
	userError   bool
	errmsg      string
	err         error              // For our own logging, not sent to remote.
	dmarcReport *dmarcrpt.Feedback // Validated dmarc aggregate report, not yet stored.
	tlsReport   *tlsrpt.Report     // Validated TLS report, not yet stored.
	reason      string             // If non-empty, reason for this decision. Can be one of reputationMethod and a few other tokens.
}

// Reason tokens recorded with each accept/reject decision, for logging/metrics.
const (
	reasonListAllow         = "list-allow"
	reasonDMARCPolicy       = "dmarc-policy"
	reasonReputationError   = "reputation-error"
	reasonReporting         = "reporting"
	reasonSPFPolicy         = "spf-policy"
	reasonJunkClassifyError = "junk-classify-error"
	reasonJunkFilterError   = "junk-filter-error"
	reasonGiveSubjectpass   = "give-subjectpass"
	reasonNoBadSignals      = "no-bad-signals"
	reasonJunkContent       = "junk-content"
	reasonJunkContentStrict = "junk-content-strict"
	reasonDNSBlocklisted    = "dns-blocklisted"
	reasonSubjectpass       = "subjectpass"
	reasonSubjectpassError  = "subjectpass-error"
	reasonIPrev             = "iprev" // No or mild junk reputation signals, and bad iprev.
)

func analyze(ctx context.Context, log *mlog.Log, resolver dns.Resolver, d delivery) analysis {
	// reject builds a rejection; userError is set when there is no internal error.
	reject := func(code int, secode string, errmsg string, err error, reason string) analysis {
		return analysis{false, code, secode, err == nil, errmsg, err, nil, nil, reason}
	}

	// If destination mailbox has a mailing list domain (for SPF/DKIM) configured,
	// check it for a pass.
	// todo: should use this evaluation for final delivery as well
	rs := store.MessageRuleset(log, d.rcptAcc.destination, d.m, d.m.MsgPrefix, d.dataFile)
	if rs != nil && !rs.ListAllowDNSDomain.IsZero() {
		ld := rs.ListAllowDNSDomain
		// todo: on temporary failures, reject temporarily?
		if d.m.MailFromValidated && ld.Name() == d.m.MailFromDomain {
			return analysis{accept: true, reason: reasonListAllow}
		}
		for _, r := range d.dkimResults {
			if r.Status == dkim.StatusPass && r.Sig.Domain == ld {
				return analysis{accept: true, reason: reasonListAllow}
			}
		}
	}

	if d.dmarcUse && d.dmarcResult.Reject {
		return reject(smtp.C550MailboxUnavail, smtp.SePol7MultiAuthFails26, "rejecting per dmarc policy", nil, reasonDMARCPolicy)
	}
	// todo: should we also reject messages that have a dmarc pass but an spf record "v=spf1 -all"? suggested by m3aawg best practices.
+ + // If destination is the DMARC reporting mailbox, do additional checks and keep + // track of the report. We'll check reputation, defaulting to accept. + var dmarcReport *dmarcrpt.Feedback + if d.rcptAcc.destination.DMARCReports { + // Messages with DMARC aggregate reports must have a dmarc pass. ../rfc/7489:1866 + if d.dmarcResult.Status != dmarc.StatusPass { + log.Info("received DMARC report without DMARC pass, not processing as DMARC report") + } else if report, err := dmarcrpt.ParseMessageReport(store.FileMsgReader(d.m.MsgPrefix, d.dataFile)); err != nil { + log.Infox("parsing dmarc report", err) + } else if d, err := dns.ParseDomain(report.PolicyPublished.Domain); err != nil { + log.Infox("parsing domain in dmarc report", err) + } else if _, ok := mox.Conf.Domain(d); !ok { + log.Info("dmarc report for domain not configured, ignoring", mlog.Field("domain", d)) + } else if report.ReportMetadata.DateRange.End > time.Now().Unix()+60 { + log.Info("dmarc report with end date in the future, ignoring", mlog.Field("domain", d), mlog.Field("end", time.Unix(report.ReportMetadata.DateRange.End, 0))) + } else { + dmarcReport = report + } + } + + // Similar to DMARC reporting, we check for the required DKIM. We'll check + // reputation, defaulting to accept. + var tlsReport *tlsrpt.Report + if d.rcptAcc.destination.TLSReports { + // Valid DKIM signature for domain must be present. We take "valid" to assume + // "passing", not "syntactically valid". We also check for "tlsrpt" as service. + // This check is optional, but if anyone goes through the trouble to explicitly + // list allowed services, they would be surprised to see them ignored. 
+ // ../rfc/8460:320 + ok := false + for _, r := range d.dkimResults { + if r.Status == dkim.StatusPass && r.Sig.Domain == d.msgFrom.Domain && r.Sig.Length < 0 && r.Record.ServiceAllowed("tlsrpt") { + ok = true + break + } + } + + if !ok { + log.Info("received mail to TLSRPT without acceptable DKIM signature, not processing as TLSRPT") + } else if report, err := tlsrpt.ParseMessage(store.FileMsgReader(d.m.MsgPrefix, d.dataFile)); err != nil { + log.Infox("parsing TLSRPT report", err) + } else { + var known bool + for _, p := range report.Policies { + log.Info("tlsrpt policy domain", mlog.Field("domain", p.Policy.Domain)) + if d, err := dns.ParseDomain(p.Policy.Domain); err != nil { + log.Infox("parsing domain in TLSRPT report", err) + } else if _, ok := mox.Conf.Domain(d); ok { + known = true + break + } + } + if !known { + log.Info("TLSRPT report without one of configured domains, ignoring") + } else { + tlsReport = report + } + } + } + + // Determine if message is acceptable based on DMARC domain, DKIM identities, or + // host-based reputation. + var isjunk *bool + var conclusive bool + var method reputationMethod + var reason string + var err error + d.acc.WithRLock(func() { + err = d.acc.DB.Read(func(tx *bstore.Tx) error { + // Set message MailboxID to which mail will be delivered. Reputation is + // per-mailbox. If referenced mailbox is not found (e.g. does not yet exist), we + // can still use determine a reputation because we also base it on outgoing + // messages and those account-global. 
+ mailbox := d.rcptAcc.destination.Mailbox + if mailbox == "" { + mailbox = "Inbox" + } + if rs != nil { + mailbox = rs.Mailbox + } + mb := d.acc.MailboxFindX(tx, mailbox) + if mb != nil { + d.m.MailboxID = mb.ID + } else { + log.Debug("mailbox not found in database", mlog.Field("mailbox", mailbox)) + } + + var err error + isjunk, conclusive, method, err = reputation(tx, log, d.m) + reason = string(method) + return err + }) + }) + if err != nil { + log.Infox("determining reputation", err, mlog.Field("message", d.m)) + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", err, reasonReputationError) + } + log.Info("reputation analyzed", mlog.Field("conclusive", conclusive), mlog.Field("isjunk", isjunk), mlog.Field("method", string(method))) + if conclusive { + if !*isjunk { + return analysis{accept: true, dmarcReport: dmarcReport, tlsReport: tlsReport, reason: reason} + } + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", err, string(method)) + } else if dmarcReport != nil || tlsReport != nil { + log.Info("accepting dmarc reporting or tlsrpt message without reputation") + return analysis{accept: true, dmarcReport: dmarcReport, tlsReport: tlsReport, reason: reasonReporting} + } + // If there was no previous message from sender or its domain, and we have an SPF + // (soft)fail, reject the message. + switch method { + case methodDKIMSPF, methodIP1, methodIP2, methodIP3, methodNone: + switch d.m.MailFromValidation { + case store.ValidationFail, store.ValidationSoftfail: + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", nil, reasonSPFPolicy) + } + } + + // Senders without reputation and without iprev pass, are likely spam. + var suspiciousIPrevFail bool + switch method { + case methodDKIMSPF, methodIP1, methodIP2, methodIP3, methodNone: + suspiciousIPrevFail = d.iprevStatus != iprev.StatusPass + } + + // With already a mild junk signal, an iprev fail on top is enough to reject. 
+ if suspiciousIPrevFail && isjunk != nil && *isjunk { + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", nil, reasonIPrev) + } + + var subjectpassKey string + conf, _ := d.acc.Conf() + if conf.SubjectPass.Period > 0 { + subjectpassKey, err = d.acc.Subjectpass(d.rcptAcc.canonicalAddress) + if err != nil { + log.Errorx("get key for verifying subject token", err) + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", err, reasonSubjectpassError) + } + err = subjectpass.Verify(d.dataFile, []byte(subjectpassKey), conf.SubjectPass.Period) + pass := err == nil + log.Infox("pass by subject token", err, mlog.Field("pass", pass)) + if pass { + return analysis{accept: true, reason: reasonSubjectpass} + } + } + + reason = reasonNoBadSignals + accept := true + var junkSubjectpass bool + f, jf, err := d.acc.OpenJunkFilter(log) + if err == nil { + defer func() { + if err := f.Close(); err != nil { + log.Errorx("closing junkfilter", err) + } + }() + contentProb, _, _, _, err := f.ClassifyMessageReader(store.FileMsgReader(d.m.MsgPrefix, d.dataFile), d.m.Size) + if err != nil { + log.Errorx("testing for spam", err) + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", err, reasonJunkClassifyError) + } + // todo: if isjunk is not nil (i.e. there was inconclusive reputation), use it in the probability calculation. give reputation a score of 0.25 or .75 perhaps? + // todo: if there aren't enough historic messages, we should just let messages in. + // todo: we could require nham and nspam to be above a certain number when there were plenty of words in the message, and in the database. can indicate a spammer is misspelling words. however, it can also mean a message in a different language/script... + + // If we don't accept, we may still respond with a "subjectpass" hint below. + // We add some jitter to the threshold we use. So we don't act as too easy an + // oracle for words that are a strong indicator of haminess. 
+ // todo: we should rate-limit uses of the junkfilter. + jitter := (jitterRand.Float64() - 0.5) / 10 + threshold := jf.Threshold + jitter + + // With an iprev fail, we set a higher bar for content. + reason = reasonJunkContent + if suspiciousIPrevFail && threshold > 0.25 { + threshold = 0.25 + log.Info("setting junk threshold due to iprev fail", mlog.Field("threshold", 0.25)) + reason = reasonJunkContentStrict + } + accept = contentProb <= threshold + junkSubjectpass = contentProb < threshold-0.2 + log.Info("content analyzed", mlog.Field("accept", accept), mlog.Field("contentProb", contentProb), mlog.Field("subjectpass", junkSubjectpass)) + } else if err != store.ErrNoJunkFilter { + log.Errorx("open junkfilter", err) + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", err, reasonJunkFilterError) + } + + // If content looks good, we'll still look at DNS block lists for a reason to + // reject. We normally won't get here if we've communicated with this sender + // before. + var dnsblocklisted bool + if accept { + blocked := func(zone dns.Domain) bool { + dnsblctx, dnsblcancel := context.WithTimeout(ctx, 30*time.Second) + defer dnsblcancel() + if !checkDNSBLHealth(dnsblctx, resolver, zone) { + log.Info("dnsbl not healthy, skipping", mlog.Field("zone", zone)) + return false + } + + status, expl, err := dnsbl.Lookup(dnsblctx, resolver, zone, net.ParseIP(d.m.RemoteIP)) + dnsblcancel() + if status == dnsbl.StatusFail { + log.Info("rejecting due to listing in dnsbl", mlog.Field("zone", zone), mlog.Field("explanation", expl)) + return true + } else if err != nil { + log.Infox("dnsbl lookup", err, mlog.Field("zone", zone), mlog.Field("status", status)) + } + return false + } + + // Note: We don't check in parallel, we are in no hurry to accept possible spam. 
+ for _, zone := range d.dnsBLs { + if blocked(zone) { + accept = false + dnsblocklisted = true + reason = reasonDNSBlocklisted + break + } + } + } + + if accept { + return analysis{accept: true, reason: reasonNoBadSignals} + } + + if subjectpassKey != "" && d.dmarcResult.Status == dmarc.StatusPass && method == methodNone && (dnsblocklisted || junkSubjectpass) { + log.Info("permanent reject with subjectpass hint of moderately spammy email without reputation") + pass := subjectpass.Generate(d.msgFrom, []byte(subjectpassKey), time.Now()) + return reject(smtp.C550MailboxUnavail, smtp.SePol7DeliveryUnauth1, subjectpass.Explanation+pass, nil, reasonGiveSubjectpass) + } + + return reject(smtp.C451LocalErr, smtp.SeSys3Other0, "error processing", nil, reason) +} diff --git a/smtpserver/authresults.go b/smtpserver/authresults.go new file mode 100644 index 0000000..5b2d13b --- /dev/null +++ b/smtpserver/authresults.go @@ -0,0 +1,116 @@ +package smtpserver + +import ( + "fmt" + + "github.com/mjl-/mox/message" +) + +// ../rfc/8601:577 + +// Authentication-Results header, see RFC 8601. +type AuthResults struct { + Hostname string + Comment string // If not empty, header comment without "()", added after Hostname. + Methods []AuthMethod +} + +// ../rfc/8601:598 + +// AuthMethod is a result for one authentication method. +// +// Example encoding in the header: "spf=pass smtp.mailfrom=example.net". +type AuthMethod struct { + // E.g. "dkim", "spf", "iprev", "auth". + Method string + Result string // Each method has a set of known values, e.g. "pass", "temperror", etc. + Comment string // Optional, message header comment. + Reason string // Optional. + Props []AuthProp +} + +// ../rfc/8601:606 + +// AuthProp describes properties for an authentication method. +// Each method has a set of known properties. +// Encoded in the header as "type.property=value", e.g. "smtp.mailfrom=example.net" +// for spf. 
type AuthProp struct {
	// Valid values maintained at https://www.iana.org/assignments/email-auth/email-auth.xhtml
	Type     string
	Property string
	Value    string
	// Whether value is address-like (localpart@domain, or domain). Or another value,
	// which is subject to escaping.
	IsAddrLike bool
	Comment    string // If not empty, header comment without "()", added after Value.
}

// todo future: we could store fields as dns.Domain, and when we encode as non-ascii also add the ascii version as a comment.

// Header returns an Authentication-Results header, possibly spanning multiple
// lines, always ending in crlf.
func (h AuthResults) Header() string {
	// Escaping of values: ../rfc/8601:684 ../rfc/2045:661

	// optComment wraps s in " (...)" for appending after a token; an empty
	// string stays empty.
	optComment := func(s string) string {
		if s != "" {
			return " (" + s + ")"
		}
		return s
	}

	w := &message.HeaderWriter{}
	w.Add("", "Authentication-Results:"+optComment(h.Comment)+" "+value(h.Hostname)+";")
	for i, m := range h.Methods {
		// Tokens for this method are collected first, so the ";" separator can
		// be appended to the last token when more methods follow.
		tokens := []string{}
		addf := func(format string, args ...any) {
			s := fmt.Sprintf(format, args...)
			tokens = append(tokens, s)
		}
		addf("%s=%s", m.Method, m.Result)
		// The method comment is only emitted when more content (reason or
		// properties) follows it.
		if m.Comment != "" && (m.Reason != "" || len(m.Props) > 0) {
			addf("(%s)", m.Comment)
		}
		if m.Reason != "" {
			addf("reason=%s", value(m.Reason))
		}
		for _, p := range m.Props {
			// Address-like values (domains/addresses) are written as-is; other
			// values go through quoting/escaping in value().
			v := p.Value
			if !p.IsAddrLike {
				v = value(v)
			}
			addf("%s.%s=%s%s", p.Type, p.Property, v, optComment(p.Comment))
		}
		for j, t := range tokens {
			if j == len(tokens)-1 && i < len(h.Methods)-1 {
				t += ";"
			}
			w.Add(" ", t)
		}
	}
	return w.String()
}

// value quotes s for use as a value in an Authentication-Results header.
// Empty strings and strings containing specials are quoted; plain tokens are
// returned unmodified.
func value(s string) string {
	quote := s == ""
	for _, c := range s {
		// utf-8 does not have to be quoted.
../rfc/6532:242 + if c == '"' || c == '\\' || c <= ' ' || c == 0x7f { + quote = true + break + } + } + if !quote { + return s + } + r := `"` + for _, c := range s { + if c == '"' || c == '\\' { + r += "\\" + } + r += string(c) + } + r += `"` + return r +} diff --git a/smtpserver/authresults_test.go b/smtpserver/authresults_test.go new file mode 100644 index 0000000..63a1de0 --- /dev/null +++ b/smtpserver/authresults_test.go @@ -0,0 +1,26 @@ +package smtpserver + +import ( + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestAuthResults(t *testing.T) { + dom, err := dns.ParseDomain("møx.example") + if err != nil { + t.Fatalf("parsing domain: %v", err) + } + authRes := AuthResults{ + Hostname: dom.XName(true), + Comment: dom.ASCIIExtra(true), + Methods: []AuthMethod{ + {"dkim", "pass", "", "", []AuthProp{{"header", "d", dom.XName(true), true, dom.ASCIIExtra(true)}}}, + }, + } + s := authRes.Header() + const exp = "Authentication-Results: (xn--mx-lka.example) møx.example; dkim=pass\r\n\theader.d=møx.example (xn--mx-lka.example)\r\n" + if s != exp { + t.Fatalf("got %q, expected %q", s, exp) + } +} diff --git a/smtpserver/dnsbl.go b/smtpserver/dnsbl.go new file mode 100644 index 0000000..0f732b6 --- /dev/null +++ b/smtpserver/dnsbl.go @@ -0,0 +1,36 @@ +package smtpserver + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dnsbl" +) + +var dnsblHealth = struct { + sync.Mutex + zones map[dns.Domain]dnsblStatus +}{ + zones: map[dns.Domain]dnsblStatus{}, +} + +type dnsblStatus struct { + last time.Time + err error // nil, dnsbl.ErrDNS or other +} + +// checkDNSBLHealth checks healthiness of DNSBL "zone", keeping the result cached for 4 hours. 
+func checkDNSBLHealth(ctx context.Context, resolver dns.Resolver, zone dns.Domain) (rok bool) { + dnsblHealth.Lock() + defer dnsblHealth.Unlock() + status, ok := dnsblHealth.zones[zone] + if !ok || time.Since(status.last) > 4*time.Hour { + status.err = dnsbl.CheckHealth(ctx, resolver, zone) + status.last = time.Now() + dnsblHealth.zones[zone] = status + } + return status.err == nil || errors.Is(status.err, dnsbl.ErrDNS) +} diff --git a/smtpserver/dsn.go b/smtpserver/dsn.go new file mode 100644 index 0000000..b8cdc60 --- /dev/null +++ b/smtpserver/dsn.go @@ -0,0 +1,56 @@ +package smtpserver + +import ( + "fmt" + "os" + + "github.com/mjl-/mox/dsn" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/store" +) + +// compose dsn message and add it to the queue for delivery to rcptTo. +func queueDSN(c *conn, rcptTo smtp.Path, m dsn.Message) error { + buf, err := m.Compose(c.log, false) + if err != nil { + return err + } + var bufUTF8 []byte + if c.smtputf8 { + bufUTF8, err = m.Compose(c.log, true) + if err != nil { + c.log.Errorx("composing dsn with utf-8 for incoming delivery for unknown user, continuing with ascii-only dsn", err) + } + } + + f, err := store.CreateMessageTemp("smtp-dsn") + if err != nil { + return fmt.Errorf("creating temp file: %w", err) + } + defer func() { + if f != nil { + if err := os.Remove(f.Name()); err != nil { + c.log.Errorx("removing temporary dsn message file", err) + } + f.Close() + } + }() + if _, err := f.Write([]byte(buf)); err != nil { + return fmt.Errorf("writing dsn file: %w", err) + } + + // Queue DSN with null reverse path so failures to deliver will eventually drop the + // message instead of causing delivery loops. 
+ // ../rfc/3464:433 + const has8bit = false + const smtputf8 = false + if err := queue.Add(c.log, "", smtp.Path{}, rcptTo, has8bit, smtputf8, int64(len(buf)), nil, f, bufUTF8, true); err != nil { + return err + } + if err := f.Close(); err != nil { + c.log.Errorx("closing dsn file", err) + } + f = nil + return nil +} diff --git a/smtpserver/error.go b/smtpserver/error.go new file mode 100644 index 0000000..1bc43d7 --- /dev/null +++ b/smtpserver/error.go @@ -0,0 +1,36 @@ +package smtpserver + +import ( + "fmt" + + "github.com/mjl-/mox/smtp" +) + +func xcheckf(err error, format string, args ...any) { + if err != nil { + panic(smtpError{smtp.C451LocalErr, smtp.SeSys3Other0, fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err), true, false}) + } +} + +type smtpError struct { + code int + secode string + err error + printStack bool + userError bool // If this is an error on the user side, which causes logging at a lower level. +} + +func (e smtpError) Error() string { return e.err.Error() } +func (e smtpError) Unwrap() error { return e.err } + +func xsmtpErrorf(code int, secode string, userError bool, format string, args ...any) { + panic(smtpError{code, secode, fmt.Errorf(format, args...), false, userError}) +} + +func xsmtpServerErrorf(codes codes, format string, args ...any) { + xsmtpErrorf(codes.code, codes.secode, false, format, args...) +} + +func xsmtpUserErrorf(code int, secode string, format string, args ...any) { + xsmtpErrorf(code, secode, true, format, args...) +} diff --git a/smtpserver/fuzz_test.go b/smtpserver/fuzz_test.go new file mode 100644 index 0000000..1d87596 --- /dev/null +++ b/smtpserver/fuzz_test.go @@ -0,0 +1,113 @@ +package smtpserver + +import ( + "context" + "fmt" + "net" + "os" + "testing" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/store" +) + +// Fuzz the server. 
For each fuzz string, we set up servers in various connection states, and write the string as command. +func FuzzServer(f *testing.F) { + f.Add("HELO remote") + f.Add("EHLO remote") + f.Add("AUTH PLAIN") + f.Add("MAIL FROM:") + f.Add("RCPT TO:") + f.Add("DATA") + f.Add(".") + f.Add("RSET") + f.Add("VRFY x") + f.Add("EXPN x") + f.Add("HELP") + f.Add("NOOP") + f.Add("QUIT") + + mox.Context = context.Background() + mox.ConfigStaticPath = "../testdata/smtp/mox.conf" + mox.MustLoadConfig() + dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir) + os.RemoveAll(dataDir) + acc, err := store.OpenAccount("mjl") + if err != nil { + f.Fatalf("open account: %v", err) + } + defer acc.Close() + err = acc.SetPassword("testtest") + if err != nil { + f.Fatalf("set password: %v", err) + } + done := store.Switchboard() + defer close(done) + err = queue.Init() + if err != nil { + f.Fatalf("queue init: %v", err) + } + defer queue.Shutdown() + + comm := store.RegisterComm(acc) + defer comm.Unregister() + + var cid int64 = 1 + + var fl *os.File + if false { + fl, err = os.Create("fuzz.log") + if err != nil { + f.Fatalf("fuzz log") + } + defer fl.Close() + } + flog := func(err error, msg string) { + if fl != nil && err != nil { + fmt.Fprintf(fl, "%s: %v\n", msg, err) + } + } + + f.Fuzz(func(t *testing.T, s string) { + run := func(cmds []string) { + serverConn, clientConn := net.Pipe() + defer serverConn.Close() + defer clientConn.Close() + + go func() { + err := clientConn.SetDeadline(time.Now().Add(time.Second)) + flog(err, "set client deadline") + _, err = clientConn.Read(make([]byte, 1024)) + flog(err, "read ehlo") + for _, cmd := range cmds { + _, err = clientConn.Write([]byte(cmd + "\r\n")) + flog(err, "write command") + _, err = clientConn.Read(make([]byte, 1024)) + flog(err, "read response") + } + _, err = clientConn.Write([]byte(s + "\r\n")) + flog(err, "write test command") + _, err = clientConn.Read(make([]byte, 1024)) + flog(err, "read test response") + clientConn.Close() + 
serverConn.Close() + }() + + resolver := dns.MockResolver{} + const submission = false + err := serverConn.SetDeadline(time.Now().Add(time.Second)) + flog(err, "set server deadline") + serve("test", cid, dns.Domain{ASCII: "mox.example"}, nil, serverConn, resolver, submission, false, 100<<10, false, false, nil) + cid++ + } + + run([]string{}) + run([]string{"EHLO remote"}) + run([]string{"EHLO remote", "MAIL FROM:"}) + run([]string{"EHLO remote", "MAIL FROM:", "RCPT TO:"}) + // todo: submission with login + }) +} diff --git a/smtpserver/limitwriter.go b/smtpserver/limitwriter.go new file mode 100644 index 0000000..d309ba4 --- /dev/null +++ b/smtpserver/limitwriter.go @@ -0,0 +1,25 @@ +package smtpserver + +import ( + "errors" + "io" +) + +var errMessageTooLarge = errors.New("maximum message size exceeded") + +type limitWriter struct { + maxSize int64 + w io.Writer + written int64 +} + +func (w *limitWriter) Write(buf []byte) (int, error) { + if w.written+int64(len(buf)) > w.maxSize { + return 0, errMessageTooLarge + } + n, err := w.w.Write(buf) + if n > 0 { + w.written += int64(n) + } + return n, err +} diff --git a/smtpserver/mx.go b/smtpserver/mx.go new file mode 100644 index 0000000..d41a168 --- /dev/null +++ b/smtpserver/mx.go @@ -0,0 +1,37 @@ +package smtpserver + +import ( + "context" + "net" + + "github.com/mjl-/mox/dns" +) + +// checks if domain can accept email. +// i.e. if it has no null mx record, regular mx records or resolve to an address. +func checkMXRecords(ctx context.Context, resolver dns.Resolver, d dns.Domain) (bool, error) { + // Note: LookupMX can return an error and still return records. + mx, err := resolver.LookupMX(ctx, d.ASCII+".") + if err == nil && len(mx) == 1 && mx[0].Host == "." { + // Null MX record, explicit signal that remote does not accept email. + return false, nil + } + // Treat all errors that are not "no mx record" as temporary. E.g. timeout, malformed record, remote server error. 
+ if err != nil && !dns.IsNotFound(err) { + return false, err + } + if len(mx) == 0 { + mx = []*net.MX{{Host: d.ASCII + "."}} + } + var lastErr error + for _, x := range mx { + ips, err := resolver.LookupIPAddr(ctx, x.Host) + if len(ips) > 0 { + return true, nil + } + if err != nil && !dns.IsNotFound(err) { + lastErr = err + } + } + return false, lastErr +} diff --git a/smtpserver/parse.go b/smtpserver/parse.go new file mode 100644 index 0000000..81f1ee2 --- /dev/null +++ b/smtpserver/parse.go @@ -0,0 +1,447 @@ +package smtpserver + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +// Parser holds the original string and string with ascii a-z upper-cased for easy +// case-insensitive parsing. +type parser struct { + orig string + upper string + o int // Offset into orig/upper. + smtputf8 bool // Whether SMTPUTF8 extension is enabled, making IDNA domains and utf8 localparts valid. + conn *conn + utf8LocalpartCode int // If non-zero, error for utf-8 localpart when smtputf8 not enabled. +} + +// toUpper upper cases bytes that are a-z. strings.ToUpper does too much. and +// would replace invalid bytes with unicode replacement characters, which would +// break our requirement that offsets into the original and upper case strings +// point to the same character. 
func toUpper(s string) string {
	r := []byte(s)
	for i, c := range r {
		if c >= 'a' && c <= 'z' {
			r[i] = c - 0x20 // 'a'-'A' == 0x20; byte-wise so offsets stay aligned with the original.
		}
	}
	return string(r)
}

// newParser returns a parser over s. smtputf8 controls whether utf-8
// localparts and IDNA domains are accepted. conn is consulted for whether this
// is a submission connection (stricter end-of-command parsing).
func newParser(s string, smtputf8 bool, conn *conn) *parser {
	return &parser{orig: s, upper: toUpper(s), smtputf8: smtputf8, conn: conn}
}

// xerrorf aborts parsing with a syntax error, including the unparsed remainder
// of the input in the message.
func (p *parser) xerrorf(format string, args ...any) {
	// ../rfc/5321:2377
	xsmtpUserErrorf(smtp.C501BadParamSyntax, smtp.SeProto5Syntax2, "%s (remaining: %q)", fmt.Sprintf(format, args...), p.orig[p.o:])
}

// xutf8localparterrorf aborts with the error for a utf-8 localpart seen while
// SMTPUTF8 is not enabled. The response code defaults to C550MailboxUnavail
// and can be overridden through p.utf8LocalpartCode.
func (p *parser) xutf8localparterrorf() {
	code := p.utf8LocalpartCode
	if code == 0 {
		code = smtp.C550MailboxUnavail
	}
	// ../rfc/6531:466
	xsmtpUserErrorf(code, smtp.SeMsg6NonASCIIAddrNotPermitted7, "non-ascii address not permitted without smtputf8")
}

// empty reports whether all input has been consumed.
func (p *parser) empty() bool {
	return p.o == len(p.orig)
}

// note: use xend() for check for end of line with remaining white space, to be used by commands.
// xempty aborts unless all input has been consumed.
func (p *parser) xempty() {
	if p.o != len(p.orig) {
		p.xerrorf("expected end of line")
	}
}

// check we are at the end of a command.
func (p *parser) xend() {
	// For submission, we are strict.
	if p.conn.submission {
		p.xempty()
	}
	// Otherwise we allow trailing white space.
../rfc/5321:1758 + rem := p.remainder() + for _, c := range rem { + if c != ' ' && c != '\t' { + p.xerrorf("trailing data, not white space: %q", rem) + } + } +} + +func (p *parser) hasPrefix(s string) bool { + return strings.HasPrefix(p.upper[p.o:], s) +} + +func (p *parser) take(s string) bool { + if p.hasPrefix(s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtake(s string) { + if !p.take(s) { + p.xerrorf("expected %q", s) + } +} + +func (p *parser) space() bool { + return p.take(" ") +} + +func (p *parser) xspace() { + p.xtake(" ") +} + +func (p *parser) xtaken(n int) string { + r := p.orig[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) remainder() string { + r := p.orig[p.o:] + p.o = len(p.orig) + return r +} + +func (p *parser) peekchar() rune { + for _, c := range p.upper[p.o:] { + return c + } + return -1 +} + +func (p *parser) takefn1(what string, fn func(c rune, i int) bool) string { + if p.empty() { + p.xerrorf("need at least one char for %s", what) + } + for i, c := range p.upper[p.o:] { + if !fn(c, i) { + if i == 0 { + p.xerrorf("expected at least one char for %s", what) + } + return p.xtaken(i) + } + } + return p.remainder() +} + +func (p *parser) takefn(fn func(c rune, i int) bool) string { + for i, c := range p.upper[p.o:] { + if !fn(c, i) { + return p.xtaken(i) + } + } + return p.remainder() +} + +// xrawReversePath returns the raw string between the <>'s. We cannot parse it +// immediately, because if this is an IDNA (internationalization) address, we would +// only see the SMTPUTF8 indicator after having parsed the reverse path here. So we +// parse the raw data here, and validate it after having seen all parameters. +// ../rfc/5321:2260 +func (p *parser) xrawReversePath() string { + p.xtake("<") + s := p.takefn(func(c rune, i int) bool { + return c != '>' + }) + p.xtake(">") + return s +} + +// xbareReversePath parses a reverse-path without <>, as returned by +// xrawReversePath. 
It takes smtputf8 into account. +// ../rfc/5321:2260 +func (p *parser) xbareReversePath() smtp.Path { + if p.empty() { + return smtp.Path{} + } + // ../rfc/6531:468 + p.utf8LocalpartCode = smtp.C550MailboxUnavail + defer func() { + p.utf8LocalpartCode = 0 + }() + return p.xbarePath() +} + +func (p *parser) xforwardPath() smtp.Path { + // ../rfc/6531:466 + p.utf8LocalpartCode = smtp.C553BadMailbox + defer func() { + p.utf8LocalpartCode = 0 + }() + return p.xpath() +} + +// ../rfc/5321:2264 +func (p *parser) xpath() smtp.Path { + o := p.o + p.xtake("<") + r := p.xbarePath() + p.xtake(">") + if p.o-o > 256 { + // ../rfc/5321:3495 + p.xerrorf("path longer than 256 octets") + } + return r +} + +func (p *parser) xbarePath() smtp.Path { + // We parse but ignore any source routing. + // ../rfc/5321:1081 ../rfc/5321:1430 ../rfc/5321:1925 + if p.take("@") { + p.xdomain() + for p.take(",") { + p.xtake("@") + p.xdomain() + } + p.xtake(":") + } + return p.xmailbox() +} + +// ../rfc/5321:2291 +func (p *parser) xdomain() dns.Domain { + s := p.xsubdomain() + for p.take(".") { + s += "." 
+ p.xsubdomain() + } + d, err := dns.ParseDomain(s) + if err != nil { + p.xerrorf("parsing domain name %q: %s", s, err) + } + if len(s) > 255 { + // ../rfc/5321:3491 + p.xerrorf("domain longer than 255 octets") + } + return d +} + +// ../rfc/5321:2303 +// ../rfc/5321:2303 ../rfc/6531:411 +func (p *parser) xsubdomain() string { + return p.takefn1("subdomain", func(c rune, i int) bool { + return c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || i > 0 && c == '-' || c > 0x7f && p.smtputf8 + }) +} + +// ../rfc/5321:2314 +func (p *parser) xmailbox() smtp.Path { + localpart := p.xlocalpart() + p.xtake("@") + return smtp.Path{Localpart: localpart, IPDomain: p.xipdomain()} +} + +// ../rfc/5321:2307 +func (p *parser) xldhstr() string { + return p.takefn1("ldh-str", func(c rune, i int) bool { + return c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || i == 0 && c == '-' + }) +} + +// parse address-literal or domain. +func (p *parser) xipdomain() dns.IPDomain { + // ../rfc/5321:2309 + // ../rfc/5321:2397 + if p.take("[") { + c := p.peekchar() + var ipv6 bool + if !(c >= '0' && c <= '9') { + addrlit := p.xldhstr() + p.xtake(":") + if !strings.EqualFold(addrlit, "IPv6") { + p.xerrorf("unrecognized address literal %q", addrlit) + } + ipv6 = true + } + ipaddr := p.takefn1("address literal", func(c rune, i int) bool { + return c != ']' + }) + p.take("]") + ip := net.ParseIP(ipaddr) + if ip == nil { + p.xerrorf("invalid ip in address: %q", ipaddr) + } + isv4 := ip.To4() != nil + if ipv6 && isv4 { + p.xerrorf("ip is not ipv6") + } else if !ipv6 && !isv4 { + p.xerrorf("ip is not ipv4") + } + return dns.IPDomain{IP: ip} + } + return dns.IPDomain{Domain: p.xdomain()} +} + +// todo: reduce duplication between implementations: ../smtp/address.go:/xlocalpart ../dkim/parser.go:/xlocalpart ../smtpserver/parse.go:/xlocalpart +func (p *parser) xlocalpart() smtp.Localpart { + // ../rfc/5321:2316 + var s string + if p.hasPrefix(`"`) { + s = p.xquotedString(true) + } else { + s = p.xatom(true) + for 
p.take(".") { + s += "." + p.xatom(true) + } + } + // todo: have a strict parser that only allows the actual max of 64 bytes. some services have large localparts because of generated (bounce) addresses. + if len(s) > 128 { + // ../rfc/5321:3486 + p.xerrorf("localpart longer than 64 octets") + } + return smtp.Localpart(s) +} + +// ../rfc/5321:2324 +func (p *parser) xquotedString(islocalpart bool) string { + var s string + var esc bool + for { + c := p.xchar() + if esc { + if c >= ' ' && c < 0x7f { + s += string(c) + esc = false + continue + } + p.xerrorf("invalid localpart, bad escaped char %c", c) + } + if c == '\\' { + esc = true + continue + } + if c == '"' { + return s + } + // ../rfc/5321:2332 ../rfc/6531:419 + if islocalpart && c > 0x7f && !p.smtputf8 { + p.xutf8localparterrorf() + } + if c >= ' ' && c < 0x7f && c != '\\' && c != '"' || (c > 0x7f && p.smtputf8) { + s += string(c) + continue + } + p.xerrorf("invalid localpart, invalid character %c", c) + } +} + +func (p *parser) xchar() rune { + // We are careful to track invalid utf-8 properly. 
+ if p.empty() { + p.xerrorf("need another character") + } + var r rune + var o int + for i, c := range p.orig[p.o:] { + if i > 0 { + o = i + break + } + r = c + } + if o == 0 { + p.o = len(p.orig) + } else { + p.o += o + } + return r +} + +// ../rfc/5321:2320 ../rfc/6531:414 +func (p *parser) xatom(islocalpart bool) string { + return p.takefn1("atom", func(c rune, i int) bool { + switch c { + case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '/', '=', '?', '^', '_', '`', '{', '|', '}', '~': + return true + } + if islocalpart && c > 0x7f && !p.smtputf8 { + p.xutf8localparterrorf() + } + return c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || (c > 0x7f && p.smtputf8) + }) +} + +// ../rfc/5321:2338 +func (p *parser) xstring() string { + if p.peekchar() == '"' { + return p.xquotedString(false) + } + return p.xatom(false) +} + +// ../rfc/5321:2279 +func (p *parser) xparamKeyword() string { + return p.takefn1("parameter keyword", func(c rune, i int) bool { + return c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || (i > 0 && c == '-') + }) +} + +// ../rfc/5321:2281 ../rfc/6531:422 +func (p *parser) xparamValue() string { + return p.takefn1("parameter value", func(c rune, i int) bool { + return c > ' ' && c < 0x7f && c != '=' || (c > 0x7f && p.smtputf8) + }) +} + +// for smtp parameters that take a numeric parameter with specified number of +// digits, eg SIZE=... for MAIL FROM. +func (p *parser) xnumber(maxDigits int) int64 { + s := p.takefn1("number", func(c rune, i int) bool { + return c >= '0' && c <= '9' && i < maxDigits + }) + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + p.xerrorf("bad number %q: %s", s, err) + } + return v +} + +// sasl mechanism, for AUTH command. 
+// ../rfc/4422:436 +func (p *parser) xsaslMech() string { + return p.takefn1("sasl-mech", func(c rune, i int) bool { + return i < 20 && (c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '-' || c == '_') + }) +} + +// ../rfc/4954:696 ../rfc/6533:259 +func (p *parser) xtext() string { + r := "" + for !p.empty() { + b := p.orig[p.o] + if b >= 0x21 && b < 0x7f && b != '+' && b != '=' && b != ' ' { + r += string(b) + p.xtaken(1) + continue + } + if b != '+' { + break + } + p.xtaken(1) + x := p.xtaken(2) + for _, b := range x { + if b >= '0' && b <= '9' || b >= 'A' && b <= 'F' { + continue + } + p.xerrorf("parsing xtext: invalid hexadecimal %q", x) + } + const hex = "0123456789ABCDEF" + b = byte(strings.IndexByte(hex, x[0])<<4) | byte(strings.IndexByte(hex, x[1])<<0) + r += string(rune(b)) + } + return r +} diff --git a/smtpserver/parse_test.go b/smtpserver/parse_test.go new file mode 100644 index 0000000..5ab2293 --- /dev/null +++ b/smtpserver/parse_test.go @@ -0,0 +1,23 @@ +package smtpserver + +import ( + "reflect" + "testing" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +func tcompare(t *testing.T, got, exp any) { + t.Helper() + if !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, expected %v", got, exp) + } +} + +func TestParse(t *testing.T) { + tcompare(t, newParser("<@hosta.int,@jkl.org:userc@d.bar.org>", false, nil).xpath(), smtp.Path{Localpart: "userc", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "d.bar.org"}}}) + + tcompare(t, newParser("e+3Dmc2@example.com", false, nil).xtext(), "e=mc2@example.com") + tcompare(t, newParser("", false, nil).xtext(), "") +} diff --git a/smtpserver/rejects.go b/smtpserver/rejects.go new file mode 100644 index 0000000..4455253 --- /dev/null +++ b/smtpserver/rejects.go @@ -0,0 +1,67 @@ +package smtpserver + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "os" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" + 
"github.com/mjl-/mox/store" +) + +// rejectPresent returns whether the message is already present in the rejects mailbox. +func rejectPresent(log *mlog.Log, acc *store.Account, rejectsMailbox string, m *store.Message, f *os.File) (present bool, msgID string, hash []byte, rerr error) { + if p, err := message.Parse(store.FileMsgReader(m.MsgPrefix, f)); err != nil { + log.Infox("parsing reject message for message-id", err) + } else if header, err := p.Header(); err != nil { + log.Infox("parsing reject message header for message-id", err) + } else { + msgID = header.Get("Message-Id") + } + + // We must not read MsgPrefix, it will likely change for subsequent deliveries. + h := sha256.New() + if _, err := io.Copy(h, &moxio.AtReader{R: f}); err != nil { + log.Infox("copying reject message to hash", err) + } else { + hash = h.Sum(nil) + } + + if msgID == "" && len(hash) == 0 { + return false, "", nil, fmt.Errorf("no message-id or hash for determining reject message presence") + } + + var exists bool + var err error + acc.WithRLock(func() { + err = acc.DB.Read(func(tx *bstore.Tx) error { + mbq := bstore.QueryTx[store.Mailbox](tx) + mbq.FilterNonzero(store.Mailbox{Name: rejectsMailbox}) + mb, err := mbq.Get() + if err == bstore.ErrAbsent { + return nil + } + if err != nil { + return fmt.Errorf("looking for rejects mailbox: %w", err) + } + + q := bstore.QueryTx[store.Message](tx) + q.FilterNonzero(store.Message{MailboxID: mb.ID}) + q.FilterFn(func(m store.Message) bool { + return msgID != "" && m.MessageID == msgID || len(hash) > 0 && bytes.Equal(m.MessageHash, hash) + }) + exists, err = q.Exists() + return err + }) + }) + if err != nil { + return false, "", nil, fmt.Errorf("querying for presence of reject message: %w", err) + } + return exists, msgID, hash, nil +} diff --git a/smtpserver/reputation.go b/smtpserver/reputation.go new file mode 100644 index 0000000..1a7e3d6 --- /dev/null +++ b/smtpserver/reputation.go @@ -0,0 +1,380 @@ +package smtpserver + +import ( + 
// reputationMethod identifies which historic signal produced a reputation
// decision; it is returned for logging/diagnostics.
type reputationMethod string

const (
	methodMsgfromFull      reputationMethod = "msgfromfull"      // Exact match on "message from" address.
	methodMsgtoFull        reputationMethod = "msgtofull"        // We previously sent to this exact address.
	methodMsgfromDomain    reputationMethod = "msgfromdomain"    // Match on "message from" domain.
	methodMsgfromOrgDomain reputationMethod = "msgfromorgdomain" // Match on organizational domain of "message from".
	methodMsgtoDomain      reputationMethod = "msgtodomain"      // We previously sent to this domain.
	methodMsgtoOrgDomain   reputationMethod = "msgtoorgdomain"   // We previously sent to this organizational domain.
	methodDKIMSPF          reputationMethod = "dkimspf"          // DKIM domain and/or validated SPF identity match.
	methodIP1              reputationMethod = "ip1"              // Most specific IP match.
	methodIP2              reputationMethod = "ip2"              // Wider IP mask.
	methodIP3              reputationMethod = "ip3"              // Widest IP mask.
	methodNone             reputationMethod = "none"             // No usable history.
)

// Reputation returns whether message m is likely junk.
//
// This function is called after checking for a manually configured spf mailfrom
// allow (e.g. for mailing lists), and after checking for a dmarc reject policy.
//
// The decision is made based on historic messages delivered to the same
// destination mailbox, MailboxOrigID. Because each mailbox may have a different
// accept policy, for example mailing lists with an SPF mailfrom allow. We only use
// messages that have been marked as read. We expect users to mark junk messages as
// such when they read it. And to keep it in their inbox, regular trash or archive
// if it is not.
//
// The basic idea is to keep accepting messages that were accepted in the past, and
// keep rejecting those that were rejected. This is relatively easy to check if
// mail passes SPF and/or DKIM with Message-From alignment. Regular email from
// known people will be let in. But spammers are trickier. They will use new
// (sub)domains, no or newly created SPF and/or DKIM identifiers, new localparts,
// etc. This function likely ends up returning "inconclusive" for such emails. The
// junkfilter will have to take care of a final decision.
//
// In case of doubt, it doesn't hurt much to accept another mail that a user has
// communicated successfully with in the past. If the most recent message is marked
// as junk that could have happened accidental. If another message is let in, and
// it is again junk, future messages will be rejected.
//
// Actual spammers will probably try to use identifiers, i.e. (sub)domain, dkim/spf
// identifiers and ip addresses for which we have no history. We may only have
// ip-based reputation, perhaps only an ip range, perhaps nothing.
//
// Some profiles of first-time senders:
//
//   - Individuals. They can typically get past the junkfilter if needed.
//   - Transaction emails. They should get past the junkfilter. If they use one of
//     the larger email service providers, their reputation could help. If the
//     junkfilter rejects the message, users can recover the message from the Rejects
//     mailbox. The first message is typically initiated by a user, e.g. by registering.
//   - Desired commercial email will have to get past the junkfilter based on its
//     content. There will typically be earlier communication with the (organizational)
//     domain that would let the message through.
//   - Mailing list. May get past the junkfilter. If delivery is to a separate
//     mailbox, the junkfilter will let it in because of little history. Long enough to
//     build reputation based on DKIM/SPF signals.
//
// The decision-making process looks at historic messages. The following properties
// are checked until matching messages are found. If they are found, a decision is
// returned, which may be inconclusive. The next property on the list is only
// checked if a step did not match any messages.
//
//   - Messages matching full "message from" address, either with strict/relaxed
//     dkim/spf-verification, or without.
//   - Messages the user sent to the "message from" address.
//   - Messages matching only the domain of the "message from" address (different
//     localpart), again with verification or without.
//   - Messages sent to an address in the domain of the "message from" address.
//   - The previous two checks again, but now checking against the organizational
//     domain instead of the exact domain.
//   - Matching DKIM domains and a matching SPF mailfrom, or mailfrom domain, or ehlo
//     domain.
//   - "Exact" IP, or nearby IPs (/24 or /48).
//
// References:
// ../rfc/5863
// ../rfc/7960
// ../rfc/6376:1915
// ../rfc/6376:3716
// ../rfc/7208:2167
//
// Returns: rjunk is nil when no signal was found, otherwise points to the
// junk/ham verdict; rconclusive indicates whether the caller can act on the
// verdict without consulting the junk filter; rmethod names the signal used.
func reputation(tx *bstore.Tx, log *mlog.Log, m *store.Message) (rjunk *bool, rconclusive bool, rmethod reputationMethod, rerr error) {
	boolptr := func(v bool) *bool {
		return &v
	}
	xfalse := boolptr(false)
	xtrue := boolptr(true)

	// queryError is panicked (wrapped) by the x-prefixed helpers below and
	// converted back into a returned error by the deferred recover. Any other
	// panic is re-raised.
	type queryError string

	defer func() {
		x := recover()
		if x == nil {
			return
		}
		if xerr, ok := x.(queryError); ok {
			rerr = errors.New(string(xerr))
			return
		}
		panic(x)
	}()

	now := time.Now()

	// messageQuery returns a base query for historic seen messages to the same
	// mailbox, at most maxAge old, and at most maxCount messages.
	messageQuery := func(fm *store.Message, maxAge time.Duration, maxCount int) *bstore.Query[store.Message] {
		q := bstore.QueryTx[store.Message](tx)
		q.FilterEqual("MailboxOrigID", m.MailboxID)
		q.FilterEqual("Seen", true)
		if fm != nil {
			q.FilterNonzero(*fm)
		}
		q.FilterGreaterEqual("Received", now.Add(-maxAge))
		q.Limit(maxCount)
		q.SortDesc("Received")
		return q
	}

	// Execute the query, returning messages or returning error through panic.
	xmessageList := func(q *bstore.Query[store.Message], descr string) []store.Message {
		t0 := time.Now()
		l, err := q.List()
		log.Debugx("querying messages for reputation", err, mlog.Field("msgs", len(l)), mlog.Field("descr", descr), mlog.Field("queryduration", time.Since(t0)))
		if err != nil {
			panic(queryError(fmt.Sprintf("listing messages: %v", err)))
		}
		return l
	}

	// Execute an existence check on a Recipient query, error through panic.
	xrecipientExists := func(q *bstore.Query[store.Recipient]) bool {
		exists, err := q.Exists()
		if err != nil {
			panic(queryError(fmt.Sprintf("checking for recipient: %v", err)))
		}
		return exists
	}

	const year = 365 * 24 * time.Hour

	// Look for historic messages with same "message from" address. We'll
	// treat any validation (strict/dmarc/relaxed) the same, but "none"
	// separately.
	//
	// We only need 1 message, and sometimes look at a second message. If
	// the last message or the message before was an accept, we accept. If
	// the single last or last two were a reject, we reject.
	//
	// If there was no validation, any signal is inconclusive.
	if m.MsgFromDomain != "" {
		q := messageQuery(&store.Message{MsgFromLocalpart: m.MsgFromLocalpart, MsgFromDomain: m.MsgFromDomain}, 3*year, 2)
		q.FilterEqual("MsgFromValidated", m.MsgFromValidated)
		// NOTE(review): "mgsfromfull" below looks like a typo of "msgfromfull",
		// but it is a runtime log string, deliberately left unchanged here.
		msgs := xmessageList(q, "mgsfromfull")
		if len(msgs) > 0 {
			// Ham if the most recent, or the one before it, was not junk.
			ham := !msgs[0].Junk || len(msgs) > 1 && !msgs[1].Junk
			conclusive := m.MsgFromValidated
			// todo: we may want to look at dkim/spf in this case.
			spam := !ham
			return &spam, conclusive, methodMsgfromFull, nil
		}
		if !m.MsgFromValidated {
			// Look for historic messages that were validated. If present, this is likely spam.
			// Only return as conclusively spam if history also says this From-address sent
			// spam.
			q := messageQuery(&store.Message{MsgFromLocalpart: m.MsgFromLocalpart, MsgFromDomain: m.MsgFromDomain, MsgFromValidated: true}, 3*year, 2)
			msgs = xmessageList(q, "msgfromfull-validated")
			if len(msgs) > 0 {
				ham := !msgs[0].Junk || len(msgs) > 1 && !msgs[1].Junk
				return xtrue, !ham, methodMsgfromFull, nil
			}
		}

		// Look if we ever sent to this address. If so, we accept.
		qr := bstore.QueryTx[store.Recipient](tx)
		qr.FilterEqual("Localpart", m.MsgFromLocalpart)
		qr.FilterEqual("Domain", m.MsgFromDomain)
		qr.FilterGreaterEqual("Sent", now.Add(-3*year))
		if xrecipientExists(qr) {
			return xfalse, true, methodMsgtoFull, nil
		}

		// Look for domain match, then for organizational domain match.
		for _, orgdomain := range []bool{false, true} {
			qm := store.Message{}
			var method reputationMethod
			var descr string
			if orgdomain {
				qm.MsgFromOrgDomain = m.MsgFromOrgDomain
				method = methodMsgfromOrgDomain
				descr = "msgfromorgdomain"
			} else {
				qm.MsgFromDomain = m.MsgFromDomain
				method = methodMsgfromDomain
				descr = "msgfromdomain"
			}

			q := messageQuery(&qm, 2*year, 20)
			q.FilterEqual("MsgFromValidated", m.MsgFromValidated)
			msgs := xmessageList(q, descr)
			if len(msgs) > 0 {
				nham := 0
				for _, m := range msgs {
					if !m.Junk {
						nham++
					}
				}
				// More than 80% ham: conclusively accept.
				if 100*nham/len(msgs) > 80 {
					return xfalse, true, method, nil
				}
				if nham == 0 {
					// Only conclusive with at least 3 different localparts.
					localparts := map[smtp.Localpart]struct{}{}
					for _, m := range msgs {
						localparts[m.MsgFromLocalpart] = struct{}{}
						if len(localparts) == 3 {
							return xtrue, true, method, nil
						}
					}
					return xtrue, false, method, nil
				}
				// Mixed signals from domain. We don't want to block a new sender.
				return nil, false, method, nil
			}
			if !m.MsgFromValidated {
				// Look for historic messages that were validated. If present, this is likely spam.
				// Only return as conclusively spam if history also says this From-address sent
				// spam.
				q := messageQuery(&qm, 2*year, 2)
				q.FilterEqual("MsgFromValidated", true)
				msgs = xmessageList(q, descr+"-validated")
				if len(msgs) > 0 {
					ham := !msgs[0].Junk || len(msgs) > 1 && !msgs[1].Junk
					return xtrue, !ham, method, nil
				}
			}

			// Look if we ever sent to this address. If so, we accept.
			qr := bstore.QueryTx[store.Recipient](tx)
			if orgdomain {
				qr.FilterEqual("OrgDomain", m.MsgFromOrgDomain)
				method = methodMsgtoOrgDomain
			} else {
				qr.FilterEqual("Domain", m.MsgFromDomain)
				method = methodMsgtoDomain
			}
			qr.FilterGreaterEqual("Sent", now.Add(-2*year))
			if xrecipientExists(qr) {
				return xfalse, true, method, nil
			}
		}
	}

	// DKIM and SPF.
	// We only use identities that passed validation. Failed identities are ignored. ../rfc/6376:2447
	// todo future: we could do something with the DKIM identity (i=) field if it is more specific than just the domain (d=).
	dkimspfsignals := []float64{} // Per-identity spam ratios in [0,1].
	dkimspfmsgs := 0              // Message count backing the strongest signal.
	for _, dom := range m.DKIMDomains {
		// todo: should get dkimdomains in an index for faster lookup. bstore does not yet support "in" indexes.
		q := messageQuery(nil, year/2, 50)
		q.FilterFn(func(m store.Message) bool {
			for _, d := range m.DKIMDomains {
				if d == dom {
					return true
				}
			}
			return false
		})
		msgs := xmessageList(q, "dkimdomain")
		if len(msgs) > 0 {
			nspam := 0
			for _, m := range msgs {
				if m.Junk {
					nspam++
				}
			}
			pspam := float64(nspam) / float64(len(msgs))
			dkimspfsignals = append(dkimspfsignals, pspam)
			// NOTE(review): this overwrites dkimspfmsgs on every DKIM domain,
			// while the SPF branch below takes the max — possibly inconsistent;
			// confirm intent before changing.
			dkimspfmsgs = len(msgs)
		}
	}
	if m.MailFromValidated || m.EHLOValidated {
		// Prefer the most specific SPF identity with history: full mailfrom,
		// then mailfrom domain, then ehlo domain.
		var msgs []store.Message
		if m.MailFromValidated && m.MailFromDomain != "" {
			q := messageQuery(&store.Message{MailFromLocalpart: m.MailFromLocalpart, MailFromDomain: m.MailFromDomain}, year/2, 50)
			msgs = xmessageList(q, "mailfrom")
			if len(msgs) == 0 {
				q := messageQuery(&store.Message{MailFromDomain: m.MailFromDomain}, year/2, 50)
				msgs = xmessageList(q, "mailfromdomain")
			}
		}
		if len(msgs) == 0 && m.EHLOValidated && m.EHLODomain != "" {
			q := messageQuery(&store.Message{EHLODomain: m.EHLODomain}, year/2, 50)
			msgs = xmessageList(q, "ehlodomain")
		}
		if len(msgs) > 0 {
			nspam := 0
			for _, m := range msgs {
				if m.Junk {
					nspam++
				}
			}
			pspam := float64(nspam) / float64(len(msgs))
			dkimspfsignals = append(dkimspfsignals, pspam)
			if len(msgs) > dkimspfmsgs {
				dkimspfmsgs = len(msgs)
			}
		}
	}
	if len(dkimspfsignals) > 0 {
		// All signals must agree strongly (below 10% or above 90% spam) for a
		// verdict; anything mixed is inconclusive.
		var nham, nspam int
		for _, p := range dkimspfsignals {
			if p < .1 {
				nham++
			} else if p > .9 {
				nspam++
			}
		}
		if nham > 0 && nspam == 0 {
			return xfalse, true, methodDKIMSPF, nil
		}
		if nspam > 0 && nham == 0 {
			// Conclusive only when based on more than one historic message.
			return xtrue, dkimspfmsgs > 1, methodDKIMSPF, nil
		}
		return nil, false, methodDKIMSPF, nil
	}

	// IP-based. A wider mask needs more messages to be conclusive.
	// We require the resulting signal to be strong, i.e. likely ham or likely spam.
	q := messageQuery(&store.Message{RemoteIPMasked1: m.RemoteIPMasked1}, year/4, 50)
	msgs := xmessageList(q, "ip1")
	need := 2
	method := methodIP1
	if len(msgs) == 0 {
		q := messageQuery(&store.Message{RemoteIPMasked2: m.RemoteIPMasked2}, year/4, 50)
		msgs = xmessageList(q, "ip2")
		need = 5
		method = methodIP2
	}
	if len(msgs) == 0 {
		q := messageQuery(&store.Message{RemoteIPMasked3: m.RemoteIPMasked3}, year/4, 50)
		msgs = xmessageList(q, "ip3")
		need = 10
		method = methodIP3
	}
	if len(msgs) > 0 {
		nspam := 0
		for _, m := range msgs {
			if m.Junk {
				nspam++
			}
		}
		pspam := float64(nspam) / float64(len(msgs))
		// Verdict at 25%/75%, but conclusive only with enough messages and a
		// stronger 10%/90% ratio.
		var spam *bool
		if pspam < .25 {
			spam = xfalse
		} else if pspam > .75 {
			spam = xtrue
		}
		conclusive := len(msgs) >= need && (pspam <= 0.1 || pspam >= 0.9)
		return spam, conclusive, method, nil
	}

	return nil, false, methodNone, nil
}
panic(fmt.Errorf("parsing msgfrom %q: %w", msgfrom, err)) + } + + rcptTo, err := smtp.ParseAddress(rcptto) + if err != nil { + panic(fmt.Errorf("parsing rcptto %q: %w", rcptto, err)) + } + + mailFrom := msgFrom + if mailfrom != "" { + mailFrom, err = smtp.ParseAddress(mailfrom) + if err != nil { + panic(fmt.Errorf("parsing mailfrom %q: %w", mailfrom, err)) + } + } + + var ipmasked1, ipmasked2, ipmasked3 string + var xip = net.ParseIP(ip) + if xip.To4() != nil { + ipmasked1 = xip.String() + ipmasked2 = xip.Mask(net.CIDRMask(26, 32)).String() + ipmasked3 = xip.Mask(net.CIDRMask(21, 32)).String() + } else { + ipmasked1 = xip.Mask(net.CIDRMask(64, 128)).String() + ipmasked2 = xip.Mask(net.CIDRMask(48, 128)).String() + ipmasked3 = xip.Mask(net.CIDRMask(32, 128)).String() + } + + uidgen++ + m := store.Message{ + UID: uidgen, // Not relevant here. + MailboxID: 1, + MailboxOrigID: 1, + Received: now.Add(time.Duration(-ageDays) * 24 * time.Hour), + RemoteIP: ip, + RemoteIPMasked1: ipmasked1, + RemoteIPMasked2: ipmasked2, + RemoteIPMasked3: ipmasked3, + + EHLODomain: ehlo, + MailFrom: mailfrom, + MailFromLocalpart: mailFrom.Localpart, + MailFromDomain: mailFrom.Domain.Name(), + RcptToLocalpart: rcptTo.Localpart, + RcptToDomain: rcptTo.Domain.Name(), + + MsgFromLocalpart: msgFrom.Localpart, + MsgFromDomain: msgFrom.Domain.Name(), + MsgFromOrgDomain: publicsuffix.Lookup(context.Background(), msgFrom.Domain).Name(), + + MailFromValidated: mailfromValid, + EHLOValidated: ehloValid, + MsgFromValidated: msgfromvalidation == store.ValidationStrict || msgfromvalidation == store.ValidationRelaxed || msgfromvalidation == store.ValidationDMARC, + + MailFromValidation: mailFromValidation, + EHLOValidation: ehloValidation, + MsgFromValidation: msgfromvalidation, + + DKIMDomains: dkimDomains, + + Flags: store.Flags{ + Junk: junk, + Seen: true, + }, + } + return m + } + + check := func(m store.Message, history []store.Message, expJunk *bool, expConclusive bool, expMethod reputationMethod) 
{ + t.Helper() + + p := "../testdata/smtpserver-reputation.db" + defer os.Remove(p) + + db, err := bstore.Open(p, &bstore.Options{Timeout: 5 * time.Second}, store.Message{}, store.Recipient{}, store.Mailbox{}) + tcheck(t, err, "open db") + defer db.Close() + + err = db.Write(func(tx *bstore.Tx) error { + err = tx.Insert(&store.Mailbox{ID: 1, Name: "Inbox"}) + tcheck(t, err, "insert into db") + + for _, hm := range history { + err := tx.Insert(&hm) + tcheck(t, err, "insert message") + + rcptToDomain, err := dns.ParseDomain(hm.RcptToDomain) + tcheck(t, err, "parse rcptToDomain") + rcptToOrgDomain := publicsuffix.Lookup(context.Background(), rcptToDomain) + r := store.Recipient{MessageID: hm.ID, Localpart: hm.RcptToLocalpart, Domain: hm.RcptToDomain, OrgDomain: rcptToOrgDomain.Name(), Sent: hm.Received} + err = tx.Insert(&r) + tcheck(t, err, "insert recipient") + } + + return nil + }) + tcheck(t, err, "commit") + + var isjunk *bool + var conclusive bool + var method reputationMethod + err = db.Read(func(tx *bstore.Tx) error { + var err error + isjunk, conclusive, method, err = reputation(tx, xlog, &m) + return err + }) + tcheck(t, err, "read tx") + + if method != expMethod { + t.Fatalf("got method %q, expected %q", method, expMethod) + } + if conclusive != expConclusive { + t.Fatalf("got conclusive %v, expected %v", conclusive, expConclusive) + } + if (isjunk == nil) != (expJunk == nil) || (isjunk != nil && expJunk != nil && *isjunk != *expJunk) { + t.Fatalf("got isjunk %v, expected %v", isjunk, expJunk) + } + } + + var msgs []store.Message + var m store.Message + + msgs = []store.Message{ + message(false, 4, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationDMARC, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 3, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 2, 
"host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), // causes accept + message(true, 1, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationRelaxed, []string{"othersite.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationDMARC, []string{"othersite.example"}, true, true, "10.0.0.1") + check(m, msgs, xfalse, true, methodMsgfromFull) + + // Two most recents are spam, reject. + msgs = []store.Message{ + message(false, 3, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 1, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationDMARC, []string{"othersite.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, methodMsgfromFull) + + // If localpart matches, other localsparts are not used. 
+ msgs = []store.Message{ + message(true, 3, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationRelaxed, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 1, "host.othersite.example", "", "b@remote.example", "mjl@local.example", store.ValidationDMARC, []string{"othersite.example"}, true, true, "10.0.0.1"), // other localpart, ignored + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationRelaxed, []string{"othersite.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, methodMsgfromFull) + + // Incoming message, we have only seen other unverified msgs from sender. + msgs = []store.Message{ + message(true, 3, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{"othersite.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{"othersite.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, false, methodMsgfromFull) + + // Incoming message, we have only seen verified msgs from sender, and at least two, so this is a likely but inconclusive spam. 
+ msgs = []store.Message{ + message(false, 3, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), + message(false, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{}, false, false, "10.10.0.1") + check(m, msgs, xtrue, false, methodMsgfromFull) + + // Incoming message, we have only seen 1 verified message from sender, so inconclusive for reject. + msgs = []store.Message{ + message(false, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{}, false, false, "10.10.0.1") + check(m, msgs, xtrue, false, methodMsgfromFull) + + // Incoming message, we have only seen 1 verified message from sender, and it was spam, so we can safely reject. + msgs = []store.Message{ + message(true, 2, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{}, false, false, "10.10.0.1") + check(m, msgs, xtrue, true, methodMsgfromFull) + + // We received spam from other senders in the domain, but we sent to msgfrom. 
+ msgs = []store.Message{ + message(true, 3, "host.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), // other localpart + message(false, 2, "host.local.example", "", "mjl@local.example", "other@remote.example", store.ValidationNone, []string{}, false, false, "127.0.0.1"), // we sent to remote, accept + message(true, 1, "host.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), // other localpart + message(true, 1, "host.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1"), // other localpart + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationNone, []string{}, false, false, "10.10.0.1") + check(m, msgs, xfalse, true, methodMsgtoFull) + + // Other messages in same domain, inconclusive. 
+ msgs = []store.Message{ + message(true, 7*30, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 3*30, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 3*30, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 3*30, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 3*30, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 8, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 8, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 4, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 1, "host.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1") + check(m, msgs, 
nil, false, methodMsgfromDomain) + + // Mostly ham, so we'll allow it. + msgs = []store.Message{ + message(false, 7*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 3*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 3*30, "host2.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.2"), + message(false, 3*30, "host2.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.2"), + message(false, 3*30, "host3.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(false, 8, "host3.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), + message(false, 8, "host4.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.4"), + message(false, 4, "host4.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(false, 2, "host5.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", 
store.ValidationStrict, []string{"othersite.example", "othersite3.example"}, true, true, "10.0.0.1") + check(m, msgs, xfalse, true, methodMsgfromDomain) + + // Not clearly spam, so inconclusive. + msgs = []store.Message{ + message(true, 3*30, "host3.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), + message(false, 1, "host5.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example", "othersite3.example"}, true, true, "10.0.0.1") + check(m, msgs, nil, false, methodMsgfromDomain) + + // We only received spam from this domain by at least 3 localparts: reject. 
+ msgs = []store.Message{ + message(true, 3*30, "host3.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "", "b@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.othersite.example", "", "c@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.othersite.example", "", "c@remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example", "othersite3.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, methodMsgfromDomain) + + // We only received spam from this org domain by at least 3 localparts. so reject. 
+ msgs = []store.Message{ + message(true, 3*30, "host3.othersite.example", "", "a@a.remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "", "b@b.remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.othersite.example", "", "c@c.remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.othersite.example", "", "c@c.remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@d.remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example", "othersite3.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, methodMsgfromOrgDomain) + + // We've only seen spam, but we don"t want to reject an entire domain with only 2 froms, so inconclusive. 
+ msgs = []store.Message{ + message(true, 2*30, "host3.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "", "a@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.othersite.example", "", "b@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.othersite.example", "", "b@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, false, methodMsgfromDomain) + + // we"ve only seen spam, but we don"t want to reject an entire orgdomain with only 2 froms, so inconclusive. 
+ msgs = []store.Message{ + message(true, 2*30, "host3.othersite.example", "", "a@a.remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "", "a@a.remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.othersite.example", "", "b@b.remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.othersite.example", "", "b@b.remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.othersite.example", "", "other@remote.example", "mjl@local.example", store.ValidationStrict, []string{"remote.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, false, methodMsgfromOrgDomain) + + // All dkim/spf signs are good, so accept. + msgs = []store.Message{ + message(false, 2*30, "host3.esp.example", "bulk@esp.example", "a@espcustomer1.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.100"), + message(false, 4, "host4.esp.example", "bulk@esp.example", "b@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.4"), + message(false, 2, "host5.esp.example", "bulk@esp.example", "c@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.10.0.5"), + message(false, 1, "host5.esp.example", "bulk@esp.example", "d@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer2.example", "esp.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host3.esp.example", "bulk@esp.example", "other@espcustomer3.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer3.example", "esp.example"}, true, true, "10.0.0.1") + check(m, 
msgs, xfalse, true, "dkimspf") + + // All dkim/spf signs are bad, so reject. + msgs = []store.Message{ + message(true, 2*30, "host3.esp.example", "bulk@esp.example", "a@espcustomer1.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.esp.example", "bulk@esp.example", "b@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.esp.example", "bulk@esp.example", "c@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.esp.example", "bulk@esp.example", "d@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer2.example", "esp.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host3.esp.example", "bulk@esp.example", "other@espcustomer3.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer3.example", "esp.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, "dkimspf") + + // Mixed dkim/spf signals, inconclusive. 
+ msgs = []store.Message{ + message(false, 2*30, "host3.esp.example", "bulk@esp.example", "a@espcustomer1.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.100"), + message(false, 4, "host4.esp.example", "bulk@esp.example", "b@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.4"), + message(true, 2, "host5.esp.example", "bulk@esp.example", "c@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.10.0.5"), + message(true, 1, "host5.esp.example", "bulk@esp.example", "d@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer2.example", "esp.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host3.esp.example", "bulk@esp.example", "other@espcustomer3.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer3.example", "esp.example"}, true, true, "10.0.0.1") + check(m, msgs, nil, false, "dkimspf") + + // Just one dkim/spf message, enough for accept. + msgs = []store.Message{ + message(false, 4, "host4.esp.example", "bulk@esp.example", "b@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.4"), + } + m = message(false, 0, "host3.esp.example", "bulk@esp.example", "other@espcustomer3.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer3.example", "esp.example"}, true, true, "10.0.0.1") + check(m, msgs, xfalse, true, "dkimspf") + + // Just one dkim/spf message, not enough for reject. 
+ msgs = []store.Message{ + message(true, 4, "host4.esp.example", "bulk@esp.example", "b@espcustomer2.example", "mjl@local.example", store.ValidationNone, []string{"esp.example"}, true, true, "10.0.0.4"), + } + m = message(false, 0, "host3.esp.example", "bulk@esp.example", "other@espcustomer3.example", "mjl@local.example", store.ValidationNone, []string{"espcustomer3.example", "esp.example"}, true, true, "10.0.0.1") + check(m, msgs, xtrue, false, "dkimspf") + + // The exact IP is almost bad, but we need 3 msgs. Other IPs don't matter. + msgs = []store.Message{ + message(false, 7*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), // too old + message(true, 4*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 1*30, "host2.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.2"), // irrelevant + } + m = message(false, 0, "host.different.example", "sender@different.example", "other@other.example", "mjl@local.example", store.ValidationStrict, []string{}, true, true, "10.0.0.1") + check(m, msgs, xtrue, false, "ip1") + + // The exact IP is almost ok, so accept. 
+ msgs = []store.Message{ + message(true, 7*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), // too old + message(false, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(false, 1*30, "host2.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.2"), // irrelevant + } + m = message(false, 0, "host.different.example", "sender@different.example", "other@other.example", "mjl@local.example", store.ValidationStrict, []string{}, true, true, "10.0.0.1") + check(m, msgs, xfalse, true, "ip1") + + // The exact IP is bad, with enough msgs. Other IPs don't matter. 
+ msgs = []store.Message{ + message(true, 4*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), // too old + message(true, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 2*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 1*30, "host1.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.1"), + message(true, 1*30, "host2.othersite.example", "", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.2"), // irrelevant + } + m = message(false, 0, "host.different.example", "sender@different.example", "other@other.example", "mjl@local.example", store.ValidationStrict, []string{}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, "ip1") + + // No exact ip match, nearby IPs (we need 5) are all bad, so reject. 
+ msgs = []store.Message{ + message(true, 2*30, "host2.othersite.example", "sender3@othersite3.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.2"), + message(true, 2*30, "host2.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.2"), + message(false, 2*30, "host3.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), // other ip + message(false, 8, "host3.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), // other ip + message(true, 8, "host4.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.4"), + message(true, 4, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(true, 2, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.4"), + message(false, 2, "host5.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), // other ip + message(false, 1, "host5.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), // other ip + } + m = message(false, 0, "host.different.example", "sender@different.example", 
"other@other.example", "mjl@local.example", store.ValidationStrict, []string{}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, "ip2") + + // IPs further away are bad (we need 10), reject. + msgs = []store.Message{ + message(true, 2*30, "host2.othersite.example", "sender3@othersite3.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite3.example"}, true, true, "10.0.0.100"), + message(true, 2*30, "host2.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), + message(true, 2*30, "host2.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), + message(true, 2*30, "host3.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 8, "host3.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), + message(true, 8, "host4.othersite.example", "sender@othersite2.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite2.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", 
store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(true, 4, "host4.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.0.0.100"), + message(false, 2, "host5.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"othersite.example"}, true, true, "10.10.0.5"), + message(false, 1, "host5.othersite.example", "sender@othersite.example", "second@remote.example", "mjl@local.example", store.ValidationStrict, []string{"none.example"}, true, true, "10.10.0.5"), + } + m = message(false, 0, "host.different.example", "sender@different.example", "other@other.example", "mjl@local.example", store.ValidationStrict, []string{}, true, true, "10.0.0.1") + check(m, msgs, xtrue, true, "ip3") +} diff --git a/smtpserver/server.go b/smtpserver/server.go new file mode 100644 index 0000000..5a9a0bd --- /dev/null +++ b/smtpserver/server.go @@ -0,0 +1,2070 @@ +// Package smtpserver implements an SMTP server for submission and incoming delivery of mail messages. 
+package smtpserver + +import ( + "bufio" + "bytes" + "context" + "crypto/rsa" + "crypto/tls" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "runtime/debug" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dmarc" + "github.com/mjl-/mox/dmarcdb" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/dsn" + "github.com/mjl-/mox/iprev" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/moxvar" + "github.com/mjl-/mox/publicsuffix" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/spf" + "github.com/mjl-/mox/store" + "github.com/mjl-/mox/tlsrptdb" +) + +const defaultMaxMsgSize = 100 * 1024 * 1024 + +// Most logging should be done through conn.log* functions. +// Only use log in contexts without connection. +var xlog = mlog.New("smtpserver") + +// We use panic and recover for error handling while executing commands. +// These errors signal the connection must be closed. +var ( + errIO = errors.New("fatal io error") +) + +type codes struct { + code int + secode string // Enhanced code, but without the leading major int from code. 
+} + +var ( + metricConnection = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_smtpserver_connection_total", + Help: "Incoming SMTP connections.", + }, + []string{ + "kind", // "deliver" or "submit" + }, + ) + metricCommands = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_smtpserver_command_duration_seconds", + Help: "SMTP server command duration and result codes in seconds.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30, 60, 120}, + }, + []string{ + "kind", // "deliver" or "submit" + "cmd", + "code", + "ecode", + }, + ) + metricDelivery = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_smtpserver_delivery_total", + Help: "SMTP incoming message delivery from external source, not submission. Result values: delivered, reject, unknownuser, accounterror, delivererror. Reason indicates why a message was rejected/accepted.", + }, + []string{ + "result", + "reason", + }, + ) + metricSubmission = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_smtpserver_submission_total", + Help: "SMTP server incoming message submissions queue.", + }, + []string{ + "result", + }, + ) + metricServerErrors = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_smtpserver_errors_total", + Help: "SMTP server errors, known error values: dkimsign, queuedsn.", + }, + []string{ + "error", + }, + ) +) + +var jitterRand = mox.NewRand() + +// ListenAndServe starts network listeners that serve incoming SMTP connection. 
+func ListenAndServe() { + for name, listener := range mox.Conf.Static.Listeners { + var tlsConfig *tls.Config + if listener.TLS != nil { + tlsConfig = listener.TLS.Config + } + + maxMsgSize := listener.SMTPMaxMessageSize + if maxMsgSize == 0 { + maxMsgSize = defaultMaxMsgSize + } + + if listener.SMTP.Enabled { + hostname := mox.Conf.Static.HostnameDomain + if listener.Hostname != "" { + hostname = listener.HostnameDomain + } + port := config.Port(listener.SMTP.Port, 25) + for _, ip := range listener.IPs { + go listenServe("smtp", name, ip, port, hostname, tlsConfig, false, false, maxMsgSize, false, listener.SMTP.RequireSTARTTLS, listener.SMTP.DNSBLZones) + } + } + if listener.Submission.Enabled { + hostname := mox.Conf.Static.HostnameDomain + if listener.Hostname != "" { + hostname = listener.HostnameDomain + } + port := config.Port(listener.Submission.Port, 587) + for _, ip := range listener.IPs { + go listenServe("submission", name, ip, port, hostname, tlsConfig, true, false, maxMsgSize, !listener.Submission.NoRequireSTARTTLS, !listener.Submission.NoRequireSTARTTLS, nil) + } + } + + if listener.Submissions.Enabled { + hostname := mox.Conf.Static.HostnameDomain + if listener.Hostname != "" { + hostname = listener.HostnameDomain + } + port := config.Port(listener.Submissions.Port, 465) + for _, ip := range listener.IPs { + go listenServe("submissions", name, ip, port, hostname, tlsConfig, true, true, maxMsgSize, true, true, nil) + } + } + } +} + +func listenServe(protocol, name, ip string, port int, hostname dns.Domain, tlsConfig *tls.Config, submission, xtls bool, maxMessageSize int64, requireTLSForAuth, requireTLSForDelivery bool, dnsBLs []dns.Domain) { + addr := net.JoinHostPort(ip, fmt.Sprintf("%d", port)) + xlog.Print("listening for smtp", mlog.Field("listener", name), mlog.Field("address", addr), mlog.Field("protocol", protocol)) + network := mox.Network(ip) + var ln net.Listener + var err error + if xtls { + ln, err = tls.Listen(network, addr, tlsConfig) + 
} else { + ln, err = net.Listen(network, addr) + } + if err != nil { + xlog.Fatalx("smtp: listen for smtp"+mox.LinuxSetcapHint(err), err, mlog.Field("protocol", protocol), mlog.Field("listener", name)) + } + for { + conn, err := ln.Accept() + if err != nil { + xlog.Infox("smtp: accept", err, mlog.Field("protocol", protocol), mlog.Field("listener", name)) + continue + } + resolver := dns.StrictResolver{} // By leaving Pkg empty, it'll be set by each package that uses the resolver, e.g. spf/dkim/dmarc. + go serve(name, mox.Cid(), hostname, tlsConfig, conn, resolver, submission, xtls, maxMessageSize, requireTLSForAuth, requireTLSForDelivery, dnsBLs) + } +} + +type conn struct { + cid int64 + + // OrigConn is the original (TCP) connection. We'll read from/write to conn, which + // can be wrapped in a tls.Server. We close origConn instead of conn because + // closing the TLS connection would send a TLS close notification, which may block + // for 5s if the server isn't reading it (because it is also sending it). + origConn net.Conn + conn net.Conn + + tls bool + resolver dns.Resolver + r *bufio.Reader + w *bufio.Writer + lastlog time.Time // Used for printing the delta time since the previous logging for this connection. + submission bool // ../rfc/6409:19 applies + tlsConfig *tls.Config + localIP net.IP + remoteIP net.IP + hostname dns.Domain + log *mlog.Log + maxMessageSize int64 + requireTLSForAuth bool + requireTLSForDelivery bool + cmd string // Current command. + cmdStart time.Time // Start of current command. + dnsBLs []dns.Domain + + // todo future: add a flag for "pedantic" mode, causing us to be strict. e.g. interpreting some SHOULD as MUST. ../rfc/5321:4076 + + // If non-zero, taken into account during Read and Write. Set while processing DATA + // command, we don't want the entire delivery to take too long. + deadline time.Time + + hello dns.IPDomain // Claimed remote name. Can be ip address for ehlo. + ehlo bool // If set, we had EHLO instead of HELO. 
+ + authFailed int // Number of failed auth attempts. For slowing down remote with many failures. + username string // Only when authenticated. + account *store.Account // Only when authenticated. + + // We track good/bad message transactions to disconnect spammers trying to guess addresses. + transactionGood int + transactionBad int + + // Message transaction. + mailFrom *smtp.Path + has8bitmime bool // If MAIL FROM parameter BODY=8BITMIME was sent. Required for SMTPUTF8. + smtputf8 bool // todo future: we should keep track of this per recipient. perhaps only a specific recipient requires smtputf8, e.g. due to a utf8 localpart. we should decide ourselves if the message needs smtputf8, e.g. due to utf8 header values. + recipients []rcptAccount +} + +type rcptAccount struct { + rcptTo smtp.Path + local bool // Whether recipient is a local user. + + // Only valid for local delivery. + accountName string + destination config.Destination + canonicalAddress string // Optional catchall part stripped and/or lowercased. +} + +func isClosed(err error) bool { + return errors.Is(err, errIO) || moxio.IsClosed(err) +} + +// completely reset connection state as if greeting has just been sent. +// ../rfc/3207:210 +func (c *conn) reset() { + c.ehlo = false + c.hello = dns.IPDomain{} + c.username = "" + if c.account != nil { + c.account.Close() + } + c.account = nil + c.rset() +} + +// for rset command, and a few more cases that reset the mail transaction state. 
+// ../rfc/5321:2502 +func (c *conn) rset() { + c.mailFrom = nil + c.has8bitmime = false + c.smtputf8 = false + c.recipients = nil +} + +func (c *conn) earliestDeadline(d time.Duration) time.Time { + e := time.Now().Add(d) + if !c.deadline.IsZero() && c.deadline.Before(e) { + return c.deadline + } + return e +} + +func (c *conn) xcheckAuth() { + if c.submission && c.account == nil { + // ../rfc/4954:623 + xsmtpUserErrorf(smtp.C530SecurityRequired, smtp.SePol7Other0, "authentication required") + } +} + +// Write writes to the connection. It panics on i/o errors, which is handled by the +// connection command loop. +func (c *conn) Write(buf []byte) (int, error) { + // We set a single deadline for Write and Read. This may be a TLS connection. + // SetDeadline works on the underlying connection. If we wouldn't touch the read + // deadline, and only set the write deadline and do a bunch of writes, the TLS + // library would still have to do reads on the underlying connection, and may reach + // a read deadline that was set for some earlier read. + if err := c.conn.SetDeadline(c.earliestDeadline(30 * time.Second)); err != nil { + c.log.Errorx("setting deadline for write", err) + } + + n, err := c.conn.Write(buf) + if err != nil { + panic(fmt.Errorf("write: %s (%w)", err, errIO)) + } + return n, err +} + +// Read reads from the connection. It panics on i/o errors, which is handled by the +// connection command loop. +func (c *conn) Read(buf []byte) (int, error) { + // todo future: make deadline configurable for callers, and through config file? ../rfc/5321:3610 ../rfc/6409:492 + // See comment about Deadline instead of individual read/write deadlines at Write. + if err := c.conn.SetDeadline(c.earliestDeadline(30 * time.Second)); err != nil { + c.log.Errorx("setting deadline for read", err) + } + + n, err := c.conn.Read(buf) + if err != nil { + panic(fmt.Errorf("read: %s (%w)", err, errIO)) + } + return n, err +} + +// Cache of line buffers for reading commands. 
+// Filled on demand. +var bufpool = moxio.NewBufpool(8, 2*1024) + +func (c *conn) readline() string { + line, err := bufpool.Readline(c.r) + if err != nil && errors.Is(err, moxio.ErrLineTooLong) { + c.writecodeline(smtp.C500BadSyntax, smtp.SeProto5Other0, "line too line, smtp max is 512, we reached 2048", nil) + panic(fmt.Errorf("%s (%w)", err, errIO)) + } else if err != nil { + panic(fmt.Errorf("%s (%w)", err, errIO)) + } + return line +} + +// Buffered-write command response line to connection with codes and msg. +// Err is not sent to remote but is used for logging and can be empty. +func (c *conn) bwritecodeline(code int, secode string, msg string, err error) { + var ecode string + if secode != "" { + ecode = fmt.Sprintf("%d.%s", code/100, secode) + } + metricCommands.WithLabelValues(c.kind(), c.cmd, fmt.Sprintf("%d", code), ecode).Observe(float64(time.Since(c.cmdStart)) / float64(time.Second)) + c.log.Debugx("smtp command result", err, mlog.Field("kind", c.kind()), mlog.Field("cmd", c.cmd), mlog.Field("code", fmt.Sprintf("%d", code)), mlog.Field("ecode", ecode), mlog.Field("duration", time.Since(c.cmdStart))) + + var sep string + if ecode != "" { + sep = " " + } + + // Separate by newline and wrap long lines. + lines := strings.Split(msg, "\n") + for i, line := range lines { + // ../rfc/5321:3506 ../rfc/5321:2583 ../rfc/5321:2756 + var prelen = 3 + 1 + len(ecode) + len(sep) + for prelen+len(line) > 510 { + e := 510 - prelen + for ; e > 400 && line[e] != ' '; e-- { + } + // todo future: understand if ecode should be on each line. won't hurt. at least as long as we don't do expn or vrfy. + c.bwritelinef("%d-%s%s%s", code, ecode, sep, line[:e]) + line = line[e:] + } + spdash := " " + if i < len(lines)-1 { + spdash = "-" + } + c.bwritelinef("%d%s%s%s%s", code, spdash, ecode, sep, line) + } +} + +// Buffered-write a formatted response line to connection. +func (c *conn) bwritelinef(format string, args ...any) { + msg := fmt.Sprintf(format, args...) 
+ fmt.Fprint(c.w, msg+"\r\n") +} + +// Flush pending buffered writes to connection. +func (c *conn) xflush() { + c.w.Flush() // Errors will have caused a panic in Write. +} + +// Write (with flush) a response line with codes and message. err is not written, used for logging and can be nil. +func (c *conn) writecodeline(code int, secode string, msg string, err error) { + c.bwritecodeline(code, secode, msg, err) + c.xflush() +} + +// Write (with flush) a formatted response line to connection. +func (c *conn) writelinef(format string, args ...any) { + c.bwritelinef(format, args...) + c.xflush() +} + +var cleanClose struct{} // Sentinel value for panic/recover indicating clean close of connection. + +func serve(listenerName string, cid int64, hostname dns.Domain, tlsConfig *tls.Config, nc net.Conn, resolver dns.Resolver, submission, tls bool, maxMessageSize int64, requireTLSForAuth, requireTLSForDelivery bool, dnsBLs []dns.Domain) { + var localIP, remoteIP net.IP + if a, ok := nc.LocalAddr().(*net.TCPAddr); ok { + localIP = a.IP + } else { + // For net.Pipe, during tests. + localIP = net.ParseIP("127.0.0.10") + } + if a, ok := nc.RemoteAddr().(*net.TCPAddr); ok { + remoteIP = a.IP + } else { + // For net.Pipe, during tests. 
+ remoteIP = net.ParseIP("127.0.0.10") + } + + c := &conn{ + cid: cid, + origConn: nc, + conn: nc, + submission: submission, + tls: tls, + resolver: resolver, + lastlog: time.Now(), + tlsConfig: tlsConfig, + localIP: localIP, + remoteIP: remoteIP, + hostname: hostname, + maxMessageSize: maxMessageSize, + requireTLSForAuth: requireTLSForAuth, + requireTLSForDelivery: requireTLSForDelivery, + dnsBLs: dnsBLs, + } + c.log = xlog.MoreFields(func() []mlog.Pair { + now := time.Now() + l := []mlog.Pair{ + mlog.Field("cid", c.cid), + mlog.Field("delta", now.Sub(c.lastlog)), + } + c.lastlog = now + if c.username != "" { + l = append(l, mlog.Field("username", c.username)) + } + return l + }) + c.r = bufio.NewReader(moxio.NewTraceReader(c.log, "RC: ", c)) + c.w = bufio.NewWriter(moxio.NewTraceWriter(c.log, "LS: ", c)) + + metricConnection.WithLabelValues(c.kind()).Inc() + c.log.Info("new connection", mlog.Field("remote", c.conn.RemoteAddr()), mlog.Field("local", c.conn.LocalAddr()), mlog.Field("submission", submission), mlog.Field("tls", tls), mlog.Field("listener", listenerName)) + + defer func() { + c.origConn.Close() // Close actual TCP socket, regardless of TLS on top. + c.conn.Close() // Will try to write alert notification to already closed socket, returning error quickly. 
+ + if c.account != nil { + if err := c.account.Close(); err != nil { + c.log.Infox("smtp: account close", err) + } + c.account = nil + } + + x := recover() + if x == nil || x == cleanClose { + c.log.Info("connection closed") + } else if err, ok := x.(error); ok && isClosed(err) { + c.log.Infox("connection closed", err) + } else { + c.log.Error("unhandled error", mlog.Field("err", x)) + debug.PrintStack() + metrics.PanicInc("smtpserver") + } + }() + + select { + case <-mox.Shutdown: + // ../rfc/5321:2811 ../rfc/5321:1666 ../rfc/3463:420 + c.writecodeline(smtp.C421ServiceUnavail, smtp.SeSys3NotAccepting2, "shutting down", nil) + return + default: + } + + // We register and unregister the original connection, in case c.conn is replaced + // with a TLS connection later on. + mox.Connections.Register(nc, "smtp", listenerName) + defer mox.Connections.Unregister(nc) + + // ../rfc/5321:964 ../rfc/5321:4294 about announcing software and version + // Syntax: ../rfc/5321:2586 + // We include the string ESMTP. https://cr.yp.to/smtp/greeting.html recommends it. + // Should not be too relevant nowadays, but does not hurt and default blackbox + // exporter SMTP health check expects it. + c.writelinef("%d %s ESMTP mox %s", smtp.C220ServiceReady, c.hostname.ASCII, moxvar.Version) + + for { + command(c) + + // If another command is present, don't flush our buffered response yet. Holding + // off will cause us to respond with a single packet. 
+ n := c.r.Buffered() + if n > 0 { + buf, err := c.r.Peek(n) + if err == nil && bytes.IndexByte(buf, '\n') >= 0 { + continue + } + } + c.xflush() + } +} + +var commands = map[string]func(c *conn, p *parser){ + "helo": (*conn).cmdHelo, + "ehlo": (*conn).cmdEhlo, + "starttls": (*conn).cmdStarttls, + "auth": (*conn).cmdAuth, + "mail": (*conn).cmdMail, + "rcpt": (*conn).cmdRcpt, + "data": (*conn).cmdData, + "rset": (*conn).cmdRset, + "vrfy": (*conn).cmdVrfy, + "expn": (*conn).cmdExpn, + "help": (*conn).cmdHelp, + "noop": (*conn).cmdNoop, + "quit": (*conn).cmdQuit, +} + +func command(c *conn) { + defer func() { + x := recover() + if x == nil { + return + } + err, ok := x.(error) + if !ok { + panic(x) + } + + if isClosed(err) { + panic(err) + } + + var serr smtpError + if errors.As(err, &serr) { + c.writecodeline(serr.code, serr.secode, serr.err.Error(), serr.err) + if serr.printStack { + debug.PrintStack() + } + } else { + // Other type of panic, we pass it on, aborting the connection. + c.log.Errorx("command panic", err) + panic(x) + } + }() + + // todo future: we could wait for either a line or shutdown, and just close the connection on shutdown. + + line := c.readline() + t := strings.SplitN(line, " ", 2) + var args string + if len(t) == 2 { + args = " " + t[1] + } + cmd := t[0] + cmdl := strings.ToLower(cmd) + + // todo future: should we return an error for lines that are too long? perhaps for submission or in a pedantic mode. we would have to take extensions for MAIL into account. 
../rfc/5321:3500 ../rfc/5321:3552 + + select { + case <-mox.Shutdown: + // ../rfc/5321:2811 ../rfc/5321:1666 ../rfc/3463:420 + c.writecodeline(smtp.C421ServiceUnavail, smtp.SeSys3NotAccepting2, "shutting down", nil) + panic(errIO) + default: + } + + c.cmd = cmdl + c.cmdStart = time.Now() + + p := newParser(args, c.smtputf8, c) + fn, ok := commands[cmdl] + if !ok { + c.cmd = "(unknown)" + // note: not "command not implemented", see ../rfc/5321:2934 ../rfc/5321:2539 + xsmtpUserErrorf(smtp.C500BadSyntax, smtp.SeProto5BadCmdOrSeq1, "unknown command") + } + fn(c, p) +} + +// For use in metric labels. +func (c *conn) kind() string { + if c.submission { + return "submission" + } + return "smtp" +} + +func (c *conn) xneedHello() { + if c.hello.IsZero() { + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "no ehlo/helo yet") + } +} + +// If smtp server is configured to require TLS for all mail delivery, abort command. +func (c *conn) xneedTLSForDelivery() { + if c.requireTLSForDelivery && !c.tls { + // ../rfc/3207:148 + xsmtpUserErrorf(smtp.C530SecurityRequired, smtp.SePol7Other0, "STARTTLS required for mail delivery") + } +} + +func (c *conn) cmdHelo(p *parser) { + c.cmdHello(p, false) +} + +func (c *conn) cmdEhlo(p *parser) { + c.cmdHello(p, true) +} + +// ../rfc/5321:1783 +func (c *conn) cmdHello(p *parser, ehlo bool) { + // ../rfc/5321:1827, though a few paragraphs earlier at ../rfc/5321:1802 is a claim + // additional data can occur. + p.xspace() + var remote dns.IPDomain + if ehlo { + remote = p.xipdomain() + } else { + remote = dns.IPDomain{Domain: p.xdomain()} + if !c.submission { + // Verify a remote domain name has an A or AAAA record, CNAME not allowed. 
../rfc/5321:722 + cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid) + ctx, cancel := context.WithTimeout(cidctx, time.Minute) + _, err := c.resolver.LookupIPAddr(ctx, remote.Domain.ASCII+".") + cancel() + if dns.IsNotFound(err) { + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeProto5Other0, "your ehlo domain does not resolve to an IP address") + } + // For success or temporary resolve errors, we'll just continue. + } + } + p.remainder() // ../rfc/5321:1802 + p.xend() + + // Reset state as if RSET command has been issued. ../rfc/5321:2093 ../rfc/5321:2453 + c.rset() + + c.ehlo = ehlo + c.hello = remote + + // https://www.iana.org/assignments/mail-parameters/mail-parameters.xhtml + + c.bwritelinef("250-%s", c.hostname.ASCII) + c.bwritelinef("250-PIPELINING") // ../rfc/2920:108 + c.bwritelinef("250-SIZE %d", c.maxMessageSize) // ../rfc/1870:70 + // ../rfc/3207:237 + if !c.tls && c.tlsConfig != nil { + // ../rfc/3207:90 + c.bwritelinef("250-STARTTLS") + } + if c.submission { + // todo future: implement SCRAM-SHA-256. + // ../rfc/4954:123 + if c.tls || !c.requireTLSForAuth { + c.bwritelinef("250-AUTH PLAIN") + } else { + c.bwritelinef("250-AUTH ") + } + } + c.bwritelinef("250-ENHANCEDSTATUSCODES") // ../rfc/2034:71 + // todo future? c.writelinef("250-DSN") + c.bwritelinef("250-8BITMIME") // ../rfc/6152:86 + c.bwritecodeline(250, "", "SMTPUTF8", nil) // ../rfc/6531:201 + c.xflush() +} + +// ../rfc/3207:96 +func (c *conn) cmdStarttls(p *parser) { + c.xneedHello() + p.xend() + + if c.tls { + // ../rfc/3207:235 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "already speaking tls") + } + if c.account != nil { + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "cannot starttls after authentication") + } + + // We don't want to do TLS on top of c.r because it also prints protocol traces: We + // don't want to log the TLS stream. 
So we'll do TLS on the underlying connection, + // but make sure any bytes already read and in the buffer are used for the TLS + // handshake. + conn := c.conn + if n := c.r.Buffered(); n > 0 { + conn = &moxio.PrefixConn{ + PrefixReader: io.LimitReader(c.r, int64(n)), + Conn: conn, + } + } + + c.writecodeline(smtp.C220ServiceReady, smtp.SeOther00, "go!", nil) + tlsConn := tls.Server(conn, c.tlsConfig) + cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid) + ctx, cancel := context.WithTimeout(cidctx, time.Minute) + defer cancel() + c.log.Debug("starting tls server handshake") + if err := tlsConn.HandshakeContext(ctx); err != nil { + panic(fmt.Errorf("starttls handshake: %s (%w)", err, errIO)) + } + cancel() + tlsversion, ciphersuite := mox.TLSInfo(tlsConn) + c.log.Debug("tls server handshake done", mlog.Field("tls", tlsversion), mlog.Field("ciphersuite", ciphersuite)) + c.conn = tlsConn + c.r = bufio.NewReader(moxio.NewTraceReader(c.log, "RC: ", c)) + c.w = bufio.NewWriter(moxio.NewTraceWriter(c.log, "LS: ", c)) + + c.reset() // ../rfc/3207:210 + c.tls = true +} + +// ../rfc/4954:139 +func (c *conn) cmdAuth(p *parser) { + c.xneedHello() + + if !c.submission { + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "authentication only allowed on submission ports") + } + if c.account != nil { + // ../rfc/4954:152 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "already authenticated") + } + if c.mailFrom != nil { + // ../rfc/4954:157 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "authentication not allowed during mail transaction") + } + + // todo future: we may want to normalize usernames and passwords, see stringprep in ../rfc/4013:38 and possibly newer mechanisms (though they are opt-in and that may not have happened yet). + + // For many failed auth attempts, slow down verification attempts. + // Dropping the connection could also work, but more so when we have a connection rate limiter. 
// todo future: handle max length of 12288 octets and return proper response codes otherwise ../rfc/4954:253
+ } + } + buf, err := base64.StdEncoding.DecodeString(auth) + if err != nil { + // ../rfc/4954:235 + xsmtpUserErrorf(smtp.C501BadParamSyntax, smtp.SeProto5Syntax2, "invalid base64: %s", err) + } + plain := bytes.Split(buf, []byte{0}) + if len(plain) != 3 { + xsmtpUserErrorf(smtp.C501BadParamSyntax, smtp.SeProto5BadParams4, "auth data should have 3 nul-separated tokens, got %d", len(plain)) + } + authz := string(plain[0]) + authc := string(plain[1]) + password := string(plain[2]) + acc, err := store.OpenEmailAuth(authc, password) + if err != nil && errors.Is(err, store.ErrUnknownCredentials) { + // ../rfc/4954:274 + authResult = "badcreds" + xsmtpUserErrorf(smtp.C535AuthBadCreds, smtp.SePol7AuthBadCreds8, "bad user/pass") + } + xcheckf(err, "verifying credentials") + if authz != "" && authz != authc { + authResult = "badcreds" + xsmtpUserErrorf(smtp.C535AuthBadCreds, smtp.SePol7AuthBadCreds8, "cannot assume other role") + } + + authResult = "ok" + c.authFailed = 0 + c.account = acc + c.username = authc + // ../rfc/4954:276 + c.writecodeline(smtp.C235AuthSuccess, smtp.SePol7Other0, "nice", nil) + + default: + // todo future: implement scram-sha-256 ../rfc/7677 + // todo future: possibly implement cram-md5, at least where we allow PLAIN. ../rfc/4954:348 + // ../rfc/4954:176 + xsmtpUserErrorf(smtp.C504ParamNotImpl, smtp.SeProto5BadParams4, "mechanism %s not supported", mech) + } +} + +// ../rfc/5321:1879 ../rfc/5321:1025 +func (c *conn) cmdMail(p *parser) { + // requirements for maximum line length: + // ../rfc/5321:3500 (base max of 512 including crlf) ../rfc/4954:134 (+500) ../rfc/1870:92 (+26) ../rfc/6152:90 (none specified) ../rfc/6531:231 (+10) + // todo future: enforce? + + if c.transactionBad > 10 && c.transactionGood == 0 { + // If we get many bad transactions, it's probably a spammer that is guessing user names. + // Useful in combination with rate limiting. 
// We won't verify the message is exactly the size the remote claims. But if it is
Instead, we always + // check the rfc5321.mailfrom and rfc5322.from before accepting the submission. + // ../rfc/4954:538 + + // ../rfc/4954:704 + // todo future: should we accept utf-8-addr-xtext if there is no smtputf8, and utf-8 if there is? need to find a spec ../rfc/6533:259 + p.xtake("=") + p.xtake("<") + p.xtext() + p.xtake(">") + case "SMTPUTF8": + // ../rfc/6531:213 + c.smtputf8 = true + default: + // ../rfc/5321:2230 + xsmtpUserErrorf(smtp.C555UnrecognizedAddrParams, smtp.SeSys3NotSupported3, "unrecognized parameter %q", key) + } + } + + // We now know if we have to parse the address with support for utf8. + pp := newParser(rawRevPath, c.smtputf8, c) + rpath := pp.xbareReversePath() + pp.xempty() + pp = nil + p.xend() + + // For submission, check if reverse path is allowed. I.e. authenticated account + // must have the rpath configured. We do a check again on rfc5322.from during DATA. + rpathAllowed := func() bool { + // ../rfc/6409:349 + if rpath.IsZero() { + return true + } + accName, _, _, err := mox.FindAccount(rpath.Localpart, rpath.IPDomain.Domain, false) + return err == nil && accName == c.account.Name + } + + if !c.submission && !rpath.IPDomain.Domain.IsZero() { + // If rpath domain has null MX record or is otherwise not accepting email, reject. 
+ // ../rfc/7505:181 + // ../rfc/5321:4045 + cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid) + ctx, cancel := context.WithTimeout(cidctx, time.Minute) + valid, err := checkMXRecords(ctx, c.resolver, rpath.IPDomain.Domain) + cancel() + if err != nil { + c.log.Infox("temporary reject for temporary mx lookup error", err) + xsmtpServerErrorf(codes{smtp.C451LocalErr, smtp.SeNet4Other0}, "cannot verify mx records for mailfrom domain") + } else if !valid { + c.log.Info("permanent reject because mailfrom domain does not accept mail") + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SePol7SenderHasNullMX27, "mailfrom domain not configured for mail") + } + } + + if c.submission && (len(rpath.IPDomain.IP) > 0 || !rpathAllowed()) { + // ../rfc/6409:522 + c.log.Info("submission with unconfigured mailfrom", mlog.Field("user", c.username), mlog.Field("mailfrom", rpath.String())) + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SePol7DeliveryUnauth1, "must match authenticated user") + } else if !c.submission && len(rpath.IPDomain.IP) > 0 { + // todo future: allow if the IP is the same as this connection is coming from? does later code allow this? + c.log.Info("delivery from address without domain", mlog.Field("mailfrom", rpath.String())) + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SePol7Other0, "domain name required") + } + c.mailFrom = &rpath + + c.bwritecodeline(smtp.C250Completed, smtp.SeAddr1Other0, "looking good", nil) +} + +// ../rfc/5321:1916 ../rfc/5321:1054 +func (c *conn) cmdRcpt(p *parser) { + c.xneedHello() + c.xcheckAuth() + c.xneedTLSForDelivery() + if c.mailFrom == nil { + // ../rfc/5321:1088 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "missing MAIL FROM") + } + + // ../rfc/5321:1985 + p.xtake(" TO:") + // note: no space after colon. 
../rfc/5321:1093 + var fpath smtp.Path + if p.take("") { + fpath = smtp.Path{Localpart: "postmaster"} + } else { + fpath = p.xforwardPath() + } + for p.space() { + // ../rfc/5321:2275 + key := p.xparamKeyword() + K := strings.ToUpper(key) + switch K { + // todo future: DSN, ../rfc/3461, with "NOTIFY" + default: + // ../rfc/5321:2230 + xsmtpUserErrorf(smtp.C555UnrecognizedAddrParams, smtp.SeSys3NotSupported3, "unrecognized parameter %q", key) + } + } + p.xend() + + // todo future: for submission, should we do explicit verification that domains are fully qualified? also for mail from. ../rfc/6409:420 + + if len(c.recipients) >= 100 { + // ../rfc/5321:3535 ../rfc/5321:3571 + xsmtpUserErrorf(smtp.C452StorageFull, smtp.SeProto5TooManyRcpts3, "max of 100 recipients reached") + } + + // We don't want to allow delivery to multiple recipients with a null reverse path. + // Why would anyone send like that? Null reverse path is intended for delivery + // notifications, they should go to a single recipient. + if !c.submission && len(c.recipients) > 0 && c.mailFrom.IsZero() { + xsmtpUserErrorf(smtp.C452StorageFull, smtp.SeProto5TooManyRcpts3, "only one recipient allowed with null reverse address") + } + + // Do not accept multiple recipients if remote does not pass SPF. Because we don't + // want to generate DSNs to unverified domains. This is the moment we + // can refuse individual recipients, DATA will be too late. Because mail + // servers must handle a max recipient limit gracefully and still send to the + // recipients that are accepted, this should not cause problems. Though we are in + // violation because the limit must be >= 100. + // ../rfc/5321:3598 + // ../rfc/5321:4045 + // Also see ../rfc/7489:2214 + if !c.submission && len(c.recipients) == 1 { + // note: because of check above, mailFrom cannot be the null address. + var pass bool + d := c.mailFrom.IPDomain.Domain + if !d.IsZero() { + // todo: use this spf result for DATA. 
+ spfArgs := spf.Args{ + RemoteIP: c.remoteIP, + MailFromLocalpart: c.mailFrom.Localpart, + MailFromDomain: d, + HelloDomain: c.hello, + LocalIP: c.localIP, + LocalHostname: c.hostname, + } + cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid) + spfctx, spfcancel := context.WithTimeout(cidctx, time.Minute) + defer spfcancel() + receivedSPF, _, _, err := spf.Verify(spfctx, c.resolver, spfArgs) + spfcancel() + if err != nil { + c.log.Errorx("spf verify for multiple reciepients", err) + } + pass = receivedSPF.Identity == spf.ReceivedMailFrom && receivedSPF.Result == spf.StatusPass + } + if !pass { + xsmtpUserErrorf(smtp.C452StorageFull, smtp.SeProto5TooManyRcpts3, "only one recipient allowed without spf pass") + } + } + + if len(fpath.IPDomain.IP) > 0 { + if !c.submission { + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeAddr1UnknownDestMailbox1, "not accepting email for ip") + } + c.recipients = append(c.recipients, rcptAccount{fpath, false, "", config.Destination{}, ""}) + } else if accountName, canonical, addr, err := mox.FindAccount(fpath.Localpart, fpath.IPDomain.Domain, true); err == nil { + // note: a bare postmaster, without domain, is handled by FindAccount. ../rfc/5321:735 + c.recipients = append(c.recipients, rcptAccount{fpath, true, accountName, addr, canonical}) + } else if errors.Is(err, mox.ErrDomainNotFound) { + if !c.submission { + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeAddr1UnknownDestMailbox1, "not accepting email for domain") + } + // We'll be delivering this email. + c.recipients = append(c.recipients, rcptAccount{fpath, false, "", config.Destination{}, ""}) + } else if errors.Is(err, mox.ErrAccountNotFound) { + if c.submission { + // For submission, we're transparent about which user exists. Should be fine for the typical small-scale deploy. + // ../rfc/5321:1071 + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeAddr1UnknownDestMailbox1, "no such user") + } + // We pretend to accept. 
We don't want to let remote know the user does not exist + // until after DATA. Because then remote has committed to sending a message. + // note: not local for !c.submission is the signal this address is in error. + c.recipients = append(c.recipients, rcptAccount{fpath, false, "", config.Destination{}, ""}) + } else { + c.log.Errorx("looking up account for delivery", err, mlog.Field("rcptto", fpath)) + xsmtpServerErrorf(codes{smtp.C451LocalErr, smtp.SeSys3Other0}, "error processing") + } + c.bwritecodeline(smtp.C250Completed, smtp.SeAddr1Other0, "now on the list", nil) +} + +// ../rfc/5321:1992 ../rfc/5321:1098 +func (c *conn) cmdData(p *parser) { + c.xneedHello() + c.xcheckAuth() + c.xneedTLSForDelivery() + if c.mailFrom == nil { + // ../rfc/5321:1130 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "missing MAIL FROM") + } + if len(c.recipients) == 0 { + // ../rfc/5321:1130 + xsmtpUserErrorf(smtp.C503BadCmdSeq, smtp.SeProto5BadCmdOrSeq1, "missing RCPT TO") + } + + // ../rfc/5321:2066 + p.xend() + + // todo future: we could start a reader for a single line. we would then create a context that would be canceled on i/o errors. + + // Entire delivery should be done within 30 minutes, or we abort. + cidctx := context.WithValue(mox.Context, mlog.CidKey, c.cid) + cmdctx, cmdcancel := context.WithTimeout(cidctx, 30*time.Minute) + defer cmdcancel() + // Deadline is taken into account by Read and Write. + c.deadline, _ = cmdctx.Deadline() + defer func() { + c.deadline = time.Time{} + }() + + // ../rfc/5321:1994 + c.writelinef("354 see you at the bare dot") + + // We read the data into a temporary file. We limit the size and do basic analysis while reading. 
+ dataFile, err := store.CreateMessageTemp("smtp-deliver") + if err != nil { + xsmtpServerErrorf(errCodes(smtp.C451LocalErr, smtp.SeSys3Other0, err), "creating temporary file for message: %s", err) + } + defer func() { + if dataFile != nil { + if err := os.Remove(dataFile.Name()); err != nil { + c.log.Infox("removing temporary message file", err, mlog.Field("path", dataFile.Name())) + } + dataFile.Close() + } + }() + msgWriter := &message.Writer{Writer: dataFile} + dr := smtp.NewDataReader(c.r) + if n, err := io.Copy(&limitWriter{maxSize: c.maxMessageSize, w: msgWriter}, dr); err != nil { + if errors.Is(err, errMessageTooLarge) { + // ../rfc/1870:136 and ../rfc/3463:382 + ecode := smtp.SeSys3MsgLimitExceeded4 + if n < defaultMaxMsgSize { + ecode = smtp.SeMailbox2MsgLimitExceeded3 + } + c.writecodeline(smtp.C451LocalErr, ecode, "error copying data to file", err) + panic(fmt.Errorf("remote sent too much DATA: %w", errIO)) + } + + // Something is failing on our side. We want to let remote know. So write an error response, + // then discard the remaining data so the remote client is more likely to see our + // response. Our write is synchronous, there is a risk no window/buffer space is + // available and our write blocks us from reading remaining data, leading to + // deadlock. We have a timeout on our connection writes though, so worst case we'll + // abort the connection due to expiration. + c.writecodeline(smtp.C451LocalErr, smtp.SeSys3Other0, "error copying data to file", err) + io.Copy(io.Discard, dr) + return + } + + // Basic sanity checks on messages before we send them out to the world. Just + // trying to be strict in what we do to others and liberal in what we accept. 
+ if c.submission { + if !msgWriter.HaveHeaders { + // ../rfc/6409:541 + xsmtpUserErrorf(smtp.C554TransactionFailed, smtp.SeMsg6Other0, "message requires both header and body section") + } + // todo: check disabled because ios mail will attempt to send smtputf8 with non-ascii in message from localpart without using 8bitmime. we should have a non-lax mode that disallows this behaviour. + if false && msgWriter.Has8bit && !c.has8bitmime { + // ../rfc/5321:906 + xsmtpUserErrorf(smtp.C500BadSyntax, smtp.SeMsg6Other0, "message with non-us-ascii requires 8bitmime extension") + } + } + + // Prepare "Received" header. + // ../rfc/5321:2051 ../rfc/5321:3302 + // ../rfc/5321:3311 ../rfc/6531:578 + var recvFrom string + var iprevStatus iprev.Status // Only for delivery, not submission. + if c.submission { + // Hide internal hosts. + // todo future: make this a config option, where admins specify ip ranges that they don't want exposed. also see ../rfc/5321:4321 + recvFrom = messageHeaderCommentDomain(mox.Conf.Static.HostnameDomain, c.smtputf8) + } else { + if len(c.hello.IP) > 0 { + recvFrom = smtp.AddressLiteral(c.hello.IP) + } else { + // ASCII-only version added after the extended-domain syntax below, because the + // comment belongs to "BY" which comes immediately after "FROM". 
+ recvFrom = c.hello.Domain.XName(c.smtputf8) + } + iprevctx, iprevcancel := context.WithTimeout(cmdctx, time.Minute) + var revName string + var revNames []string + iprevStatus, revName, revNames, err = iprev.Lookup(iprevctx, c.resolver, c.remoteIP) + iprevcancel() + if err != nil { + c.log.Infox("reverse-forward lookup", err, mlog.Field("remoteip", c.remoteIP)) + } + c.log.Info("dns iprev check", mlog.Field("addr", c.remoteIP), mlog.Field("status", iprevStatus)) + var name string + if revName != "" { + name = revName + } else if len(revNames) > 0 { + name = revNames[0] + } + name = strings.TrimSuffix(name, ".") + recvFrom += " (" + if name != "" && name != c.hello.Domain.XName(c.smtputf8) { + recvFrom += name + " " + } + recvFrom += smtp.AddressLiteral(c.remoteIP) + ")" + if c.smtputf8 && c.hello.Domain.Unicode != "" { + recvFrom += " (" + c.hello.Domain.ASCII + ")" + } + } + recvBy := mox.Conf.Static.HostnameDomain.XName(c.smtputf8) + recvBy += " (" + smtp.AddressLiteral(c.localIP) + ")" + if c.smtputf8 && mox.Conf.Static.HostnameDomain.Unicode != "" { + // This syntax is part of "VIA". + recvBy += " (" + mox.Conf.Static.HostnameDomain.ASCII + ")" + } + + // ../rfc/3848:34 ../rfc/6531:791 + with := "SMTP" + if c.smtputf8 { + with = "UTF8SMTP" + } else if c.ehlo { + with = "ESMTP" + } + if c.tls { + with += "S" + } + if c.account != nil { + // ../rfc/4954:660 + with += "A" + } + + // Assume transaction does not succeed. If it does, we'll compensate. + c.transactionBad++ + + recvHdrFor := func(rcptTo string) string { + recvHdr := &message.HeaderWriter{} + // For additional Received-header clauses, see: + // https://www.iana.org/assignments/mail-parameters/mail-parameters.xhtml#table-mail-parameters-8 + recvHdr.Add(" ", "Received:", "from", recvFrom, "by", recvBy, "via", "tcp", "with", with, "id", mox.ReceivedID(c.cid)) // ../rfc/5321:3158 + recvHdr.Add(" ", c.tlsReceivedComment()...) 
+ recvHdr.Add(" ", "for", "<"+rcptTo+">;", time.Now().Format(message.RFC5322Z)) + return recvHdr.String() + } + + // Submission is easiest because user is trusted. Far fewer checks to make. So + // handle it first, and leave the rest of the function for handling wild west + // internet traffic. + if c.submission { + c.submit(cmdctx, recvHdrFor, msgWriter, &dataFile) + } else { + c.deliver(cmdctx, recvHdrFor, msgWriter, iprevStatus, &dataFile) + } +} + +// returns domain name optionally followed by message header comment with ascii-only name. +// The comment is only present when smtputf8 is true and the domain name is unicode. +// Caller should make sure the comment is allowed in the syntax. E.g. for Received, it is often allowed before the next field, so make sure such a next field is present. +func messageHeaderCommentDomain(domain dns.Domain, smtputf8 bool) string { + s := domain.XName(smtputf8) + if smtputf8 && domain.Unicode != "" { + s += " (" + domain.ASCII + ")" + } + return s +} + +// submit is used for incoming mail from authenticated users. +func (c *conn) submit(ctx context.Context, recvHdrFor func(string) string, msgWriter *message.Writer, pdataFile **os.File) { + dataFile := *pdataFile + + var msgPrefix []byte + + // Check that user is only sending email as one of its configured identities. Not + // for other users. 
+ msgFrom, header, err := message.From(dataFile) + if err != nil { + metricSubmission.WithLabelValues("badmessage").Inc() + c.log.Infox("parsing message From address", err, mlog.Field("user", c.username)) + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeMsg6Other0, "cannot parse header or From address: %v", err) + } + accName, _, _, err := mox.FindAccount(msgFrom.Localpart, msgFrom.Domain, true) + if err != nil || accName != c.account.Name { + // ../rfc/6409:522 + if err == nil { + err = mox.ErrAccountNotFound + } + metricSubmission.WithLabelValues("badfrom").Inc() + c.log.Infox("verifying message From address", err, mlog.Field("user", c.username), mlog.Field("msgfrom", msgFrom)) + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SePol7DeliveryUnauth1, "must match authenticated user") + } + + // Outgoing messages should not have a Return-Path header. The final receiving mail + // server will add it. + // ../rfc/5321:3233 + if header.Values("Return-Path") != nil { + metricSubmission.WithLabelValues("badheader").Inc() + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeMsg6Other0, "message must not have Return-Path header") + } + + // Add Message-Id header if missing. + // ../rfc/5321:4131 ../rfc/6409:751 + if header.Get("Message-Id") == "" { + msgPrefix = append(msgPrefix, fmt.Sprintf("Message-Id: <%s>\r\n", mox.MessageIDGen(c.smtputf8))...) + } + + // ../rfc/6409:745 + if header.Get("Date") == "" { + msgPrefix = append(msgPrefix, "Date: "+time.Now().Format(message.RFC5322Z)+"\r\n"...) + } + + // todo future: in a pedantic mode, we can parse the headers, and return an error if rcpt is only in To or Cc header, and not in the non-empty Bcc header. indicates a client that doesn't blind those bcc's. + + // Add DKIM signatures. 
+ domain := c.mailFrom.IPDomain.Domain + confDom, ok := mox.Conf.Domain(domain) + if !ok { + c.log.Error("domain disappeared", mlog.Field("domain", domain)) + xsmtpServerErrorf(codes{smtp.C451LocalErr, smtp.SeSys3Other0}, "internal error") + } + + dkimConfig := confDom.DKIM + if len(dkimConfig.Sign) > 0 { + if canonical, err := mox.CanonicalLocalpart(c.mailFrom.Localpart, confDom); err != nil { + c.log.Errorx("determining canonical localpart for dkim signing", err, mlog.Field("localpart", c.mailFrom.Localpart)) + } else if dkimHeaders, err := dkim.Sign(ctx, canonical, domain, dkimConfig, c.smtputf8, dataFile); err != nil { + c.log.Errorx("dkim sign for domain", err, mlog.Field("domain", domain)) + metricServerErrors.WithLabelValues("dkimsign").Inc() + } else { + msgPrefix = append(msgPrefix, []byte(dkimHeaders)...) + } + } + + authResults := AuthResults{ + Hostname: mox.Conf.Static.HostnameDomain.XName(c.smtputf8), + Comment: mox.Conf.Static.HostnameDomain.ASCIIExtra(c.smtputf8), + Methods: []AuthMethod{ + { + Method: "auth", + Result: "pass", + Props: []AuthProp{ + {"smtp", "mailfrom", c.mailFrom.XString(c.smtputf8), true, c.mailFrom.ASCIIExtra(c.smtputf8)}, + }, + }, + }, + } + msgPrefix = append(msgPrefix, []byte(authResults.Header())...) + + for i, rcptAcc := range c.recipients { + xmsgPrefix := append([]byte(recvHdrFor(rcptAcc.rcptTo.String())), msgPrefix...) + // todo: don't convert the headers to a body? it seems the body part is optional. does this have consequences for us in other places? ../rfc/5322:343 + if !msgWriter.HaveHeaders { + xmsgPrefix = append(xmsgPrefix, "\r\n"...) + } + + msgSize := int64(len(xmsgPrefix)) + msgWriter.Size + if err := queue.Add(c.log, c.account.Name, *c.mailFrom, rcptAcc.rcptTo, msgWriter.Has8bit, c.smtputf8, msgSize, xmsgPrefix, dataFile, nil, i == len(c.recipients)-1); err != nil { + // Aborting the transaction is not great. But continuing and generating DSNs will + // probably result in errors as well... 
c.transactionBad-- // Compensate for the earlier pessimistic increase.
+ // ../rfc/5321:2481 + authResults.Methods = append(authResults.Methods, AuthMethod{ + Method: "iprev", + Result: string(iprevStatus), + Props: []AuthProp{ + {"policy", "iprev", c.remoteIP.String(), false, ""}, + }, + }) + + // SPF and DKIM verification in parallel. + var wg sync.WaitGroup + + // DKIM + wg.Add(1) + var dkimResults []dkim.Result + var dkimErr error + go func() { + defer func() { + recover() // Should not happen, but don't take program down if it does. + }() + defer wg.Done() + // We always evaluate all signatures. We want to build up reputation for each + // domain in the signature. + const ignoreTestMode = false + // todo future: longer timeout? we have to read through the entire email, which can be large, possibly multiple times. + dkimctx, dkimcancel := context.WithTimeout(ctx, time.Minute) + defer dkimcancel() + // todo future: we could let user configure which dkim headers they require + dkimResults, dkimErr = dkim.Verify(dkimctx, c.resolver, c.smtputf8, dkim.DefaultPolicy, dataFile, ignoreTestMode) + dkimcancel() + }() + + // SPF. + // ../rfc/7208:472 + var receivedSPF spf.Received + var spfDomain dns.Domain + var spfExpl string + var spfErr error + spfArgs := spf.Args{ + RemoteIP: c.remoteIP, + MailFromLocalpart: c.mailFrom.Localpart, + MailFromDomain: c.mailFrom.IPDomain.Domain, // Can be empty. + HelloDomain: c.hello, + LocalIP: c.localIP, + LocalHostname: c.hostname, + } + wg.Add(1) + go func() { + defer func() { + recover() // Should not happen, but don't take program down if it does. + }() + defer wg.Done() + spfctx, spfcancel := context.WithTimeout(ctx, time.Minute) + defer spfcancel() + receivedSPF, spfDomain, spfExpl, spfErr = spf.Verify(spfctx, c.resolver, spfArgs) + spfcancel() + if spfErr != nil { + c.log.Infox("spf verify", spfErr) + } + }() + + // Wait for DKIM and SPF validation to finish. + wg.Wait() + + // Give immediate response if all recipients are unknown. 
+ nunknown := 0 + for _, r := range c.recipients { + if !r.local { + nunknown++ + } + } + if nunknown == len(c.recipients) { + // During RCPT TO we found that the address does not exist. + c.log.Info("deliver attempt to unknown user(s)", mlog.Field("recipients", c.recipients)) + + // Crude attempt to slow down someone trying to guess names. Would work better + // with connection rate limiter. + mox.Sleep(ctx, 1*time.Second) + + // todo future: if remote does not look like a properly configured mail system, respond with generic 451 error? to prevent any random internet system from discovering accounts. we could give proper response if spf for ehlo or mailfrom passes. + xsmtpUserErrorf(smtp.C550MailboxUnavail, smtp.SeAddr1UnknownDestMailbox1, "no such user(s)") + } + + // Add DKIM results to Authentication-Results header. + authResAddDKIM := func(result, comment, reason string, props []AuthProp) { + dm := AuthMethod{ + Method: "dkim", + Result: result, + Comment: comment, + Reason: reason, + Props: props, + } + authResults.Methods = append(authResults.Methods, dm) + } + if dkimErr != nil { + c.log.Errorx("dkim verify", dkimErr) + authResAddDKIM("none", "", dkimErr.Error(), nil) + } else if len(dkimResults) == 0 { + c.log.Info("dkim verify: no dkim-signature header", mlog.Field("mailfrom", c.mailFrom)) + authResAddDKIM("none", "", "no dkim signatures", nil) + } + for i, r := range dkimResults { + var domain, selector dns.Domain + var identity *dkim.Identity + var comment string + var props []AuthProp + if r.Sig != nil { + // todo future: also specify whether dns record was dnssec-signed. + if r.Record != nil && r.Record.PublicKey != nil { + if pubkey, ok := r.Record.PublicKey.(*rsa.PublicKey); ok { + comment = fmt.Sprintf("%d bit rsa", pubkey.N.BitLen()) + } + } + + sig := base64.StdEncoding.EncodeToString(r.Sig.Signature) + sig = sig[:12] // Must be at least 8 characters and unique among the signatures. 
+ props = []AuthProp{ + {"header", "d", r.Sig.Domain.XName(c.smtputf8), true, r.Sig.Domain.ASCIIExtra(c.smtputf8)}, + {"header", "s", r.Sig.Selector.XName(c.smtputf8), true, r.Sig.Selector.ASCIIExtra(c.smtputf8)}, + {"header", "a", r.Sig.Algorithm(), false, ""}, + {"header", "b", sig, false, ""}, // ../rfc/6008:147 + } + domain = r.Sig.Domain + selector = r.Sig.Selector + if r.Sig.Identity != nil { + props = append(props, AuthProp{"header", "i", r.Sig.Identity.String(), true, ""}) + identity = r.Sig.Identity + } + } + var errmsg string + if r.Err != nil { + errmsg = r.Err.Error() + } + authResAddDKIM(string(r.Status), comment, errmsg, props) + c.log.Debugx("dkim verification result", r.Err, mlog.Field("index", i), mlog.Field("mailfrom", c.mailFrom), mlog.Field("status", r.Status), mlog.Field("domain", domain), mlog.Field("selector", selector), mlog.Field("identity", identity)) + } + + // Add SPF results to Authentication-Results header. ../rfc/7208:2141 + var spfIdentity *dns.Domain + var mailFromValidation = store.ValidationUnknown + var ehloValidation = store.ValidationUnknown + switch receivedSPF.Identity { + case spf.ReceivedHELO: + if len(spfArgs.HelloDomain.IP) == 0 { + spfIdentity = &spfArgs.HelloDomain.Domain + } + ehloValidation = store.SPFValidation(receivedSPF.Result) + case spf.ReceivedMailFrom: + spfIdentity = &spfArgs.MailFromDomain + mailFromValidation = store.SPFValidation(receivedSPF.Result) + } + var props []AuthProp + if spfIdentity != nil { + props = []AuthProp{{"smtp", string(receivedSPF.Identity), spfIdentity.XName(c.smtputf8), true, spfIdentity.ASCIIExtra(c.smtputf8)}} + } + authResults.Methods = append(authResults.Methods, AuthMethod{ + Method: "spf", + Result: string(receivedSPF.Result), + Props: props, + }) + switch receivedSPF.Result { + case spf.StatusPass: + c.log.Info("spf pass", mlog.Field("ip", spfArgs.RemoteIP), mlog.Field("mailfromDomain", spfArgs.MailFromDomain.ASCII)) // todo: log the domain that was actually verified. 
+ case spf.StatusFail: + if spfExpl != "" { + // Filter out potentially hostile text. ../rfc/7208:2529 + for _, b := range []byte(spfExpl) { + if b < ' ' || b >= 0x7f { + spfExpl = "" + break + } + } + if spfExpl != "" { + if len(spfExpl) > 800 { + spfExpl = spfExpl[:797] + "..." + } + spfExpl = "remote claims: " + spfExpl + } + } + if spfExpl == "" { + spfExpl = fmt.Sprintf("your ip %s is not on the SPF allowlist for domain %s", spfArgs.RemoteIP, spfDomain.ASCII) + } + c.log.Info("spf fail", mlog.Field("explanation", spfExpl)) // todo future: get this to the client. how? in smtp session in case of a reject due to dmarc fail? + case spf.StatusTemperror: + c.log.Infox("spf temperror", spfErr) + case spf.StatusPermerror: + c.log.Infox("spf permerror", spfErr) + case spf.StatusNone, spf.StatusNeutral, spf.StatusSoftfail: + default: + c.log.Error("unknown spf status, treating as None/Neutral", mlog.Field("status", receivedSPF.Result)) + receivedSPF.Result = spf.StatusNone + } + + // DMARC + var dmarcUse bool + var dmarcResult dmarc.Result + const applyRandomPercentage = true + var dmarcMethod AuthMethod + var msgFromValidation = store.ValidationNone + if msgFrom.IsZero() { + dmarcResult.Status = dmarc.StatusNone + dmarcMethod = AuthMethod{ + Method: "dmarc", + Result: string(dmarcResult.Status), + } + } else { + msgFromValidation = alignment(ctx, msgFrom.Domain, dkimResults, receivedSPF.Result, spfIdentity) + + dmarcctx, dmarccancel := context.WithTimeout(ctx, time.Minute) + defer dmarccancel() + dmarcUse, dmarcResult = dmarc.Verify(dmarcctx, c.resolver, msgFrom.Domain, dkimResults, receivedSPF.Result, spfIdentity, applyRandomPercentage) + dmarccancel() + dmarcMethod = AuthMethod{ + Method: "dmarc", + Result: string(dmarcResult.Status), + Props: []AuthProp{ + // ../rfc/7489:1489 + {"header", "from", msgFrom.Domain.ASCII, true, msgFrom.Domain.ASCIIExtra(c.smtputf8)}, + }, + } + + if dmarcResult.Status == dmarc.StatusPass && msgFromValidation == store.ValidationRelaxed { 
+ msgFromValidation = store.ValidationDMARC + } + + // todo future: consider enforcing an spf fail if there is no dmarc policy or the dmarc policy is none. ../rfc/7489:1507 + } + authResults.Methods = append(authResults.Methods, dmarcMethod) + c.log.Info("dmarc verification", mlog.Field("result", dmarcResult.Status), mlog.Field("domain", msgFrom.Domain)) + + // Prepare for analyzing content, calculating reputation. + var ipmasked1, ipmasked2, ipmasked3 string + if c.remoteIP.To4() != nil { + ipmasked1 = c.remoteIP.String() + ipmasked2 = c.remoteIP.Mask(net.CIDRMask(26, 32)).String() + ipmasked3 = c.remoteIP.Mask(net.CIDRMask(21, 32)).String() + } else { + ipmasked1 = c.remoteIP.Mask(net.CIDRMask(64, 128)).String() + ipmasked2 = c.remoteIP.Mask(net.CIDRMask(48, 128)).String() + ipmasked3 = c.remoteIP.Mask(net.CIDRMask(32, 128)).String() + } + var verifiedDKIMDomains []string + for _, r := range dkimResults { + // A message can have multiple signatures for the same identity. For example when + // signing the message multiple times with different algorithms (rsa and ed25519). + seen := map[string]bool{} + if r.Status != dkim.StatusPass { + continue + } + d := r.Sig.Domain.Name() + if !seen[d] { + seen[d] = true + verifiedDKIMDomains = append(verifiedDKIMDomains, d) + } + } + + // When we deliver, we try to remove from rejects mailbox based on message-id. + // We'll parse it when we need it, but it is the same for each recipient. + var messageID string + var parsedMessageID bool + + // We build up a DSN for each failed recipient. If we have recipients in dsnMsg + // after processing, we queue the DSN. Unless all recipients failed, in which case + // we may just fail the mail transaction instead (could be common for failure to + // deliver to a single recipient, e.g. for junk mail). 
+ // ../rfc/3464:436 + type deliverError struct { + rcptTo smtp.Path + code int + secode string + userError bool + errmsg string + } + var deliverErrors []deliverError + addError := func(rcptAcc rcptAccount, code int, secode string, userError bool, errmsg string) { + e := deliverError{rcptAcc.rcptTo, code, secode, userError, errmsg} + c.log.Info("deliver error", mlog.Field("rcptto", e.rcptTo), mlog.Field("code", code), mlog.Field("secode", "secode"), mlog.Field("usererror", userError), mlog.Field("errmsg", errmsg)) + deliverErrors = append(deliverErrors, e) + } + + // For each recipient, do final spam analysis and delivery. + for _, rcptAcc := range c.recipients { + + log := c.log.Fields(mlog.Field("mailfrom", c.mailFrom), mlog.Field("rcptto", rcptAcc.rcptTo)) + + // If this is not a valid local user, we send back a DSN. This can only happen when + // there are also valid recipients, and only when remote is SPF-verified, so the DSN + // should not cause backscatter. + // In case of serious errors, we abort the transaction. We may have already + // delivered some messages. Perhaps it would be better to continue with other + // deliveries, and return an error at the end? Though the failure conditions will + // probably prevent any other successful deliveries too... + // We'll continue delivering to other recipients. 
../rfc/5321:3275 + if !rcptAcc.local { + metricDelivery.WithLabelValues("unknownuser", "").Inc() + addError(rcptAcc, smtp.C550MailboxUnavail, smtp.SeAddr1UnknownDestMailbox1, true, "no such user") + continue + } + + acc, err := store.OpenAccount(rcptAcc.accountName) + if err != nil { + log.Errorx("open account", err, mlog.Field("account", rcptAcc.accountName)) + metricDelivery.WithLabelValues("accounterror", "").Inc() + addError(rcptAcc, smtp.C451LocalErr, smtp.SeSys3Other0, false, "error processing") + continue + } + defer func() { + if acc != nil { + if err := acc.Close(); err != nil { + log.Errorx("closing account after delivery", err) + } + } + }() + + // ../rfc/5321:3204 + // ../rfc/5321:3300 + // Received-SPF header goes before Received. ../rfc/7208:2038 + msgPrefix := []byte("Return-Path: <" + c.mailFrom.String() + ">\r\n" + authResults.Header() + receivedSPF.Header() + recvHdrFor(rcptAcc.rcptTo.String())) + if !msgWriter.HaveHeaders { + msgPrefix = append(msgPrefix, "\r\n"...) + } + + m := &store.Message{ + Received: time.Now(), + RemoteIP: c.remoteIP.String(), + RemoteIPMasked1: ipmasked1, + RemoteIPMasked2: ipmasked2, + RemoteIPMasked3: ipmasked3, + EHLODomain: c.hello.Domain.Name(), + MailFrom: c.mailFrom.String(), + MailFromLocalpart: c.mailFrom.Localpart, + MailFromDomain: c.mailFrom.IPDomain.Domain.Name(), + RcptToLocalpart: rcptAcc.rcptTo.Localpart, + RcptToDomain: rcptAcc.rcptTo.IPDomain.Domain.Name(), + MsgFromLocalpart: msgFrom.Localpart, + MsgFromDomain: msgFrom.Domain.Name(), + MsgFromOrgDomain: publicsuffix.Lookup(ctx, msgFrom.Domain).Name(), + EHLOValidated: ehloValidation == store.ValidationPass, + MailFromValidated: mailFromValidation == store.ValidationPass, + MsgFromValidated: msgFromValidation == store.ValidationStrict || msgFromValidation == store.ValidationDMARC || msgFromValidation == store.ValidationRelaxed, + EHLOValidation: ehloValidation, + MailFromValidation: mailFromValidation, + MsgFromValidation: msgFromValidation, + 
DKIMDomains: verifiedDKIMDomains, + Size: int64(len(msgPrefix)) + msgWriter.Size, + MsgPrefix: msgPrefix, + } + d := delivery{m, dataFile, rcptAcc, acc, msgFrom, c.dnsBLs, dmarcUse, dmarcResult, dkimResults, iprevStatus} + a := analyze(ctx, log, c.resolver, d) + if a.reason != "" { + xmoxreason := "X-Mox-Reason: " + a.reason + "\r\n" + m.MsgPrefix = append([]byte(xmoxreason), m.MsgPrefix...) + m.Size += int64(len(xmoxreason)) + } + if !a.accept { + conf, _ := acc.Conf() + if conf.RejectsMailbox != "" { + present, messageid, messagehash, err := rejectPresent(log, acc, conf.RejectsMailbox, m, dataFile) + if err != nil { + log.Errorx("checking whether reject is already present", err) + } else if !present { + m.Flags.Seen = true // We don't want to draw attention. + m.MessageID = messageid + m.MessageHash = messagehash + acc.WithWLock(func() { + if hasSpace, err := acc.TidyRejectsMailbox(conf.RejectsMailbox); err != nil { + log.Errorx("tidying rejects mailbox", err) + } else if hasSpace { + if err := acc.DeliverMailbox(log, conf.RejectsMailbox, m, dataFile, false); err != nil { + log.Errorx("delivering spammy mail to rejects mailbox", err) + } else { + log.Info("delivered spammy mail to rejects mailbox") + } + } else { + log.Info("not storing spammy mail to full rejects mailbox") + } + }) + } else { + log.Info("reject message is already present, ignoring") + } + } + + log.Info("incoming message rejected", mlog.Field("reason", a.reason)) + metricDelivery.WithLabelValues("reject", a.reason).Inc() + addError(rcptAcc, a.code, a.secode, a.userError, a.errmsg) + continue + } + + if a.dmarcReport != nil { + // todo future: add rate limiting to prevent DoS attacks. ../rfc/7489:2570 + if err := dmarcdb.AddReport(ctx, a.dmarcReport, msgFrom.Domain); err != nil { + log.Errorx("saving dmarc report in database", err) + } else { + log.Info("dmarc report processed") + m.Flags.Seen = true + } + } + if a.tlsReport != nil { + // todo future: add rate limiting to prevent DoS attacks. 
+ if err := tlsrptdb.AddReport(ctx, msgFrom.Domain, c.mailFrom.String(), a.tlsReport); err != nil { + log.Errorx("saving TLSRPT report in database", err) + } else { + log.Info("tlsrpt report processed") + m.Flags.Seen = true + } + } + + acc.WithWLock(func() { + // Gather the message-id before we deliver and the file may be consumed. + if !parsedMessageID { + if p, err := message.Parse(store.FileMsgReader(m.MsgPrefix, dataFile)); err != nil { + log.Infox("parsing message for message-id", err) + } else if header, err := p.Header(); err != nil { + log.Infox("parsing message header for message-id", err) + } else { + messageID = header.Get("Message-Id") + } + } + + if err := acc.Deliver(log, rcptAcc.destination, m, dataFile, false); err != nil { + log.Errorx("delivering", err) + metricDelivery.WithLabelValues("delivererror", a.reason).Inc() + addError(rcptAcc, smtp.C451LocalErr, smtp.SeSys3Other0, false, "error processing") + return + } + metricDelivery.WithLabelValues("delivered", a.reason).Inc() + log.Info("incoming message delivered", mlog.Field("reason", a.reason)) + + conf, _ := acc.Conf() + if conf.RejectsMailbox != "" && messageID != "" { + acc.RejectsRemove(log, conf.RejectsMailbox, messageID) + } + }) + + if err := acc.Close(); err != nil { + log.Infox("closing account after delivering", err) + } + acc = nil + } + + // If all recipients failed to deliver, return an error. + if len(c.recipients) == len(deliverErrors) { + same := true + e0 := deliverErrors[0] + var serverError bool + var msgs []string + major := 4 + for _, e := range deliverErrors { + serverError = serverError || !e.userError + if e.code != e0.code || e.secode != e0.secode { + same = false + } + msgs = append(msgs, e.errmsg) + if e.code >= 500 { + major = 5 + } + } + if same { + xsmtpErrorf(e0.code, e0.secode, !serverError, "%s", strings.Join(msgs, "\n")) + } + + // Not all failures had the same error. We'll return each error on a separate line. 
+		lines := []string{}
+		for _, e := range deliverErrors {
+			s := fmt.Sprintf("%d %d.%s %s", e.code, e.code/100, e.secode, e.errmsg)
+			lines = append(lines, s)
+		}
+		code := smtp.C451LocalErr
+		secode := smtp.SeSys3Other0
+		if major == 5 {
+			code = smtp.C554TransactionFailed
+		}
+		lines = append(lines, "multiple errors")
+		xsmtpErrorf(code, secode, !serverError, strings.Join(lines, "\n"))
+	}
+	// Generate one DSN for all failed recipients.
+	if len(deliverErrors) > 0 {
+		now := time.Now()
+		dsnMsg := dsn.Message{
+			SMTPUTF8: c.smtputf8,
+			From:     smtp.Path{Localpart: "postmaster", IPDomain: deliverErrors[0].rcptTo.IPDomain},
+			To:       *c.mailFrom,
+			Subject:  "mail delivery failure",
+
+			// Per-message details.
+			ReportingMTA:    mox.Conf.Static.HostnameDomain.ASCII,
+			ReceivedFromMTA: smtp.Ehlo{Name: c.hello, ConnIP: c.remoteIP},
+			ArrivalDate:     now,
+		}
+
+		if len(deliverErrors) > 1 {
+			dsnMsg.TextBody = "Multiple delivery failures occurred.\n\n"
+		}
+
+		for _, e := range deliverErrors {
+			kind := "Permanent"
+			if e.code/100 == 4 {
+				kind = "Transient"
+			}
+			// The recipient address goes under "delivery failure to", the error text
+			// under "Error" (the original had these two arguments swapped).
+			dsnMsg.TextBody += fmt.Sprintf("%s delivery failure to:\n\n\t%s\n\nError:\n\n\t%s\n\n", kind, e.rcptTo.XString(false), e.errmsg)
+			rcpt := dsn.Recipient{
+				FinalRecipient:  e.rcptTo,
+				Action:          dsn.Failed,
+				Status:          fmt.Sprintf("%d.%s", e.code/100, e.secode),
+				LastAttemptDate: now,
+			}
+			dsnMsg.Recipients = append(dsnMsg.Recipients, rcpt)
+		}
+
+		header, err := message.ReadHeaders(bufio.NewReader(&moxio.AtReader{R: dataFile}))
+		if err != nil {
+			c.log.Errorx("reading headers of incoming message for dsn, continuing dsn without headers", err)
+		}
+		dsnMsg.Original = header
+
+		if err := queueDSN(c, *c.mailFrom, dsnMsg); err != nil {
+			metricServerErrors.WithLabelValues("queuedsn").Inc()
+			c.log.Errorx("queuing DSN for incoming delivery, no DSN sent", err)
+		}
+	}
+
+	os.Remove(dataFile.Name())
+	dataFile.Close()
+	*pdataFile = nil
+
+	c.transactionGood++
+	c.transactionBad-- // Compensate for earlier pessimistic
increase.
+	c.rset()
+	c.writecodeline(smtp.C250Completed, smtp.SeMailbox2Other0, "it is done", nil)
+}
+
+// errCodes returns either ecode, or a more specific error based on err.
+// For example, ecode can be turned from an "other system" error into a "mail
+// system full" if the error indicates no disk space is available.
+func errCodes(code int, ecode string, err error) codes {
+	switch {
+	case moxio.IsStorageSpace(err):
+		switch ecode {
+		case smtp.SeMailbox2Other0:
+			if code == smtp.C451LocalErr {
+				code = smtp.C452StorageFull
+			}
+			ecode = smtp.SeMailbox2Full2
+		case smtp.SeSys3Other0:
+			if code == smtp.C451LocalErr {
+				code = smtp.C452StorageFull
+			}
+			ecode = smtp.SeSys3StorageFull1
+		}
+	}
+	return codes{code, ecode}
+}
+
+// cmdRset handles the RSET command, clearing the current mail transaction state.
+// ../rfc/5321:2079
+func (c *conn) cmdRset(p *parser) {
+	// ../rfc/5321:2106
+	p.xend()
+
+	c.rset()
+	c.bwritecodeline(smtp.C250Completed, smtp.SeOther00, "all clear", nil)
+}
+
+// cmdVrfy handles the VRFY command. We don't verify addresses, but tell the
+// client to just attempt delivery. ../rfc/5321:2108 ../rfc/5321:1222
+func (c *conn) cmdVrfy(p *parser) {
+	// No EHLO/HELO needed.
+	// ../rfc/5321:2448
+
+	// ../rfc/5321:2119 ../rfc/6531:641
+	p.xspace()
+	p.xstring()
+	if p.space() {
+		p.xtake("SMTPUTF8")
+	}
+	p.xend()
+
+	// todo future: we could support vrfy and expn for submission? though would need to see if its rfc defines it.
+
+	// ../rfc/5321:4239
+	xsmtpUserErrorf(smtp.C252WithoutVrfy, smtp.SePol7Other0, "no verify but will try delivery")
+}
+
+// cmdExpn handles the EXPN command. We don't expand mailing lists, but tell the
+// client to just attempt delivery. ../rfc/5321:2135 ../rfc/5321:1272
+func (c *conn) cmdExpn(p *parser) {
+	// No EHLO/HELO needed.
+	// ../rfc/5321:2448
+
+	// ../rfc/5321:2149 ../rfc/6531:645
+	p.xspace()
+	p.xstring()
+	if p.space() {
+		p.xtake("SMTPUTF8")
+	}
+	p.xend()
+
+	// ../rfc/5321:4239
+	xsmtpUserErrorf(smtp.C252WithoutVrfy, smtp.SePol7Other0, "no expand but will try delivery")
+}
+
+// cmdHelp handles the HELP command. ../rfc/5321:2151
+func (c *conn) cmdHelp(p *parser) {
+	// Let's not strictly parse the request for help. We are ignoring the text anyway.
+	// ../rfc/5321:2166
+
+	c.bwritecodeline(smtp.C214Help, smtp.SeOther00, "see rfc 5321 (smtp)", nil)
+}
+
+// cmdNoop handles the NOOP command, doing nothing. ../rfc/5321:2191
+func (c *conn) cmdNoop(p *parser) {
+	// No idea why, but if an argument follows, it must adhere to the string ABNF production...
+	// ../rfc/5321:2203
+	if p.space() {
+		p.xstring()
+	}
+	p.xend()
+
+	c.bwritecodeline(smtp.C250Completed, smtp.SeOther00, "alrighty", nil)
+}
+
+// cmdQuit handles the QUIT command, responding and closing the connection.
+// ../rfc/5321:2205
+func (c *conn) cmdQuit(p *parser) {
+	// ../rfc/5321:2226
+	p.xend()
+
+	c.writecodeline(smtp.C221Closing, smtp.SeOther00, "okay thanks bye", nil)
+	panic(cleanClose)
+}
+
+// tlsReceivedComment returns tokens representing a comment in the Received
+// header that documents the TLS connection.
+func (c *conn) tlsReceivedComment() []string {
+	if !c.tls {
+		return nil
+	}
+
+	// todo future: we could use the "tls" clause for the Received header as specified in ../rfc/8314:496. however, the text implies it is only for submission, not regular smtp. and it cannot specify the tls version. for now, not worth the trouble.
+ + // Comments from other mail servers: + // gmail.com: (version=TLS1_3 cipher=TLS_AES_128_GCM_SHA256 bits=128/128) + // yahoo.com: (version=TLS1_3 cipher=TLS_AES_128_GCM_SHA256) + // proton.me: (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest SHA256) (No client certificate requested) + // outlook.com: (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) + + var l []string + add := func(s string) { + l = append(l, s) + } + + versions := map[uint16]string{ + tls.VersionTLS10: "TLS1.0", + tls.VersionTLS11: "TLS1.1", + tls.VersionTLS12: "TLS1.2", + tls.VersionTLS13: "TLS1.3", + } + + tlsc := c.conn.(*tls.Conn) + st := tlsc.ConnectionState() + if version, ok := versions[st.Version]; ok { + add(version) + } else { + c.log.Info("unknown tls version identifier", mlog.Field("version", st.Version)) + add(fmt.Sprintf("TLS identifier %x", st.Version)) + } + + add(tls.CipherSuiteName(st.CipherSuite)) + + // Make it a comment. + l[0] = "(" + l[0] + l[len(l)-1] = l[len(l)-1] + ")" + + return l +} diff --git a/smtpserver/server_test.go b/smtpserver/server_test.go new file mode 100644 index 0000000..e5f4091 --- /dev/null +++ b/smtpserver/server_test.go @@ -0,0 +1,749 @@ +package smtpserver + +// todo: test delivery with failing spf/dkim/dmarc +// todo: test delivering a message to multiple recipients, and with some of them failing. 
+ +import ( + "bytes" + "context" + "crypto/ed25519" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "math/big" + "mime/quotedprintable" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dkim" + "github.com/mjl-/mox/dmarcdb" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtp" + "github.com/mjl-/mox/smtpclient" + "github.com/mjl-/mox/store" + "github.com/mjl-/mox/subjectpass" + "github.com/mjl-/mox/tlsrptdb" +) + +func tcheck(t *testing.T, err error, msg string) { + if err != nil { + t.Helper() + t.Fatalf("%s: %s", msg, err) + } +} + +var submitMessage = strings.ReplaceAll(`From: +To: +Subject: test +Message-Id: + +test email +`, "\n", "\r\n") + +var deliverMessage = strings.ReplaceAll(`From: +To: +Subject: test +Message-Id: + +test email +`, "\n", "\r\n") + +type testserver struct { + t *testing.T + acc *store.Account + switchDone chan struct{} + comm *store.Comm + cid int64 + resolver dns.Resolver + user, pass string + submission bool + dnsbls []dns.Domain +} + +func newTestServer(t *testing.T, configPath string, resolver dns.Resolver) *testserver { + ts := testserver{t: t, cid: 1, resolver: resolver} + + mox.Context = context.Background() + mox.ConfigStaticPath = configPath + mox.MustLoadConfig() + dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir) + os.RemoveAll(dataDir) + var err error + ts.acc, err = store.OpenAccount("mjl") + tcheck(t, err, "open account") + err = ts.acc.SetPassword("testtest") + tcheck(t, err, "set password") + ts.switchDone = store.Switchboard() + err = queue.Init() + tcheck(t, err, "queue init") + + ts.comm = store.RegisterComm(ts.acc) + + return &ts +} + +func (ts *testserver) close() { + ts.comm.Unregister() + queue.Shutdown() + close(ts.switchDone) + ts.acc.Close() +} + +func (ts *testserver) run(fn func(helloErr error, 
client *smtpclient.Client)) { + ts.t.Helper() + + ts.cid += 2 + + serverConn, clientConn := net.Pipe() + defer serverConn.Close() + // clientConn is closed as part of closing client. + serverdone := make(chan struct{}) + defer func() { <-serverdone }() + + go func() { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{fakeCert(ts.t)}, + } + serve("test", ts.cid-2, dns.Domain{ASCII: "mox.example"}, tlsConfig, serverConn, ts.resolver, ts.submission, false, 100<<20, false, false, ts.dnsbls) + close(serverdone) + }() + + var authLine string + if ts.user != "" { + authLine = fmt.Sprintf("AUTH PLAIN %s", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("\u0000%s\u0000%s", ts.user, ts.pass)))) + } + + client, err := smtpclient.New(context.Background(), xlog.WithCid(ts.cid-1), clientConn, smtpclient.TLSOpportunistic, "mox.example", authLine) + if err != nil { + clientConn.Close() + } else { + defer client.Close() + } + fn(err, client) +} + +// Just a cert that appears valid. SMTP client will not verify anything about it +// (that is opportunistic TLS for you, "better some than none"). Let's enjoy this +// one moment where it makes life easier. +func fakeCert(t *testing.T) tls.Certificate { + privKey := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real! + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), // Required field... + } + localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey) + if err != nil { + t.Fatalf("making certificate: %s", err) + } + cert, err := x509.ParseCertificate(localCertBuf) + if err != nil { + t.Fatalf("parsing generated certificate: %s", err) + } + c := tls.Certificate{ + Certificate: [][]byte{localCertBuf}, + PrivateKey: privKey, + Leaf: cert, + } + return c +} + +// Test submission from authenticated user. 
+func TestSubmission(t *testing.T) { + ts := newTestServer(t, "../testdata/smtp/mox.conf", dns.MockResolver{}) + defer ts.close() + + // Set DKIM signing config. + dom, _ := mox.Conf.Domain(dns.Domain{ASCII: "mox.example"}) + sel := config.Selector{ + HashEffective: "sha256", + HeadersEffective: []string{"From", "To", "Subject"}, + Key: ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)), // Fake key, don't use for real. + Domain: dns.Domain{ASCII: "mox.example"}, + } + dom.DKIM = config.DKIM{ + Selectors: map[string]config.Selector{"testsel": sel}, + Sign: []string{"testsel"}, + } + mox.Conf.Dynamic.Domains["mox.example"] = dom + + testAuth := func(user, pass string, expErr *smtpclient.Error) { + t.Helper() + ts.user = user + ts.pass = pass + ts.run(func(err error, client *smtpclient.Client) { + t.Helper() + mailFrom := "mjl@mox.example" + rcptTo := "remote@example.org" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(submitMessage)), strings.NewReader(submitMessage), false, false) + } + var cerr smtpclient.Error + if expErr == nil && err != nil || expErr != nil && (err == nil || !errors.As(err, &cerr) || cerr.Secode != expErr.Secode) { + t.Fatalf("got err %#v, expected %#v", err, expErr) + } + }) + } + + ts.submission = true + testAuth("", "", &smtpclient.Error{Permanent: true, Code: smtp.C530SecurityRequired, Secode: smtp.SePol7Other0}) + testAuth("mjl@mox.example", "test", &smtpclient.Error{Secode: smtp.SePol7AuthBadCreds8}) // Bad (short) password. + testAuth("mjl@mox.example", "testtesttest", &smtpclient.Error{Secode: smtp.SePol7AuthBadCreds8}) // Bad password. + testAuth("mjl@mox.example", "testtest", nil) +} + +// Test delivery from external MTA. +func TestDelivery(t *testing.T) { + resolver := dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.10"}, // For mx check. 
+ }, + PTR: map[string][]string{}, + } + ts := newTestServer(t, "../testdata/smtp/mox.conf", resolver) + defer ts.close() + + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@127.0.0.10" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C550MailboxUnavail { + t.Fatalf("deliver to ip address, got err %v, expected smtpclient.Error with code %d", err, smtp.C550MailboxUnavail) + } + }) + + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@test.example" // Not configured as destination. + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C550MailboxUnavail { + t.Fatalf("deliver to unknown domain, got err %v, expected smtpclient.Error with code %d", err, smtp.C550MailboxUnavail) + } + }) + + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "unknown@mox.example" // User unknown. 
+ if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C550MailboxUnavail { + t.Fatalf("deliver to unknown user for known domain, got err %v, expected smtpclient.Error with code %d", err, smtp.C550MailboxUnavail) + } + }) + + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C451LocalErr { + t.Fatalf("deliver from user without reputation, valid iprev required, got err %v, expected smtpclient.Error with code %d", err, smtp.C451LocalErr) + } + }) + + // Set up iprev to get delivery from unknown user to be accepted. 
+ resolver.PTR["127.0.0.10"] = []string{"example.org."} + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + tcheck(t, err, "deliver to remote") + + changes := make(chan []store.Change) + go func() { + changes <- ts.comm.Get() + }() + + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-changes: + case <-timer.C: + t.Fatalf("no delivery in 1s") + } + }) +} + +func tinsertmsg(t *testing.T, acc *store.Account, mailbox string, m *store.Message, msg string) { + mf, err := store.CreateMessageTemp("queue-dsn") + tcheck(t, err, "temp message") + _, err = mf.Write([]byte(msg)) + tcheck(t, err, "write message") + err = acc.DeliverMailbox(xlog, mailbox, m, mf, true) + tcheck(t, err, "deliver message") + err = mf.Close() + tcheck(t, err, "close message") +} + +func tretrain(t *testing.T, acc *store.Account) { + t.Helper() + + // Fresh empty junkfilter. + basePath := mox.DataDirPath("accounts") + dbPath := filepath.Join(basePath, acc.Name, "junkfilter.db") + bloomPath := filepath.Join(basePath, acc.Name, "junkfilter.bloom") + os.Remove(dbPath) + os.Remove(bloomPath) + jf, _, err := acc.OpenJunkFilter(xlog) + tcheck(t, err, "open junk filter") + defer jf.Close() + + // Fetch messags to retrain on. + q := bstore.QueryDB[store.Message](acc.DB) + q.FilterEqual("Seen", true) + q.FilterFn(func(m store.Message) bool { + return m.Flags.Junk || m.Flags.Notjunk + }) + msgs, err := q.List() + tcheck(t, err, "fetch messages") + + // Retrain the messages. 
+ for _, m := range msgs { + ham := m.Flags.Notjunk + + f, err := os.Open(acc.MessagePath(m.ID)) + tcheck(t, err, "open message") + r := store.FileMsgReader(m.MsgPrefix, f) + + jf.TrainMessage(r, m.Size, ham) + + err = r.Close() + tcheck(t, err, "close message") + } + + err = jf.Save() + tcheck(t, err, "save junkfilter") +} + +// Test accept/reject with DMARC reputation and with spammy content. +func TestSpam(t *testing.T) { + resolver := &dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.1"}, // For mx check. + }, + TXT: map[string][]string{ + "example.org.": {"v=spf1 ip4:127.0.0.10 -all"}, + "_dmarc.example.org.": {"v=DMARC1;p=reject"}, + }, + } + ts := newTestServer(t, "../testdata/smtp/junk/mox.conf", resolver) + defer ts.close() + + // Insert spammy messages. No junkfilter training yet. + m := store.Message{ + RemoteIP: "127.0.0.10", + RemoteIPMasked1: "127.0.0.10", + RemoteIPMasked2: "127.0.0.0", + RemoteIPMasked3: "127.0.0.0", + MailFrom: "remote@example.org", + MailFromLocalpart: smtp.Localpart("remote"), + MailFromDomain: "example.org", + RcptToLocalpart: smtp.Localpart("mjl"), + RcptToDomain: "mox.example", + MsgFromLocalpart: smtp.Localpart("remote"), + MsgFromDomain: "example.org", + MsgFromOrgDomain: "example.org", + MsgFromValidated: true, + MsgFromValidation: store.ValidationStrict, + Flags: store.Flags{Seen: true, Junk: true}, + } + for i := 0; i < 3; i++ { + nm := m + tinsertmsg(t, ts.acc, "Inbox", &nm, deliverMessage) + } + + checkRejectsCount := func(expect int) { + t.Helper() + q := bstore.QueryDB[store.Mailbox](ts.acc.DB) + q.FilterNonzero(store.Mailbox{Name: "Rejects"}) + mb, err := q.Get() + tcheck(t, err, "get rejects mailbox") + qm := bstore.QueryDB[store.Message](ts.acc.DB) + qm.FilterNonzero(store.Message{MailboxID: mb.ID}) + n, err := qm.Count() + tcheck(t, err, "count messages in rejects mailbox") + if n != expect { + t.Fatalf("messages in rejects mailbox, found %d, expected %d", n, expect) + } + } + + // Delivery 
from sender with bad reputation should fail. + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C451LocalErr { + t.Fatalf("delivery by bad sender, got err %v, expected smtpclient.Error with code %d", err, smtp.C451LocalErr) + } + + // Message should now be in Rejects mailbox. + checkRejectsCount(1) + }) + + // Mark the messages as having good reputation. + q := bstore.QueryDB[store.Message](ts.acc.DB) + _, err := q.UpdateFields(map[string]any{"Junk": false, "Notjunk": true}) + tcheck(t, err, "update junkiness") + + // Message should now be accepted. + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + tcheck(t, err, "deliver") + + // Message should now be removed from Rejects mailbox. + checkRejectsCount(0) + }) + + // Undo dmarc pass, mark messages as junk, and train the filter. + resolver.TXT = nil + q = bstore.QueryDB[store.Message](ts.acc.DB) + _, err = q.UpdateFields(map[string]any{"Junk": true, "Notjunk": false}) + tcheck(t, err, "update junkiness") + tretrain(t, ts.acc) + + // Message should be refused for spammy content. 
+ ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C451LocalErr { + t.Fatalf("attempt to deliver spamy message, got err %v, expected smtpclient.Error with code %d", err, smtp.C451LocalErr) + } + }) +} + +// Messages that we sent to, that have passing DMARC, but that are otherwise spammy, should be accepted. +func TestDMARCSent(t *testing.T) { + resolver := &dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.1"}, // For mx check. + }, + TXT: map[string][]string{ + "example.org.": {"v=spf1 ip4:127.0.0.10 -all"}, + "_dmarc.example.org.": {"v=DMARC1;p=reject"}, + }, + } + ts := newTestServer(t, "../testdata/smtp/junk/mox.conf", resolver) + defer ts.close() + + // Insert spammy messages not related to the test message. + m := store.Message{ + MailFrom: "remote@test.example", + RcptToLocalpart: smtp.Localpart("mjl"), + RcptToDomain: "mox.example", + Flags: store.Flags{Seen: true, Junk: true}, + } + for i := 0; i < 3; i++ { + nm := m + tinsertmsg(t, ts.acc, "Archive", &nm, deliverMessage) + } + tretrain(t, ts.acc) + + // Baseline, message should be refused for spammy content. 
+ ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C451LocalErr { + t.Fatalf("attempt to deliver spamy message, got err %v, expected smtpclient.Error with code %d", err, smtp.C451LocalErr) + } + }) + + // Insert a message that we sent to the address that is about to send to us. + var sentMsg store.Message + tinsertmsg(t, ts.acc, "Sent", &sentMsg, deliverMessage) + err := ts.acc.DB.Insert(&store.Recipient{MessageID: sentMsg.ID, Localpart: "remote", Domain: "example.org", OrgDomain: "example.org", Sent: time.Now()}) + tcheck(t, err, "inserting message recipient") + + // We should now be accepting the message because we recently sent a message. + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + tcheck(t, err, "deliver") + }) +} + +// Test DNSBL, then getting through with subjectpass. +func TestBlocklistedSubjectpass(t *testing.T) { + // Set up a DNSBL on dnsbl.example, and get DMARC pass. + resolver := &dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.10"}, // For mx check. + "2.0.0.127.dnsbl.example.": {"127.0.0.2"}, // For healthcheck. + "10.0.0.127.dnsbl.example.": {"127.0.0.10"}, // Where our connection pretends to come from. + }, + TXT: map[string][]string{ + "10.0.0.127.dnsbl.example.": {"blocklisted"}, + "example.org.": {"v=spf1 ip4:127.0.0.10 -all"}, + "_dmarc.example.org.": {"v=DMARC1;p=reject"}, + }, + PTR: map[string][]string{ + "127.0.0.10": {"example.org."}, // For iprev check. 
+ }, + } + ts := newTestServer(t, "../testdata/smtp/mox.conf", resolver) + ts.dnsbls = []dns.Domain{{ASCII: "dnsbl.example"}} + defer ts.close() + + // Message should be refused softly (temporary error) due to DNSBL. + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C451LocalErr { + t.Fatalf("attempted deliver from dnsblocklisted ip, got err %v, expected smtpclient.Error with code %d", err, smtp.C451LocalErr) + } + }) + + // Set up subjectpass on account. + acc := mox.Conf.Dynamic.Accounts[ts.acc.Name] + acc.SubjectPass.Period = time.Hour + mox.Conf.Dynamic.Accounts[ts.acc.Name] = acc + + // Message should be refused quickly (permanent error) due to DNSBL and Subjectkey. 
+ var pass string + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(deliverMessage)), strings.NewReader(deliverMessage), false, false) + } + var cerr smtpclient.Error + if err == nil || !errors.As(err, &cerr) || cerr.Code != smtp.C550MailboxUnavail { + t.Fatalf("attempted deliver from dnsblocklisted ip, got err %v, expected smtpclient.Error with code %d", err, smtp.C550MailboxUnavail) + } + i := strings.Index(cerr.Line, subjectpass.Explanation) + if i < 0 { + t.Fatalf("got error line %q, expected error line with subjectpass", cerr.Line) + } + pass = cerr.Line[i+len(subjectpass.Explanation):] + }) + + ts.run(func(err error, client *smtpclient.Client) { + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + passMessage := strings.Replace(deliverMessage, "Subject: test", "Subject: test "+pass, 1) + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(passMessage)), strings.NewReader(passMessage), false, false) + } + tcheck(t, err, "deliver with subjectpass") + }) +} + +// Test accepting a DMARC report. +func TestDMARCReport(t *testing.T) { + resolver := &dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.10"}, // For mx check. + }, + TXT: map[string][]string{ + "example.org.": {"v=spf1 ip4:127.0.0.10 -all"}, + "_dmarc.example.org.": {"v=DMARC1;p=reject"}, + }, + PTR: map[string][]string{ + "127.0.0.10": {"example.org."}, // For iprev check. 
+ }, + } + ts := newTestServer(t, "../testdata/smtp/dmarcreport/mox.conf", resolver) + defer ts.close() + + run := func(report string, n int) { + t.Helper() + ts.run(func(err error, client *smtpclient.Client) { + t.Helper() + + tcheck(t, err, "run") + + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + + msgb := &bytes.Buffer{} + _, xerr := fmt.Fprintf(msgb, "From: %s\r\nTo: %s\r\nSubject: dmarc report\r\nMIME-Version: 1.0\r\nContent-Type: text/xml\r\n\r\n", mailFrom, rcptTo) + tcheck(t, xerr, "write msg headers") + w := quotedprintable.NewWriter(msgb) + _, xerr = w.Write([]byte(strings.ReplaceAll(report, "\n", "\r\n"))) + tcheck(t, xerr, "write message") + msg := msgb.String() + + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(msg)), strings.NewReader(msg), false, false) + } + tcheck(t, err, "deliver") + + records, err := dmarcdb.Records(context.Background()) + tcheck(t, err, "dmarcdb records") + if len(records) != n { + t.Fatalf("got %d dmarcdb records, expected %d or more", len(records), n) + } + }) + } + + run(dmarcReport, 0) + run(strings.ReplaceAll(dmarcReport, "xmox.nl", "mox.example"), 1) +} + +const dmarcReport = ` + + + example.org + postmaster@example.org + 1 + + 1596412800 + 1596499199 + + + + xmox.nl + r + r +

reject

+ reject + 100 +
+ + + 127.0.0.10 + 1 + + none + pass + pass + + + + xmox.nl + + + + xmox.nl + pass + testsel + + + xmox.nl + pass + + + +
+` + +// Test accepting a TLS report. +func TestTLSReport(t *testing.T) { + // Requires setting up DKIM. + privKey := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize)) // Fake key, don't use this for real! + dkimRecord := dkim.Record{ + Version: "DKIM1", + Hashes: []string{"sha256"}, + Flags: []string{"s"}, + PublicKey: privKey.Public(), + Key: "ed25519", + } + dkimTxt, err := dkimRecord.Record() + tcheck(t, err, "dkim record") + + sel := config.Selector{ + HashEffective: "sha256", + HeadersEffective: []string{"From", "To", "Subject", "Date"}, + Key: privKey, + Domain: dns.Domain{ASCII: "testsel"}, + } + dkimConf := config.DKIM{ + Selectors: map[string]config.Selector{"testsel": sel}, + Sign: []string{"testsel"}, + } + + resolver := &dns.MockResolver{ + A: map[string][]string{ + "example.org.": {"127.0.0.10"}, // For mx check. + }, + TXT: map[string][]string{ + "testsel._domainkey.example.org.": {dkimTxt}, + "example.org.": {"v=spf1 ip4:127.0.0.10 -all"}, + "_dmarc.example.org.": {"v=DMARC1;p=reject"}, + }, + PTR: map[string][]string{ + "127.0.0.10": {"example.org."}, // For iprev check. 
+ }, + } + ts := newTestServer(t, "../testdata/smtp/tlsrpt/mox.conf", resolver) + defer ts.close() + + run := func(tlsrpt string, n int) { + t.Helper() + ts.run(func(err error, client *smtpclient.Client) { + t.Helper() + + mailFrom := "remote@example.org" + rcptTo := "mjl@mox.example" + + msgb := &bytes.Buffer{} + _, xerr := fmt.Fprintf(msgb, "From: %s\r\nTo: %s\r\nSubject: tlsrpt report\r\nMIME-Version: 1.0\r\nContent-Type: application/tlsrpt+json\r\n\r\n%s\r\n", mailFrom, rcptTo, tlsrpt) + tcheck(t, xerr, "write msg") + msg := msgb.String() + + headers, xerr := dkim.Sign(context.Background(), "remote", dns.Domain{ASCII: "example.org"}, dkimConf, false, strings.NewReader(msg)) + tcheck(t, xerr, "dkim sign") + msg = headers + msg + + if err == nil { + err = client.Deliver(context.Background(), mailFrom, rcptTo, int64(len(msg)), strings.NewReader(msg), false, false) + } + tcheck(t, err, "deliver") + + records, err := tlsrptdb.Records(context.Background()) + tcheck(t, err, "tlsrptdb records") + if len(records) != n { + t.Fatalf("got %d tlsrptdb records, expected %d", len(records), n) + } + }) + } + + const tlsrpt = `{"organization-name":"Example.org","date-range":{"start-datetime":"2022-01-07T00:00:00Z","end-datetime":"2022-01-07T23:59:59Z"},"contact-info":"tlsrpt@example.org","report-id":"1","policies":[{"policy":{"policy-type":"no-policy-found","policy-domain":"xmox.nl"},"summary":{"total-successful-session-count":1,"total-failure-session-count":0}}]}` + + run(tlsrpt, 0) + run(strings.ReplaceAll(tlsrpt, "xmox.nl", "mox.example"), 1) + +} diff --git a/spf/parse.go b/spf/parse.go new file mode 100644 index 0000000..2f6748f --- /dev/null +++ b/spf/parse.go @@ -0,0 +1,466 @@ +package spf + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// Record is a parsed SPF DNS record. +// +// An example record for example.com: +// +// v=spf1 +mx a:colo.example.com/28 -all +type Record struct { + Version string // Must be "spf1". 
+ Directives []Directive // An IP is evaluated against each directive until a match is found. + Redirect string // Modifier that redirects SPF checks to other domain after directives did not match. Optional. For "redirect=". + Explanation string // Modifier for creating a user-friendly error message when an IP results in status "fail". + Other []Modifier // Other modifiers. +} + +// Directive consists of a mechanism that describes how to check if an IP matches, +// an (optional) qualifier indicating the policy for a match, and optional +// parameters specific to the mechanism. +type Directive struct { + Qualifier string // Sets the result if this directive matches. "" and "+" are "pass", "-" is "fail", "?" is "neutral", "~" is "softfail". + Mechanism string // "all", "include", "a", "mx", "ptr", "ip4", "ip6", "exists". + DomainSpec string // For include, a, mx, ptr, exists. Always in lower-case when parsed using ParseRecord. + IP net.IP `json:"-"` // For ip4, ip6. + IPstr string // Original string for IP, always with /subnet. + IP4CIDRLen *int // For a, mx, ip4. + IP6CIDRLen *int // For a, mx, ip6. +} + +// MechanismString returns a directive in string form for use in the Received-SPF header. +func (d Directive) MechanismString() string { + s := d.Qualifier + d.Mechanism + if d.DomainSpec != "" { + s += ":" + d.DomainSpec + } else if d.IP != nil { + s += ":" + d.IP.String() + } + if d.IP4CIDRLen != nil { + s += fmt.Sprintf("/%d", *d.IP4CIDRLen) + } + if d.IP6CIDRLen != nil { + if d.Mechanism != "ip6" { + s += "/" + } + s += fmt.Sprintf("/%d", *d.IP6CIDRLen) + } + return s +} + +// Modifier provides additional information for a policy. +// "redirect" and "exp" are not represented as a Modifier but explicitly in a Record. +type Modifier struct { + Key string // Key is case-insensitive. + Value string +} + +// Record returns an DNS record, to be configured as a TXT record for a domain, +// e.g. a TXT record for example.com. 
+func (r Record) Record() (string, error) { + b := &strings.Builder{} + b.WriteString("v=") + b.WriteString(r.Version) + for _, d := range r.Directives { + b.WriteString(" " + d.MechanismString()) + } + if r.Redirect != "" { + fmt.Fprintf(b, " redirect=%s", r.Redirect) + } + if r.Explanation != "" { + fmt.Fprintf(b, " exp=%s", r.Explanation) + } + for _, m := range r.Other { + fmt.Fprintf(b, " %s=%s", m.Key, m.Value) + } + return b.String(), nil +} + +type parser struct { + s string + lower string + o int +} + +type parseError string + +func (e parseError) Error() string { + return string(e) +} + +// toLower lower cases bytes that are A-Z. strings.ToLower does too much. and +// would replace invalid bytes with unicode replacement characters, which would +// break our requirement that offsets into the original and upper case strings +// point to the same character. +func toLower(s string) string { + r := []byte(s) + for i, c := range r { + if c >= 'A' && c <= 'Z' { + r[i] = c + 0x20 + } + } + return string(r) +} + +// ParseRecord parses an SPF DNS TXT record. 
+func ParseRecord(s string) (r *Record, isspf bool, rerr error) { + p := parser{s: s, lower: toLower(s)} + + r = &Record{ + Version: "spf1", + } + + defer func() { + x := recover() + if x == nil { + return + } + if err, ok := x.(parseError); ok { + rerr = err + return + } + panic(x) + }() + + p.xtake("v=spf1") + for !p.empty() { + p.xtake(" ") + isspf = true // ../rfc/7208:825 + for p.take(" ") { + } + if p.empty() { + break + } + + qualifier := p.takelist("+", "-", "?", "~") + mechanism := p.takelist("all", "include:", "a", "mx", "ptr", "ip4:", "ip6:", "exists:") + if qualifier != "" && mechanism == "" { + p.xerrorf("expected mechanism after qualifier") + } + if mechanism == "" { + // ../rfc/7208:2597 + modifier := p.takelist("redirect=", "exp=") + if modifier == "" { + // ../rfc/7208:2600 + name := p.xtakefn1(func(c rune, i int) bool { + alpha := c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' + return alpha || i > 0 && (c >= '0' && c <= '9' || c == '-' || c == '_' || c == '.') + }) + p.xtake("=") + v := p.xmacroString(true) + r.Other = append(r.Other, Modifier{name, v}) + continue + } + v := p.xdomainSpec(true) + modifier = strings.TrimSuffix(modifier, "=") + if modifier == "redirect" { + if r.Redirect != "" { + // ../rfc/7208:1419 + p.xerrorf("duplicate redirect modifier") + } + r.Redirect = v + } + if modifier == "exp" { + if r.Explanation != "" { + // ../rfc/7208:1419 + p.xerrorf("duplicate exp modifier") + } + r.Explanation = v + } + continue + } + // ../rfc/7208:2585 + d := Directive{ + Qualifier: qualifier, + Mechanism: strings.TrimSuffix(mechanism, ":"), + } + switch d.Mechanism { + case "all": + case "include": + d.DomainSpec = p.xdomainSpec(false) + case "a", "mx": + if p.take(":") { + d.DomainSpec = p.xdomainSpec(false) + } + if p.take("/") { + if !p.take("/") { + num, _ := p.xnumber() + if num > 32 { + p.xerrorf("invalid ip4 cidr length %d", num) + } + d.IP4CIDRLen = &num + if !p.take("//") { + break + } + } + num, _ := p.xnumber() + if num > 128 { + 
p.xerrorf("invalid ip6 cidr length %d", num) + } + d.IP6CIDRLen = &num + } + case "ptr": + if p.take(":") { + d.DomainSpec = p.xdomainSpec(false) + } + case "ip4": + d.IP, d.IPstr = p.xip4address() + if p.take("/") { + num, _ := p.xnumber() + if num > 32 { + p.xerrorf("invalid ip4 cidr length %d", num) + } + d.IP4CIDRLen = &num + d.IPstr += fmt.Sprintf("/%d", num) + } else { + d.IPstr += "/32" + } + case "ip6": + d.IP, d.IPstr = p.xip6address() + if p.take("/") { + num, _ := p.xnumber() + if num > 128 { + p.xerrorf("invalid ip6 cidr length %d", num) + } + d.IP6CIDRLen = &num + d.IPstr += fmt.Sprintf("/%d", num) + } else { + d.IPstr += "/128" + } + case "exists": + d.DomainSpec = p.xdomainSpec(false) + default: + return nil, true, fmt.Errorf("internal error, missing case for mechanism %q", d.Mechanism) + } + r.Directives = append(r.Directives, d) + } + return r, true, nil +} + +func (p *parser) xerrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + if !p.empty() { + msg += fmt.Sprintf(" (leftover %q)", p.s[p.o:]) + } + panic(parseError(msg)) +} + +// operates on original-cased characters. +func (p *parser) xtakefn1(fn func(rune, int) bool) string { + r := "" + for i, c := range p.s[p.o:] { + if !fn(c, i) { + break + } + r += string(c) + } + if r == "" { + p.xerrorf("need at least 1 char") + } + p.o += len(r) + return r +} + +// caller should set includingSlash to false when parsing "a" or "mx", or the / would be consumed as valid macro literal. +func (p *parser) xdomainSpec(includingSlash bool) string { + // ../rfc/7208:1579 + // This also consumes the "domain-end" part, which we check below. + s := p.xmacroString(includingSlash) + + // The ABNF says s must either end in macro-expand, or "." toplabel ["."]. The + // toplabel rule implies the intention is to force a valid DNS name. We cannot just + // check if the name is valid, because "macro-expand" is not a valid label. 
So we + // recognize the macro-expand, and check for valid toplabel otherwise, because we + // syntax errors must result in Permerror. + for _, suf := range []string{"%%", "%_", "%-", "}"} { + // The check for "}" assumes a "%{" precedes it... + if strings.HasSuffix(s, suf) { + return s + } + } + tl := strings.Split(strings.TrimSuffix(s, "."), ".") + t := tl[len(tl)-1] + if t == "" { + p.xerrorf("invalid empty toplabel") + } + nums := 0 + for i, c := range t { + switch { + case c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z': + case c >= '0' && c <= '9': + nums++ + case c == '-': + if i == 0 { + p.xerrorf("bad toplabel, invalid leading dash") + } + if i == len(t)-1 { + p.xerrorf("bad toplabel, invalid trailing dash") + } + default: + p.xerrorf("bad toplabel, invalid character") + } + } + if nums == len(t) { + p.xerrorf("bad toplabel, cannot be all digits") + } + return s +} + +func (p *parser) xmacroString(includingSlash bool) string { + // ../rfc/7208:1588 + r := "" + for !p.empty() { + w := p.takelist("%{", "%%", "%_", "%-") // "macro-expand" + if w == "" { + // "macro-literal" + if !p.empty() { + b := p.peekchar() + if b > ' ' && b < 0x7f && b != '%' && (includingSlash || b != '/') { + r += string(b) + p.o++ + continue + } + } + break + } + r += w + if w != "%{" { + continue + } + r += p.xtakelist("s", "l", "o", "d", "i", "p", "h", "c", "r", "t", "v") // "macro-letter" + digits := p.digits() + if digits != "" { + if v, err := strconv.Atoi(digits); err != nil { + p.xerrorf("bad digits: %v", err) + } else if v == 0 { + p.xerrorf("bad digits 0 for 0 labels") + } + } + r += digits + if p.take("r") { + r += "r" + } + for { + delimiter := p.takelist(".", "-", "+", ",", "/", "_", "=") + if delimiter == "" { + break + } + r += delimiter + } + r += p.xtake("}") + } + return r +} + +func (p *parser) empty() bool { + return p.o >= len(p.s) +} + +// returns next original-cased character. 
+func (p *parser) peekchar() byte { + return p.s[p.o] +} + +func (p *parser) xtakelist(l ...string) string { + w := p.takelist(l...) + if w == "" { + p.xerrorf("no match for %v", l) + } + return w +} + +func (p *parser) takelist(l ...string) string { + for _, w := range l { + if strings.HasPrefix(p.lower[p.o:], w) { + p.o += len(w) + return w + } + } + return "" +} + +// digits parses zero or more digits. +func (p *parser) digits() string { + r := "" + for !p.empty() { + b := p.peekchar() + if b >= '0' && b <= '9' { + r += string(b) + p.o++ + } else { + break + } + } + return r +} + +func (p *parser) take(s string) bool { + if strings.HasPrefix(p.lower[p.o:], s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtake(s string) string { + ok := p.take(s) + if !ok { + p.xerrorf("expected %q", s) + } + return s +} + +func (p *parser) xnumber() (int, string) { + s := p.digits() + if s == "" { + p.xerrorf("expected number") + } + if s == "0" { + return 0, s + } + if strings.HasPrefix(s, "0") { + p.xerrorf("bogus leading 0 in number") + } + v, err := strconv.Atoi(s) + if err != nil { + p.xerrorf("parsing number for %q: %s", s, err) + } + return v, s +} + +func (p *parser) xip4address() (net.IP, string) { + // ../rfc/7208:2607 + ip4num := func() (byte, string) { + v, vs := p.xnumber() + if v > 255 { + p.xerrorf("bad ip4 number %d", v) + } + return byte(v), vs + } + a, as := ip4num() + p.xtake(".") + b, bs := ip4num() + p.xtake(".") + c, cs := ip4num() + p.xtake(".") + d, ds := ip4num() + return net.IPv4(a, b, c, d), as + "." + bs + "." + cs + "." + ds +} + +func (p *parser) xip6address() (net.IP, string) { + // ../rfc/7208:2614 + // We just take in a string that has characters that IPv6 uses, then parse it. + s := p.xtakefn1(func(c rune, i int) bool { + return c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F' || c == ':' || c == '.' 
+ }) + ip := net.ParseIP(s) + if ip == nil { + p.xerrorf("ip6 address %q not valid", s) + } + return ip, s +} diff --git a/spf/parse_test.go b/spf/parse_test.go new file mode 100644 index 0000000..6af72cb --- /dev/null +++ b/spf/parse_test.go @@ -0,0 +1,138 @@ +package spf + +import ( + "net" + "reflect" + "testing" +) + +func TestParse(t *testing.T) { + intptr := func(v int) *int { + return &v + } + + mustParseIP := func(s string) net.IP { + ip := net.ParseIP(s) + if ip == nil { + t.Fatalf("bad ip %q", s) + } + return ip + } + + test := func(txt string, expRecord *Record) { + t.Helper() + valid := expRecord != nil + r, _, err := ParseRecord(txt) + if valid && err != nil { + t.Fatalf("expected success, got err %s, txt %q", err, txt) + } + if !valid && err == nil { + t.Fatalf("expected error, got record %#v, txt %q", r, txt) + } + if valid && !reflect.DeepEqual(r, expRecord) { + t.Fatalf("unexpected record:\ngot: %v\nexpected: %v, txt %q", r, expRecord, txt) + } + } + + test("", nil) + test("v=spf1", &Record{Version: "spf1"}) + test("v=SPF1", &Record{Version: "spf1"}) + test("V=spf1 ", &Record{Version: "spf1"}) + test("V=spf1 all Include:example.org a ?a -a +a ~a a:x a:x/0 a:x/24//64 a:x//64 mx mx:x ptr ptr:x IP4:10.0.0.1 ip4:0.0.0.0/0 ip4:10.0.0.1/24 ip6:2001:db8::1 ip6:2001:db8::1/128 exists:x REDIRECT=x exp=X Other=x", + &Record{ + Version: "spf1", + Directives: []Directive{ + {Mechanism: "all"}, + {Mechanism: "include", DomainSpec: "example.org"}, + {Mechanism: "a"}, + {Qualifier: "?", Mechanism: "a"}, + {Qualifier: "-", Mechanism: "a"}, + {Qualifier: "+", Mechanism: "a"}, + {Qualifier: "~", Mechanism: "a"}, + {Mechanism: "a", DomainSpec: "x"}, + {Mechanism: "a", DomainSpec: "x", IP4CIDRLen: intptr(0)}, + {Mechanism: "a", DomainSpec: "x", IP4CIDRLen: intptr(24), IP6CIDRLen: intptr(64)}, + {Mechanism: "a", DomainSpec: "x", IP6CIDRLen: intptr(64)}, + {Mechanism: "mx"}, + {Mechanism: "mx", DomainSpec: "x"}, + {Mechanism: "ptr"}, + {Mechanism: "ptr", DomainSpec: 
"x"}, + {Mechanism: "ip4", IP: mustParseIP("10.0.0.1"), IPstr: "10.0.0.1/32"}, + {Mechanism: "ip4", IP: mustParseIP("0.0.0.0"), IPstr: "0.0.0.0/0", IP4CIDRLen: intptr(0)}, + {Mechanism: "ip4", IP: mustParseIP("10.0.0.1"), IPstr: "10.0.0.1/24", IP4CIDRLen: intptr(24)}, + {Mechanism: "ip6", IP: mustParseIP("2001:db8::1"), IPstr: "2001:db8::1/128"}, + {Mechanism: "ip6", IP: mustParseIP("2001:db8::1"), IPstr: "2001:db8::1/128", IP6CIDRLen: intptr(128)}, + {Mechanism: "exists", DomainSpec: "x"}, + }, + Redirect: "x", + Explanation: "X", + Other: []Modifier{ + {"Other", "x"}, + }, + }, + ) + test("V=spf1 -all", &Record{Version: "spf1", Directives: []Directive{{Qualifier: "-", Mechanism: "all"}}}) + test("v=spf1 !", nil) // Invalid character. + test("v=spf1 ?redirect=bogus", nil) + test("v=spf1 redirect=mox.example redirect=mox2.example", nil) // Duplicate redirect. + test("v=spf1 exp=mox.example exp=mox2.example", nil) // Duplicate exp. + test("v=spf1 ip4:10.0.0.256", nil) // Invalid address. + test("v=spf1 ip6:2001:db8:::1", nil) // Invalid address. + test("v=spf1 ip4:10.0.0.1/33", nil) // IPv4 prefix >32. + test("v=spf1 ip6:2001:db8::1/129", nil) // IPv6 prefix >128. + test("v=spf1 a:mox.example/33", nil) // IPv4 prefix >32. + test("v=spf1 a:mox.example//129", nil) // IPv6 prefix >128. + test("v=spf1 a:mox.example//129", nil) // IPv6 prefix >128. + test("v=spf1 exists:%%.%{l1r+}.%{d}", + &Record{ + Version: "spf1", + Directives: []Directive{ + {Mechanism: "exists", DomainSpec: "%%.%{l1r+}.%{d}"}, + }, + }, + ) + test("v=spf1 exists:%{l1r+}..", nil) // Empty toplabel in domain-end. + test("v=spf1 exists:%{l1r+}._.", nil) // Invalid toplabel in domain-end. + test("v=spf1 exists:%{l1r+}.123.", nil) // Invalid toplabel in domain-end. + test("v=spf1 exists:%{l1r+}.bad-.", nil) // Invalid toplabel in domain-end. + test("v=spf1 exists:%{l1r+}.-bad.", nil) // Invalid toplabel in domain-end. + test("v=spf1 exists:%{l1r+}./.", nil) // Invalid toplabel in domain-end. 
+ test("v=spf1 exists:%{x}", nil) // Unknown macro-letter. + test("v=spf1 exists:%{s0}", nil) // Invalid digits. + test("v=spf1 exists:%{ir}.%{l1r+}.%{d}", + &Record{ + Version: "spf1", + Directives: []Directive{ + {Mechanism: "exists", DomainSpec: "%{ir}.%{l1r+}.%{d}"}, + }, + }, + ) + + orig := `V=SPF1 all Include:example.org a ?a -a +a ~a a:x a:x/0 a:x/24//64 a:x//64 mx mx:x ptr ptr:x IP4:10.0.0.1 ip4:0.0.0.0/0 ip4:10.0.0.1/24 ip6:2001:db8::1 ip6:2001:db8::1/128 exists:x REDIRECT=x exp=X Other=x` + exp := `v=spf1 all include:example.org a ?a -a +a ~a a:x a:x/0 a:x/24//64 a:x//64 mx mx:x ptr ptr:x ip4:10.0.0.1 ip4:0.0.0.0/0 ip4:10.0.0.1/24 ip6:2001:db8::1 ip6:2001:db8::1/128 exists:x redirect=x exp=X Other=x` + r, _, err := ParseRecord(orig) + if err != nil { + t.Fatalf("parsing original: %s", err) + } + record, err := r.Record() + if err != nil { + t.Fatalf("making dns record: %s", err) + } + if record != exp { + t.Fatalf("packing dns record, got %q, expected %q", record, exp) + } +} + +func FuzzParseRecord(f *testing.F) { + f.Add("") + f.Add("v=spf1") + f.Add(`V=SPF1 all Include:example.org a ?a -a +a ~a a:x a:x/0 a:x/24//64 a:x//64 mx mx:x ptr ptr:x IP4:10.0.0.1 ip4:0.0.0.0/0 ip4:10.0.0.1/24 ip6:2001:db8::1 ip6:2001:db8::1/128 exists:x REDIRECT=x exp=X Other=x`) + f.Fuzz(func(t *testing.T, s string) { + r, _, err := ParseRecord(s) + if err == nil { + if _, err := r.Record(); err != nil { + t.Errorf("r.Record for %s, %#v: %s", s, r, err) + } + } + }) +} diff --git a/spf/received.go b/spf/received.go new file mode 100644 index 0000000..e16e880 --- /dev/null +++ b/spf/received.go @@ -0,0 +1,118 @@ +package spf + +import ( + "net" + "strings" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" +) + +// ../rfc/7208:2083 + +// Received represents a Received-SPF header with the SPF verify results, to be +// prepended to a message. 
+// +// Example: +// +// Received-SPF: pass (mybox.example.org: domain of +// myname@example.com designates 192.0.2.1 as permitted sender) +// receiver=mybox.example.org; client-ip=192.0.2.1; +// envelope-from="myname@example.com"; helo=foo.example.com; +type Received struct { + Result Status + Comment string // Additional free-form information about the verification result. Optional. Included in message header comment inside "()". + ClientIP net.IP // IP address of remote SMTP client, "client-ip=". + EnvelopeFrom string // Sender mailbox, typically SMTP MAIL FROM, but will be set to "postmaster" at SMTP EHLO if MAIL FROM is empty, "envelop-from=". + Helo dns.IPDomain // IP or host name from EHLO or HELO command, "helo=". + Problem string // Optional. "problem=" + Receiver string // Hostname of receiving mail server, "receiver=". + Identity Identity // The identity that was checked, "mailfrom" or "helo", for "identity=". + Mechanism string // Mechanism that caused the result, can be "default". Optional. +} + +// Identity that was verified. +type Identity string + +const ( + ReceivedMailFrom Identity = "mailfrom" + ReceivedHELO Identity = "helo" +) + +func receivedValueEncode(s string) string { + if s == "" { + return quotedString("") + } + for i, c := range s { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c > 0x7f { + continue + } + // ../rfc/5322:679 + const atext = "!#$%&'*+-/=?^_`{|}~" + if strings.IndexByte(atext, byte(c)) >= 0 { + continue + } + if c != '.' || (i == 0 || i+1 == len(s)) { + return quotedString(s) + } + } + return s +} + +// ../rfc/5322:736 +func quotedString(s string) string { + w := &strings.Builder{} + w.WriteByte('"') + for _, c := range s { + if c > ' ' && c < 0x7f && c != '"' && c != '\\' || c > 0x7f || c == ' ' || c == '\t' { + // We allow utf-8. This should only be needed when the destination address has an + // utf8 localpart, in which case we are already doing smtputf8. 
+ // We also allow unescaped space and tab. This is FWS, and the name of ABNF + // production "qcontent" implies the FWS is not part of the string, but escaping + // space and tab leads to ugly strings. ../rfc/5322:743 + w.WriteRune(c) + continue + } + switch c { + case ' ', '\t', '"', '\\': + w.WriteByte('\\') + w.WriteRune(c) + } + } + w.WriteByte('"') + return w.String() +} + +// Header returns a Received-SPF header line including trailing crlf that can +// be prepended to an incoming message. +func (r Received) Header() string { + // ../rfc/7208:2043 + w := &message.HeaderWriter{} + w.Add("", "Received-SPF: "+string(r.Result)) + if r.Comment != "" { + w.Add(" ", "("+r.Comment+")") + } + w.Addf(" ", "client-ip=%s;", receivedValueEncode(r.ClientIP.String())) + w.Addf(" ", "envelope-from=%s;", receivedValueEncode(r.EnvelopeFrom)) + var helo string + if len(r.Helo.IP) > 0 { + helo = r.Helo.IP.String() + } else { + helo = r.Helo.Domain.ASCII + } + w.Addf(" ", "helo=%s;", receivedValueEncode(helo)) + if r.Problem != "" { + s := r.Problem + max := 77 - len("problem=; ") + if len(s) > max { + s = s[:max] + } + w.Addf(" ", "problem=%s;", receivedValueEncode(s)) + } + if r.Mechanism != "" { + w.Addf(" ", "mechanism=%s;", receivedValueEncode(r.Mechanism)) + } + w.Addf(" ", "receiver=%s;", receivedValueEncode(r.Receiver)) + w.Addf(" ", "identity=%s", receivedValueEncode(string(r.Identity))) + return w.String() +} diff --git a/spf/received_test.go b/spf/received_test.go new file mode 100644 index 0000000..3383e75 --- /dev/null +++ b/spf/received_test.go @@ -0,0 +1,39 @@ +package spf + +import ( + "net" + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestReceived(t *testing.T) { + test := func(r Received, exp string) { + t.Helper() + s := r.Header() + if s != exp { + t.Fatalf("got %q, expected %q", s, exp) + } + } + + test(Received{ + Result: StatusPass, + Comment: "c", + ClientIP: net.ParseIP("0.0.0.0"), + EnvelopeFrom: "x@x", + Helo: dns.IPDomain{Domain: 
dns.Domain{ASCII: "y"}}, + Problem: `a b"\`, + Receiver: "z", + Identity: ReceivedMailFrom, + Mechanism: "+ip4:0.0.0.0/0", + }, "Received-SPF: pass (c) client-ip=0.0.0.0; envelope-from=\"x@x\"; helo=y;\r\n\tproblem=\"a b\\\"\\\\\"; mechanism=\"+ip4:0.0.0.0/0\"; receiver=z; identity=mailfrom\r\n") + + test(Received{ + Result: StatusPass, + ClientIP: net.ParseIP("0.0.0.0"), + EnvelopeFrom: "x@x", + Helo: dns.IPDomain{IP: net.ParseIP("2001:db8::1")}, + Receiver: "z", + Identity: ReceivedMailFrom, + }, "Received-SPF: pass client-ip=0.0.0.0; envelope-from=\"x@x\"; helo=\"2001:db8::1\";\r\n\treceiver=z; identity=mailfrom\r\n") +} diff --git a/spf/spf.go b/spf/spf.go new file mode 100644 index 0000000..ee7dbae --- /dev/null +++ b/spf/spf.go @@ -0,0 +1,958 @@ +// Package spf implements Sender Policy Framework (SPF, RFC 7208) for verifying +// remote mail server IPs with their published records. +// +// With SPF a domain can publish a policy as a DNS TXT record describing which IPs +// are allowed to send email with SMTP with the domain in the MAIL FROM command, +// and how to treat SMTP transactions coming from other IPs. +package spf + +import ( + "context" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/smtp" +) + +// The net package always returns DNS names in absolute, lower-case form. We make +// sure we make names absolute when looking up. For verifying, we do not want to +// verify names relative to our local search domain. 
+ +var xlog = mlog.New("spf") + +var ( + metricSPFVerify = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_spf_verify_duration_seconds", + Help: "SPF verify, including lookup, duration and result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20}, + }, + []string{ + "status", + }, + ) +) + +// cross-link rfc and errata +// ../rfc/7208-eid5436 ../rfc/7208:2043 +// ../rfc/7208-eid6721 ../rfc/7208:1928 +// ../rfc/7208-eid5227 ../rfc/7208:1297 +// ../rfc/7208-eid6595 ../rfc/7208:984 + +var ( + // Lookup errors. + ErrName = errors.New("spf: bad domain name") + ErrNoRecord = errors.New("spf: no txt record") + ErrMultipleRecords = errors.New("spf: multiple spf txt records in dns") + ErrDNS = errors.New("spf: lookup of dns record") + ErrRecordSyntax = errors.New("spf: malformed spf txt record") + + // Evaluation errors. + ErrTooManyDNSRequests = errors.New("spf: too many dns requests") + ErrTooManyVoidLookups = errors.New("spf: too many void lookups") + ErrMacroSyntax = errors.New("spf: bad macro syntax") +) + +const ( + // Maximum number of DNS requests to execute. This excludes some requests, such as + // lookups of MX host results. + dnsRequestsMax = 10 + + // Maximum number of DNS lookups that result in no records before a StatusPermerror + // is returned. This limit aims to prevent abuse. + voidLookupsMax = 2 +) + +// Status is the result of an SPF verification. +type Status string + +// ../rfc/7208:517 +// ../rfc/7208:1836 + +const ( + StatusNone Status = "none" // E.g. no DNS domain name in session, or no SPF record in DNS. + StatusNeutral Status = "neutral" // Explicit statement that nothing is said about the IP, "?" qualifier. None and Neutral must be treated the same. + StatusPass Status = "pass" // IP is authorized. + StatusFail Status = "fail" // IP is exlicitly not authorized. "-" qualifier. + StatusSoftfail Status = "softfail" // Weak statement that IP is probably not authorized, "~" qualifier. 
+ StatusTemperror Status = "temperror" // Trying again later may succeed, e.g. for temporary DNS lookup error. + StatusPermerror Status = "permerror" // Error requiring some intervention to correct. E.g. invalid DNS record. +) + +// Args are the parameters to the SPF verification algorithm ("check_host" in the RFC). +// +// All fields should be set as they can be required for macro expansions. +type Args struct { + // RemoteIP will be checked as sender for email. + RemoteIP net.IP + + // Address from SMTP MAIL FROM command. Zero values for a null reverse path (used for DSNs). + MailFromLocalpart smtp.Localpart + MailFromDomain dns.Domain + + // HelloDomain is from the SMTP EHLO/HELO command. + HelloDomain dns.IPDomain + + LocalIP net.IP + LocalHostname dns.Domain + + // Explanation string to use for failure. In case of "include", where explanation + // from original domain must be used. + // May be set for recursive calls. + explanation *string + + // Domain to validate. + domain dns.Domain + + // Effective sender. Equal to MailFrom if non-zero, otherwise set to "postmaster" at HelloDomain. + senderLocalpart smtp.Localpart + senderDomain dns.Domain + + // To enforce the limit on lookups. Initialized automatically if nil. + dnsRequests *int + voidLookups *int +} + +// Mocked for testing expanding "t" macro. +var timeNow = time.Now + +// Lookup looks up and parses an SPF TXT record for domain. +func Lookup(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (rstatus Status, rtxt string, rrecord *Record, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("spf lookup result", rerr, mlog.Field("domain", domain), mlog.Field("status", rstatus), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start))) + }() + + // ../rfc/7208:586 + host := domain.ASCII + "." 
+ if err := validateDNS(host); err != nil { + return StatusNone, "", nil, fmt.Errorf("%w: %s: %s", ErrName, domain, err) + } + + // Lookup spf record. + txts, err := dns.WithPackage(resolver, "spf").LookupTXT(ctx, host) + if dns.IsNotFound(err) { + return StatusNone, "", nil, fmt.Errorf("%w for %s", ErrNoRecord, host) + } else if err != nil { + return StatusTemperror, "", nil, fmt.Errorf("%w: %s: %s", ErrDNS, host, err) + } + + // Parse the records. We only handle those that look like spf records. + var record *Record + var text string + for _, txt := range txts { + var isspf bool + r, isspf, err := ParseRecord(txt) + if !isspf { + // ../rfc/7208:595 + continue + } else if err != nil { + // ../rfc/7208:852 + return StatusPermerror, txt, nil, fmt.Errorf("%w: %s", ErrRecordSyntax, err) + } + if record != nil { + // ../rfc/7208:576 + return StatusPermerror, "", nil, ErrMultipleRecords + } + text = txt + record = r + } + if record == nil { + // ../rfc/7208:837 + return StatusNone, "", nil, ErrNoRecord + } + return StatusNone, text, record, nil +} + +// Verify checks if a remote IP is allowed to send email for a domain. +// +// If the SMTP "MAIL FROM" is set, it is used as identity (domain) to verify. +// Otherwise, the EHLO domain is verified if it is a valid domain. +// +// The returned Received.Result status will always be set, regardless of whether an +// error is returned. +// For status Temperror and Permerror, an error is always returned. +// For Fail, explanation may be set, and should be returned in the SMTP session if +// it is the reason the message is rejected. The caller should ensure the +// explanation is valid for use in SMTP, taking line length and ascii-only +// requirement into account. +// +// Verify takes the maximum number of 10 DNS requests into account, and the maximum +// of 2 lookups resulting in no records ("void lookups"). 
+func Verify(ctx context.Context, resolver dns.Resolver, args Args) (received Received, domain dns.Domain, explanation string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + metricSPFVerify.WithLabelValues(string(received.Result)).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("spf verify result", rerr, mlog.Field("domain", args.domain), mlog.Field("ip", args.RemoteIP), mlog.Field("status", received.Result), mlog.Field("explanation", explanation), mlog.Field("duration", time.Since(start))) + }() + + isHello, ok := prepare(&args) + if !ok { + received = Received{ + Result: StatusNone, + Comment: "no domain, ehlo is an ip literal and mailfrom is empty", + ClientIP: args.RemoteIP, + EnvelopeFrom: fmt.Sprintf("%s@%s", args.senderLocalpart, args.HelloDomain.IP.String()), + Helo: args.HelloDomain, + Receiver: args.LocalHostname.ASCII, + } + return received, dns.Domain{}, "", nil + } + + status, mechanism, expl, err := checkHost(ctx, resolver, args) + comment := fmt.Sprintf("domain %s", args.domain.ASCII) + if isHello { + comment += ", from ehlo because mailfrom is empty" + } + received = Received{ + Result: status, + Comment: comment, + ClientIP: args.RemoteIP, + EnvelopeFrom: fmt.Sprintf("%s@%s", args.senderLocalpart, args.senderDomain.ASCII), // ../rfc/7208:2090, explicitly "sender", not "mailfrom". + Helo: args.HelloDomain, + Receiver: args.LocalHostname.ASCII, + Mechanism: mechanism, + } + if err != nil { + received.Problem = err.Error() + } + if isHello { + received.Identity = "helo" + } else { + received.Identity = "mailfrom" + } + return received, args.domain, expl, err +} + +// prepare args, setting fields sender* and domain as required for checkHost. +func prepare(args *Args) (isHello bool, ok bool) { + // If MAIL FROM is set, that identity is used. Otherwise the EHLO identity is used. 
+ // MAIL FROM is preferred, because if we accept the message, and we have to send a + // DSN, it helps to know it is a verified sender. If we would check an EHLO + // identity, and it is different from the MAIL FROM, we may be sending the DSN to + // an address with a domain that would not allow sending from the originating IP. + // The RFC seems a bit confused, ../rfc/7208:778 implies MAIL FROM is preferred, + // but ../rfc/7208:424 mentions that a MAIL FROM check can be avoided by first + // doing HELO. + + args.explanation = nil + args.dnsRequests = nil + args.voidLookups = nil + if args.MailFromDomain.IsZero() { + // If there is on EHLO, and it is an IP, there is nothing to SPF-validate. + if !args.HelloDomain.IsDomain() { + return false, false + } + // If we have a mailfrom, we also have a localpart. But for EHLO we won't. ../rfc/7208:810 + args.senderLocalpart = "postmaster" + args.senderDomain = args.HelloDomain.Domain + isHello = true + } else { + args.senderLocalpart = args.MailFromLocalpart + args.senderDomain = args.MailFromDomain + } + args.domain = args.senderDomain + return isHello, true +} + +// lookup spf record, then evaluate args against it. +func checkHost(ctx context.Context, resolver dns.Resolver, args Args) (rstatus Status, mechanism, rexplanation string, rerr error) { + status, _, record, err := Lookup(ctx, resolver, args.domain) + if err != nil { + return status, "", "", err + } + + return evaluate(ctx, record, resolver, args) +} + +// Evaluate evaluates the IP and names from args against the SPF DNS record for the domain. +func Evaluate(ctx context.Context, record *Record, resolver dns.Resolver, args Args) (rstatus Status, mechanism, rexplanation string, rerr error) { + _, ok := prepare(&args) + if !ok { + return StatusNone, "default", "", fmt.Errorf("no domain name to validate") + } + return evaluate(ctx, record, resolver, args) +} + +// evaluate RemoteIP against domain from args, given record. 
+func evaluate(ctx context.Context, record *Record, resolver dns.Resolver, args Args) (rstatus Status, mechanism, rexplanation string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("spf evaluate result", rerr, mlog.Field("dnsrequests", *args.dnsRequests), mlog.Field("voidLookups", *args.voidLookups), mlog.Field("domain", args.domain), mlog.Field("status", rstatus), mlog.Field("mechanism", mechanism), mlog.Field("explanation", rexplanation), mlog.Field("duration", time.Since(start))) + }() + + resolver = dns.WithPackage(resolver, "spf") + + if args.dnsRequests == nil { + args.dnsRequests = new(int) + args.voidLookups = new(int) + } + + // To4 returns nil for an IPv6 address. To16 will return an IPv4-to-IPv6-mapped address. + var remote6 net.IP + remote4 := args.RemoteIP.To4() + if remote4 == nil { + remote6 = args.RemoteIP.To16() + } + + // Check if ip matches remote ip, taking cidr mask into account. + checkIP := func(ip net.IP, d Directive) bool { + // ../rfc/7208:1097 + if remote4 != nil { + ip4 := ip.To4() + if ip4 == nil { + return false + } + ones := 32 + if d.IP4CIDRLen != nil { + ones = *d.IP4CIDRLen + } + mask := net.CIDRMask(ones, 32) + return ip4.Mask(mask).Equal(remote4.Mask(mask)) + } + + ip6 := ip.To16() + if ip6 == nil { + return false + } + ones := 128 + if d.IP6CIDRLen != nil { + ones = *d.IP6CIDRLen + } + mask := net.CIDRMask(ones, 128) + return ip6.Mask(mask).Equal(remote6.Mask(mask)) + } + + // Used for "a" and "mx". + checkHostIP := func(domain dns.Domain, d Directive, args *Args) (bool, Status, error) { + network := "ip4" + if remote6 != nil { + network = "ip6" + } + ips, err := resolver.LookupIP(ctx, network, domain.ASCII+".") + trackVoidLookup(err, args) + // If "not found", we must ignore the error and treat as zero records in answer. 
../rfc/7208:1116 + if err != nil && !dns.IsNotFound(err) { + return false, StatusTemperror, err + } + for _, ip := range ips { + if checkIP(ip, d) { + return true, StatusPass, nil + } + } + return false, StatusNone, nil + } + + for _, d := range record.Directives { + var match bool + + switch d.Mechanism { + case "include", "a", "mx", "ptr", "exists": + if err := trackLookupLimits(&args); err != nil { + return StatusPermerror, d.MechanismString(), "", err + } + } + + switch d.Mechanism { + case "all": + // ../rfc/7208:1127 + match = true + + case "include": + // ../rfc/7208:1143 + name, err := expandDomainSpecDNS(ctx, resolver, d.DomainSpec, args) + if err != nil { + return StatusPermerror, d.MechanismString(), "", fmt.Errorf("expanding domain-spec for include: %w", err) + } + nargs := args + nargs.domain = dns.Domain{ASCII: strings.TrimSuffix(name, ".")} + nargs.explanation = &record.Explanation // ../rfc/7208:1548 + status, _, _, err := checkHost(ctx, resolver, nargs) + // ../rfc/7208:1202 + switch status { + case StatusPass: + match = true + case StatusTemperror: + return StatusTemperror, d.MechanismString(), "", fmt.Errorf("include %q: %w", name, err) + case StatusPermerror, StatusNone: + return StatusPermerror, d.MechanismString(), "", fmt.Errorf("include %q resulted in status %q: %w", name, status, err) + } + + case "a": + // ../rfc/7208:1249 + // note: the syntax for DomainSpec hints that macros should be expanded. But + // expansion is explicitly documented, and only for "include", "exists" and + // "redirect". This reason for this could be low-effort reuse of the domain-spec + // ABNF rule. It could be an oversight. We are not implementing expansion for the + // mechanism for which it isn't specified. 
+ host, err := evaluateDomainSpec(d.DomainSpec, args.domain) + if err != nil { + return StatusPermerror, d.MechanismString(), "", err + } + hmatch, status, err := checkHostIP(host, d, &args) + if err != nil { + return status, d.MechanismString(), "", err + } + match = hmatch + + case "mx": + // ../rfc/7208:1262 + host, err := evaluateDomainSpec(d.DomainSpec, args.domain) + if err != nil { + return StatusPermerror, d.MechanismString(), "", err + } + // Note: LookupMX can return an error and still return MX records. + mxs, err := resolver.LookupMX(ctx, host.ASCII+".") + trackVoidLookup(err, &args) + // note: we handle "not found" simply as a result of zero mx records. + if err != nil && !dns.IsNotFound(err) { + return StatusTemperror, d.MechanismString(), "", err + } + if err == nil && len(mxs) == 1 && mxs[0].Host == "." { + // Explicitly no MX. + break + } + for i, mx := range mxs { + // ../rfc/7208:947 says that each mx record cannot result in more than 10 DNS + // requests. This seems independent of the overall limit of 10 DNS requests. So an + // MX request resulting in 11 names is valid, but we must return a permerror if we + // found no match before the 11th name. 
+ // ../rfc/7208:945 + if i >= 10 { + return StatusPermerror, d.MechanismString(), "", ErrTooManyDNSRequests + } + mxd, err := dns.ParseDomain(strings.TrimSuffix(mx.Host, ".")) + if err != nil { + return StatusPermerror, d.MechanismString(), "", err + } + hmatch, status, err := checkHostIP(mxd, d, &args) + if err != nil { + return status, d.MechanismString(), "", err + } + if hmatch { + match = hmatch + break + } + } + + case "ptr": + // ../rfc/7208:1281 + host, err := evaluateDomainSpec(d.DomainSpec, args.domain) + if err != nil { + return StatusPermerror, d.MechanismString(), "", err + } + + rnames, err := resolver.LookupAddr(ctx, args.RemoteIP.String()) + trackVoidLookup(err, &args) + if err != nil && !dns.IsNotFound(err) { + return StatusTemperror, d.MechanismString(), "", err + } + lookups := 0 + ptrnames: + for _, rname := range rnames { + rd, err := dns.ParseDomain(strings.TrimSuffix(rname, ".")) + if err != nil { + log.Errorx("bad address in ptr record", err, mlog.Field("address", rname)) + continue + } + // ../rfc/7208-eid4751 ../rfc/7208:1323 + if rd.ASCII != host.ASCII && !strings.HasSuffix(rd.ASCII, "."+host.ASCII) { + continue + } + + // ../rfc/7208:963, we must ignore entries after the first 10. 
+ if lookups >= 10 { + break + } + lookups++ + network := "ip4" + if remote6 != nil { + network = "ip6" + } + ips, err := resolver.LookupIP(ctx, network, rd.ASCII+".") + trackVoidLookup(err, &args) + for _, ip := range ips { + if checkIP(ip, d) { + match = true + break ptrnames + } + } + } + + // ../rfc/7208:1351 + case "ip4": + if remote4 != nil { + match = checkIP(d.IP, d) + } + case "ip6": + if remote6 != nil { + match = checkIP(d.IP, d) + } + + case "exists": + // ../rfc/7208:1382 + name, err := expandDomainSpecDNS(ctx, resolver, d.DomainSpec, args) + if err != nil { + return StatusPermerror, d.MechanismString(), "", fmt.Errorf("expanding domain-spec for exists: %w", err) + } + + ips, err := resolver.LookupIP(ctx, "ip4", ensureAbsDNS(name)) + // Note: we do count this for void lookups, as that is an anti-abuse mechanism. + // ../rfc/7208:1382 does not say anything special, so ../rfc/7208:984 applies. + trackVoidLookup(err, &args) + if err != nil && !dns.IsNotFound(err) { + return StatusTemperror, d.MechanismString(), "", err + } + match = len(ips) > 0 + + default: + return StatusNone, d.MechanismString(), "", fmt.Errorf("internal error, unexpected mechanism %q", d.Mechanism) + } + + if !match { + continue + } + switch d.Qualifier { + case "", "+": + return StatusPass, d.MechanismString(), "", nil + case "?": + return StatusNeutral, d.MechanismString(), "", nil + case "-": + nargs := args + // ../rfc/7208:1489 + expl := explanation(ctx, resolver, record, nargs) + return StatusFail, d.MechanismString(), expl, nil + case "~": + return StatusSoftfail, d.MechanismString(), "", nil + } + return StatusNone, d.MechanismString(), "", fmt.Errorf("internal error, unexpected qualifier %q", d.Qualifier) + } + + if record.Redirect != "" { + // We only know "redirect" for evaluating purposes, ignoring any others. 
../rfc/7208:1423 + + // ../rfc/7208:1440 + name, err := expandDomainSpecDNS(ctx, resolver, record.Redirect, args) + if err != nil { + return StatusPermerror, "", "", fmt.Errorf("expanding domain-spec: %w", err) + } + nargs := args + nargs.domain = dns.Domain{ASCII: strings.TrimSuffix(name, ".")} + nargs.explanation = nil // ../rfc/7208:1548 + status, mechanism, expl, err := checkHost(ctx, resolver, nargs) + if status == StatusNone { + return StatusPermerror, mechanism, "", err + } + return status, mechanism, expl, err + } + + // ../rfc/7208:996 ../rfc/7208:2095 + return StatusNeutral, "default", "", nil +} + +// evaluateDomainSpec returns the parsed dns domain for spec if non-empty, and +// otherwise returns d, which must be the Domain in checkHost Args. +func evaluateDomainSpec(spec string, d dns.Domain) (dns.Domain, error) { + // ../rfc/7208:1037 + if spec == "" { + return d, nil + } + d, err := dns.ParseDomain(spec) + if err != nil { + return d, fmt.Errorf("%w: %s", ErrName, err) + } + return d, nil +} + +func expandDomainSpecDNS(ctx context.Context, resolver dns.Resolver, domainSpec string, args Args) (string, error) { + return expandDomainSpec(ctx, resolver, domainSpec, args, true) +} + +func expandDomainSpecExp(ctx context.Context, resolver dns.Resolver, domainSpec string, args Args) (string, error) { + return expandDomainSpec(ctx, resolver, domainSpec, args, false) +} + +// expandDomainSpec interprets macros in domainSpec. +// The expansion can fail due to macro syntax errors or DNS errors. +// Caller should typically treat failures as StatusPermerror. 
../rfc/7208:1641 +// ../rfc/7208:1639 +// ../rfc/7208:1047 +func expandDomainSpec(ctx context.Context, resolver dns.Resolver, domainSpec string, args Args, dns bool) (string, error) { + exp := !dns + + s := domainSpec + + b := &strings.Builder{} + i := 0 + n := len(s) + for i < n { + c := s[i] + i++ + if c != '%' { + b.WriteByte(c) + continue + } + + if i >= n { + return "", fmt.Errorf("%w: trailing bare %%", ErrMacroSyntax) + } + c = s[i] + i++ + if c == '%' { + b.WriteByte(c) + continue + } else if c == '_' { + b.WriteByte(' ') + continue + } else if c == '-' { + b.WriteString("%20") + continue + } else if c != '{' { + return "", fmt.Errorf("%w: invalid macro opening %%%c", ErrMacroSyntax, c) + } + + if i >= n { + return "", fmt.Errorf("%w: missing macro ending }", ErrMacroSyntax) + } + c = s[i] + i++ + + upper := false + if c >= 'A' && c <= 'Z' { + upper = true + c += 'a' - 'A' + } + + var v string + switch c { + case 's': + // todo: should we check for utf8 in localpart, and fail? we may now generate utf8 strings to places that may not be able to parse them. it will probably lead to relatively harmless error somewhere else. perhaps we can just transform the localpart to IDN? because it may be used in a dns lookup. ../rfc/7208:1507 + v = smtp.NewAddress(args.senderLocalpart, args.senderDomain).String() + case 'l': + // todo: same about utf8 as for 's'. + v = string(args.senderLocalpart) + case 'o': + v = args.senderDomain.ASCII + case 'd': + v = args.domain.ASCII + case 'i': + v = expandIP(args.RemoteIP) + case 'p': + // ../rfc/7208:937 + if err := trackLookupLimits(&args); err != nil { + return "", err + } + names, err := resolver.LookupAddr(ctx, args.RemoteIP.String()) + trackVoidLookup(err, &args) + if len(names) == 0 || err != nil { + // ../rfc/7208:1709 + v = "unknown" + break + } + + // Verify finds the first dns name that resolves to the remote ip. 
+ verify := func(matchfn func(string) bool) (string, error) { + for _, name := range names { + if !matchfn(name) { + continue + } + network := "ip4" + if args.RemoteIP.To4() == nil { + network = "ip6" + } + ips, err := resolver.LookupIP(ctx, network, name) + trackVoidLookup(err, &args) + // ../rfc/7208:1714, we don't have to check other errors. + for _, ip := range ips { + if ip.Equal(args.RemoteIP) { + return name, nil + } + } + } + return "", nil + } + + // First exact domain name matches, then subdomains, finally other names. + domain := args.domain.ASCII + "." + dotdomain := "." + domain + v, err = verify(func(name string) bool { return name == domain }) + if err != nil { + return "", err + } + if v == "" { + v, err = verify(func(name string) bool { return strings.HasSuffix(name, dotdomain) }) + if err != nil { + return "", err + } + } + if v == "" { + v, err = verify(func(name string) bool { return name != domain && !strings.HasSuffix(name, dotdomain) }) + if err != nil { + return "", err + } + } + if v == "" { + // ../rfc/7208:1709 + v = "unknown" + } + + case 'v': + if args.RemoteIP.To4() != nil { + v = "in-addr" + } else { + v = "ip6" + } + case 'h': + if args.HelloDomain.IsIP() { + // ../rfc/7208:1621 explicitly says "domain", not "ip". We'll handle IP, probably does no harm. 
+ v = expandIP(args.HelloDomain.IP) + } else { + v = args.HelloDomain.Domain.ASCII + } + case 'c', 'r', 't': + if !exp { + return "", fmt.Errorf("%w: macro letter %c only allowed in exp", ErrMacroSyntax, c) + } + switch c { + case 'c': + v = args.LocalIP.String() + case 'r': + v = args.LocalHostname.ASCII + case 't': + v = fmt.Sprintf("%d", timeNow().Unix()) + } + default: + return "", fmt.Errorf("%w: unknown macro letter %c", ErrMacroSyntax, c) + } + + digits := "" + for i < n && s[i] >= '0' && s[i] <= '9' { + digits += string(s[i]) + i++ + } + nlabels := -1 + if digits != "" { + v, err := strconv.Atoi(digits) + if err != nil { + return "", fmt.Errorf("%w: bad macro transformer digits %q: %s", ErrMacroSyntax, digits, err) + } + nlabels = v + if nlabels == 0 { + return "", fmt.Errorf("%w: zero labels for digits transformer", ErrMacroSyntax) + } + } + + // If "r" follows, we must reverse the resulting name, splitting on a dot by default. + // ../rfc/7208:1655 + reverse := false + if i < n && (s[i] == 'r' || s[i] == 'R') { + reverse = true + i++ + } + + // Delimiters to split on, for subset of labels and/or reversing. + delim := "" + for i < n { + switch s[i] { + case '.', '-', '+', ',', '/', '_', '=': + delim += string(s[i]) + i++ + continue + } + break + } + + if i >= n || s[i] != '}' { + return "", fmt.Errorf("%w: missing closing } for macro", ErrMacroSyntax) + } + i++ + + // Only split and subset and/or reverse if necessary. + if nlabels >= 0 || reverse || delim != "" { + if delim == "" { + delim = "." + } + t := split(v, delim) + // ../rfc/7208:1655 + if reverse { + nt := len(t) + h := nt / 2 + for i := 0; i < h; i++ { + t[i], t[nt-1-i] = t[nt-1-i], t[i] + } + } + if nlabels > 0 && nlabels < len(t) { + t = t[len(t)-nlabels:] + } + // Always join on dot. 
../rfc/7208:1659 + v = strings.Join(t, ".") + } + + // ../rfc/7208:1755 + if upper { + v = url.QueryEscape(v) + } + + b.WriteString(v) + } + r := b.String() + if dns { + isAbs := strings.HasSuffix(r, ".") + r = ensureAbsDNS(r) + if err := validateDNS(r); err != nil { + return "", fmt.Errorf("invalid dns name: %s", err) + } + // If resulting name is too large, cut off labels on the left until it fits. ../rfc/7208:1749 + if len(r) > 253+1 { + labels := strings.Split(r, ".") + for i := range labels { + if i == len(labels)-1 { + return "", fmt.Errorf("expanded dns name too long") + } + s := strings.Join(labels[i+1:], ".") + if len(s) <= 254 { + r = s + break + } + } + } + if !isAbs { + r = r[:len(r)-1] + } + } + return r, nil +} + +func expandIP(ip net.IP) string { + ip4 := ip.To4() + if ip4 != nil { + return ip4.String() + } + v := "" + for i, b := range ip.To16() { + if i > 0 { + v += "." + } + v += fmt.Sprintf("%x.%x", b>>4, b&0xf) + } + return v +} + +// validateDNS checks if a DNS name is valid. Must not end in dot. This does not +// check valid host names, e.g. _ is allows in DNS but not in a host name. +func validateDNS(s string) error { + // ../rfc/7208:800 + // note: we are not checking for max 253 bytes length, because one of the callers may be chopping off labels to "correct" the name. 
+ labels := strings.Split(s, ".") + if len(labels) > 128 { + return fmt.Errorf("more than 128 labels") + } + for _, label := range labels[:len(labels)-1] { + if len(label) > 63 { + return fmt.Errorf("label longer than 63 bytes") + } + + if label == "" { + return fmt.Errorf("empty dns label") + } + } + return nil +} + +func split(v, delim string) (r []string) { + isdelim := func(c rune) bool { + for _, d := range delim { + if d == c { + return true + } + } + return false + } + + s := 0 + for i, c := range v { + if isdelim(c) { + r = append(r, v[s:i]) + s = i + 1 + } + } + r = append(r, v[s:]) + return r +} + +// explanation does a best-effort attempt to fetch an explanation for a StatusFail response. +// If no explanation could be composed, an empty string is returned. +func explanation(ctx context.Context, resolver dns.Resolver, r *Record, args Args) string { + // ../rfc/7208:1485 + + // If this record is the result of an "include", we have to use the explanation + // string of the original domain, not of this domain. + // ../rfc/7208:1548 + expl := r.Explanation + if args.explanation != nil { + expl = *args.explanation + } + + // ../rfc/7208:1491 + if expl == "" { + return "" + } + + // Limits for dns requests and void lookups should not be taken into account. + // Starting with zero ensures they aren't triggered. + args.dnsRequests = new(int) + args.voidLookups = new(int) + name, err := expandDomainSpecDNS(ctx, resolver, r.Explanation, args) + if err != nil || name == "" { + return "" + } + txts, err := resolver.LookupTXT(ctx, ensureAbsDNS(name)) + if err != nil || len(txts) == 0 { + return "" + } + txt := strings.Join(txts, "") + s, err := expandDomainSpecExp(ctx, resolver, txt, args) + if err != nil { + return "" + } + return s +} + +func ensureAbsDNS(s string) string { + if !strings.HasSuffix(s, ".") { + return s + "." 
+ } + return s +} + +func trackLookupLimits(args *Args) error { + // ../rfc/7208:937 + if *args.dnsRequests >= dnsRequestsMax { + return ErrTooManyDNSRequests + } + // ../rfc/7208:988 + if *args.voidLookups >= voidLookupsMax { + return ErrTooManyVoidLookups + } + *args.dnsRequests++ + return nil +} + +func trackVoidLookup(err error, args *Args) { + if dns.IsNotFound(err) { + *args.voidLookups++ + } +} diff --git a/spf/spf_test.go b/spf/spf_test.go new file mode 100644 index 0000000..a390623 --- /dev/null +++ b/spf/spf_test.go @@ -0,0 +1,521 @@ +package spf + +import ( + "context" + "errors" + "fmt" + "net" + "reflect" + "testing" + "time" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/smtp" +) + +func TestLookup(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "temperror.example.": {"irrelevant"}, + "malformed.example.": {"v=spf1 !"}, + "multiple.example.": {"v=spf1", "v=spf1"}, + "nonspf.example.": {"something else"}, + "ok.example.": {"v=spf1"}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "temperror.example."}: {}, + }, + } + + test := func(domain string, expStatus Status, expRecord *Record, expErr error) { + t.Helper() + + d := dns.Domain{ASCII: domain} + status, txt, record, err := Lookup(context.Background(), resolver, d) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected err %v", err, expErr) + } + if err != nil { + return + } + if status != expStatus || txt == "" || !reflect.DeepEqual(record, expRecord) { + t.Fatalf("got status %q, txt %q, record %#v, expected %q, ..., %#v", status, txt, record, expStatus, expRecord) + } + } + + test("..", StatusNone, nil, ErrName) + test("absent.example", StatusNone, nil, ErrNoRecord) + test("temperror.example", StatusTemperror, nil, ErrDNS) + test("malformed.example", StatusPermerror, nil, ErrRecordSyntax) + test("multiple.example", StatusPermerror, nil, ErrMultipleRecords) + test("nonspf.example", StatusNone, 
nil, ErrNoRecord) + test("ok.example", StatusNone, &Record{Version: "spf1"}, nil) +} + +func TestExpand(t *testing.T) { + defArgs := Args{ + senderLocalpart: "strong-bad", + senderDomain: dns.Domain{ASCII: "email.example.com"}, + domain: dns.Domain{ASCII: "email.example.com"}, + + MailFromLocalpart: "x", + MailFromDomain: dns.Domain{ASCII: "mox.example"}, + HelloDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mx.mox.example"}}, + LocalIP: net.ParseIP("10.10.10.10"), + LocalHostname: dns.Domain{ASCII: "self.example"}, + } + + resolver := dns.MockResolver{ + PTR: map[string][]string{ + "10.0.0.1": {"other.example.", "sub.mx.mox.example.", "mx.mox.example."}, + "10.0.0.2": {"other.example.", "sub.mx.mox.example.", "mx.mox.example."}, + "10.0.0.3": {"other.example.", "sub.mx.mox.example.", "mx.mox.example."}, + }, + A: map[string][]string{ + "mx.mox.example.": {"10.0.0.1"}, + "sub.mx.mox.example.": {"10.0.0.2"}, + "other.example.": {"10.0.0.3"}, + }, + } + + mustParseIP := func(s string) net.IP { + ip := net.ParseIP(s) + if ip == nil { + t.Fatalf("bad ip %q", s) + } + return ip + } + + ctx := context.Background() + + // Examples from ../rfc/7208:1777 + test := func(dns bool, macro, ip, exp string) { + t.Helper() + + args := defArgs + args.dnsRequests = new(int) + args.voidLookups = new(int) + if ip != "" { + args.RemoteIP = mustParseIP(ip) + } + + r, err := expandDomainSpec(ctx, resolver, macro, args, dns) + if (err == nil) != (exp != "") { + t.Fatalf("got err %v, expected expansion %q, for macro %q", err, exp, macro) + } + if r != exp { + t.Fatalf("got expansion %q, expected %q, for macro %q", r, exp, macro) + } + } + + testDNS := func(macro, ip, exp string) { + t.Helper() + test(true, macro, ip, exp) + } + + testExpl := func(macro, ip, exp string) { + t.Helper() + test(false, macro, ip, exp) + } + + testDNS("%{s}", "", "strong-bad@email.example.com") + testDNS("%{o}", "", "email.example.com") + testDNS("%{d}", "", "email.example.com") + testDNS("%{d4}", "", 
"email.example.com") + testDNS("%{d3}", "", "email.example.com") + testDNS("%{d2}", "", "example.com") + testDNS("%{d1}", "", "com") + testDNS("%{dr}", "", "com.example.email") + testDNS("%{d2r}", "", "example.email") + testDNS("%{l}", "", "strong-bad") + testDNS("%{l-}", "", "strong.bad") + testDNS("%{lr}", "", "strong-bad") + testDNS("%{lr-}", "", "bad.strong") + testDNS("%{l1r-}", "", "strong") + + testDNS("%", "", "") + testDNS("%b", "", "") + testDNS("%{", "", "") + testDNS("%{s", "", "") + testDNS("%{s1", "", "") + testDNS("%{s0}", "", "") + testDNS("%{s1r", "", "") + testDNS("%{s99999999999999999999999999999999999999999999999999999999999999999999999}", "", "") + + testDNS("%{ir}.%{v}._spf.%{d2}", "192.0.2.3", "3.2.0.192.in-addr._spf.example.com") + testDNS("%{lr-}.lp._spf.%{d2}", "192.0.2.3", "bad.strong.lp._spf.example.com") + testDNS("%{lr-}.lp.%{ir}.%{v}._spf.%{d2}", "192.0.2.3", "bad.strong.lp.3.2.0.192.in-addr._spf.example.com") + testDNS("%{ir}.%{v}.%{l1r-}.lp._spf.%{d2}", "192.0.2.3", "3.2.0.192.in-addr.strong.lp._spf.example.com") + testDNS("%{d2}.trusted-domains.example.net", "192.0.2.3", "example.com.trusted-domains.example.net") + + testDNS("%{ir}.%{v}._spf.%{d2}", "2001:db8::cb01", "1.0.b.c.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6._spf.example.com") + + // Additional. + testDNS("%%%-%_", "10.0.0.1", "%%20 ") + testDNS("%{p}", "10.0.0.1", "mx.mox.example.") + testDNS("%{p}", "10.0.0.2", "sub.mx.mox.example.") + testDNS("%{p}", "10.0.0.3", "other.example.") + testDNS("%{p}", "10.0.0.4", "unknown") + testExpl("%{c}", "10.0.0.1", "10.10.10.10") + testExpl("%{r}", "10.0.0.1", "self.example") + orig := timeNow + now := orig() + defer func() { + timeNow = orig + }() + timeNow = func() time.Time { + return now + } + testExpl("%{t}", "10.0.0.1", fmt.Sprintf("%d", now.Unix())) + // DNS name can be 253 bytes long, each label can be 63 bytes. 
+ xlabel := make([]byte, 62) + for i := range xlabel { + xlabel[i] = 'a' + } + label := string(xlabel) + name := label + "." + label + "." + label + "." + label // 4*62+3 = 251 + testDNS("x."+name, "10.0.0.1", "x."+name) // Still fits. + testDNS("xx."+name, "10.0.0.1", name) // Does not fit, "xx." is truncated to make it fit. + testDNS("%{p}..", "10.0.0.1", "") + testDNS("%{h}", "10.0.0.1", "mx.mox.example") +} + +func TestVerify(t *testing.T) { + xip := func(s string) net.IP { + ip := net.ParseIP(s) + if ip == nil { + t.Fatalf("bad ip: %q", s) + } + return ip + } + iplist := func(l ...string) []net.IP { + r := make([]net.IP, len(l)) + for i, s := range l { + r[i] = xip(s) + } + return r + } + + // ../rfc/7208:2975 Appendix A. Extended Examples + r := dns.MockResolver{ + PTR: map[string][]string{ + "192.0.2.10": {"example.com."}, + "192.0.2.11": {"example.com."}, + "192.0.2.65": {"amy.example.com."}, + "192.0.2.66": {"bob.example.com."}, + "192.0.2.129": {"mail-a.example.com."}, + "192.0.2.130": {"mail-b.example.com."}, + "192.0.2.140": {"mail-c.example.org."}, + "10.0.0.4": {"bob.example.com."}, + }, + TXT: map[string][]string{ + // Additional from DNSBL, ../rfc/7208:3115 + "mobile-users._spf.example.com.": {"v=spf1 exists:%{l1r+}.%{d}"}, + "remote-users._spf.example.com.": {"v=spf1 exists:%{ir}.%{l1r+}.%{d}"}, + + // Additional ../rfc/7208:3171 + "ip4._spf.example.com.": {"v=spf1 -ip4:192.0.2.0/24 +all"}, + "ptr._spf.example.com.": {"v=spf1 -ptr:example.com +all"}, // ../rfc/7208-eid6216 ../rfc/7208:3172 + + // Additional tests + "_spf.example.com.": {"v=spf1 include:_netblock.example.com -all"}, + "_netblock.example.com.": {"v=spf1 ip4:192.0.2.128/28 -all"}, + }, + A: map[string][]string{ + "example.com.": {"192.0.2.10", "192.0.2.11"}, + "amy.example.com.": {"192.0.2.65"}, + "bob.example.com.": {"192.0.2.66"}, + "mail-a.example.com.": {"192.0.2.129"}, + "mail-b.example.com.": {"192.0.2.130"}, + "mail-c.example.org.": {"192.0.2.140"}, + + // Additional from 
DNSBL, ../rfc/7208:3115 + "mary.mobile-users._spf.example.com.": {"127.0.0.2"}, + "fred.mobile-users._spf.example.com.": {"127.0.0.2"}, + "15.15.168.192.joel.remote-users._spf.example.com.": {"127.0.0.2"}, + "16.15.168.192.joel.remote-users._spf.example.com.": {"127.0.0.2"}, + }, + AAAA: map[string][]string{}, + MX: map[string][]*net.MX{ + "example.com.": { + {Host: "mail-a.example.com.", Pref: 10}, + {Host: "mail-b.example.com.", Pref: 20}, + }, + "example.org.": { + {Host: "mail-c.example.org.", Pref: 10}, + }, + }, + Fail: map[dns.Mockreq]struct{}{}, + } + + ctx := context.Background() + + verify := func(ip net.IP, localpart string, status Status) { + t.Helper() + + args := Args{ + MailFromLocalpart: smtp.Localpart(localpart), + MailFromDomain: dns.Domain{ASCII: "example.com"}, + RemoteIP: ip, + LocalIP: xip("127.0.0.1"), + LocalHostname: dns.Domain{ASCII: "localhost"}, + } + received, _, _, err := Verify(ctx, r, args) + if received.Result != status { + t.Fatalf("got status %q, expected %q, for ip %q (err %v)", received.Result, status, ip, err) + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + } + + test := func(txt string, ips []net.IP, only bool) { + r.TXT["example.com."] = []string{txt} + seen := map[string]struct{}{} + for _, ip := range ips { + verify(ip, "", StatusPass) + seen[ip.String()] = struct{}{} + } + if !only { + return + } + for ip := range r.PTR { + if _, ok := seen[ip]; ok { + continue + } + verify(xip(ip), "", StatusFail) + } + } + + // ../rfc/7208:3031 A.1. 
Simple Examples + test("v=spf1 +all", iplist("192.0.2.129", "1.2.3.4"), false) + test("v=spf1 a -all", iplist("192.0.2.10", "192.0.2.11"), true) + test("v=spf1 a:example.org -all", iplist(), true) + test("v=spf1 mx -all", iplist("192.0.2.129", "192.0.2.130"), true) + test("v=spf1 mx:example.org -all", iplist("192.0.2.140"), true) + test("v=spf1 mx mx:example.org -all", iplist("192.0.2.129", "192.0.2.130", "192.0.2.140"), true) + test("v=spf1 mx/30 mx:example.org/30 -all", iplist("192.0.2.129", "192.0.2.130", "192.0.2.140"), true) + test("v=spf1 ptr -all", iplist("192.0.2.10", "192.0.2.11", "192.0.2.65", "192.0.2.66", "192.0.2.129", "192.0.2.130"), true) + test("v=spf1 ip4:192.0.2.128/28 -all", iplist("192.0.2.129", "192.0.2.130", "192.0.2.140"), true) + + // Additional tests + test("v=spf1 redirect=_spf.example.com", iplist("192.0.2.129", "192.0.2.130", "192.0.2.140"), true) + + // Additional from DNSBL, ../rfc/7208:3115 + r.TXT["example.com."] = []string{"v=spf1 mx include:mobile-users._spf.%{d} include:remote-users._spf.%{d} -all"} + verify(xip("1.2.3.4"), "mary", StatusPass) + verify(xip("1.2.3.4"), "fred", StatusPass) + verify(xip("1.2.3.4"), "fred+wildcard", StatusPass) + verify(xip("1.2.3.4"), "joel", StatusFail) + verify(xip("1.2.3.4"), "other", StatusFail) + verify(xip("192.168.15.15"), "joel", StatusPass) + verify(xip("192.168.15.16"), "joel", StatusPass) + verify(xip("192.168.15.17"), "joel", StatusFail) + verify(xip("192.168.15.17"), "other", StatusFail) + + // Additional ../rfc/7208:3171 + r.TXT["example.com."] = []string{"v=spf1 -include:ip4._spf.%{d} -include:ptr._spf.%{d} +all"} + r.PTR["192.0.2.1"] = []string{"a.example.com."} + r.PTR["192.0.0.1"] = []string{"b.example.com."} + r.A["a.example.com."] = []string{"192.0.2.1"} + r.A["b.example.com."] = []string{"192.0.0.1"} + + verify(xip("192.0.2.1"), "", StatusPass) // IP in range and PTR matches. + verify(xip("192.0.2.2"), "", StatusFail) // IP in range but no PTR match. 
+ verify(xip("192.0.0.1"), "", StatusFail) // PTR match but IP not in range. + verify(xip("192.0.0.2"), "", StatusFail) // No PTR match and IP not in range. +} + +// ../rfc/7208:3093 +func TestVerifyMultipleDomain(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "example.org.": {"v=spf1 include:example.com include:example.net -all"}, + "la.example.org.": {"v=spf1 redirect=example.org"}, + "example.com.": {"v=spf1 ip4:10.0.0.1 -all"}, + "example.net.": {"v=spf1 ip4:10.0.0.2 -all"}, + }, + } + + verify := func(domain, ip string, status Status) { + t.Helper() + + args := Args{ + MailFromDomain: dns.Domain{ASCII: domain}, + RemoteIP: net.ParseIP(ip), + LocalIP: net.ParseIP("127.0.0.1"), + LocalHostname: dns.Domain{ASCII: "localhost"}, + } + received, _, _, err := Verify(context.Background(), resolver, args) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if received.Result != status { + t.Fatalf("got status %q, expected %q, for ip %q", received.Result, status, ip) + } + } + + verify("example.com", "10.0.0.1", StatusPass) + verify("example.net", "10.0.0.2", StatusPass) + verify("example.com", "10.0.0.2", StatusFail) + verify("example.net", "10.0.0.1", StatusFail) + verify("example.org", "10.0.0.1", StatusPass) + verify("example.org", "10.0.0.2", StatusPass) + verify("example.org", "10.0.0.3", StatusFail) + verify("la.example.org", "10.0.0.1", StatusPass) + verify("la.example.org", "10.0.0.2", StatusPass) + verify("la.example.org", "10.0.0.3", StatusFail) +} + +func TestVerifyScenarios(t *testing.T) { + test := func(resolver dns.Resolver, args Args, expStatus Status, expDomain string, expExpl string, expErr error) { + t.Helper() + + recv, d, expl, err := Verify(context.Background(), resolver, args) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("got err %v, expected %v", err, expErr) + } + if expStatus != recv.Result || expDomain != "" && d.ASCII != expDomain || expExpl != "" && expl 
!= expExpl { + t.Fatalf("got status %q, domain %q, expl %q, err %v", recv.Result, d, expl, err) + } + } + + r := dns.MockResolver{ + TXT: map[string][]string{ + "mox.example.": {"v=spf1 ip6:2001:db8::0/64 -all"}, + "void.example.": {"v=spf1 exists:absent1.example exists:absent2.example ip4:1.2.3.4 exists:absent3.example -all"}, + "loop.example.": {"v=spf1 include:loop.example -all"}, + "a-unknown.example.": {"v=spf1 a:absent.example"}, + "include-bad-expand.example.": {"v=spf1 include:%{c}"}, // macro 'c' only valid while expanding for "exp". + "exists-bad-expand.example.": {"v=spf1 exists:%{c}"}, // macro 'c' only valid while expanding for "exp". + "redir-bad-expand.example.": {"v=spf1 redirect=%{c}"}, // macro 'c' only valid while expanding for "exp". + "a-bad-expand.example.": {"v=spf1 a:%{c}"}, // macro 'c' only valid while expanding for "exp". + "mx-bad-expand.example.": {"v=spf1 mx:%{c}"}, // macro 'c' only valid while expanding for "exp". + "ptr-bad-expand.example.": {"v=spf1 ptr:%{c}"}, // macro 'c' only valid while expanding for "exp". 
+ "include-temperror.example.": {"v=spf1 include:temperror.example"}, + "include-none.example.": {"v=spf1 include:absent.example"}, + "include-permerror.example.": {"v=spf1 include:permerror.example"}, + "permerror.example.": {"v=spf1 a:%%"}, + "no-mx.example.": {"v=spf1 mx -all"}, + "many-mx.example.": {"v=spf1 mx -all"}, + "many-ptr.example.": {"v=spf1 ptr:many-mx.example ~all"}, + "expl.example.": {"v=spf1 ip4:10.0.1.1 -ip4:10.0.1.2 ?all exp=details.expl.example"}, + "details.expl.example.": {"your ip %{i} is not allowed"}, + "expl-multi.example.": {"v=spf1 ip4:10.0.1.1 -ip4:10.0.1.2 ~all exp=details-multi.expl.example"}, + "details-multi.expl.example.": {"your ip ", "%{i} is not allowed"}, + }, + A: map[string][]string{ + "mail.mox.example.": {"10.0.0.1"}, + "mx1.many-mx.example.": {"10.0.1.1"}, + "mx2.many-mx.example.": {"10.0.1.2"}, + "mx3.many-mx.example.": {"10.0.1.3"}, + "mx4.many-mx.example.": {"10.0.1.4"}, + "mx5.many-mx.example.": {"10.0.1.5"}, + "mx6.many-mx.example.": {"10.0.1.6"}, + "mx7.many-mx.example.": {"10.0.1.7"}, + "mx8.many-mx.example.": {"10.0.1.8"}, + "mx9.many-mx.example.": {"10.0.1.9"}, + "mx10.many-mx.example.": {"10.0.1.10"}, + "mx11.many-mx.example.": {"10.0.1.11"}, + }, + AAAA: map[string][]string{ + "mail.mox.example.": {"2001:db8::1"}, + }, + MX: map[string][]*net.MX{ + "no-mx.example.": {{Host: ".", Pref: 10}}, + "many-mx.example.": { + {Host: "mx1.many-mx.example.", Pref: 1}, + {Host: "mx2.many-mx.example.", Pref: 2}, + {Host: "mx3.many-mx.example.", Pref: 3}, + {Host: "mx4.many-mx.example.", Pref: 4}, + {Host: "mx5.many-mx.example.", Pref: 5}, + {Host: "mx6.many-mx.example.", Pref: 6}, + {Host: "mx7.many-mx.example.", Pref: 7}, + {Host: "mx8.many-mx.example.", Pref: 8}, + {Host: "mx9.many-mx.example.", Pref: 9}, + {Host: "mx10.many-mx.example.", Pref: 10}, + {Host: "mx11.many-mx.example.", Pref: 11}, + }, + }, + PTR: map[string][]string{ + "2001:db8::1": {"mail.mox.example."}, + "10.0.1.1": {"mx1.many-mx.example.", 
"mx2.many-mx.example.", "mx3.many-mx.example.", "mx4.many-mx.example.", "mx5.many-mx.example.", "mx6.many-mx.example.", "mx7.many-mx.example.", "mx8.many-mx.example.", "mx9.many-mx.example.", "mx10.many-mx.example.", "mx11.many-mx.example."}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "temperror.example."}: {}, + }, + } + + // IPv6 remote IP. + test(r, Args{RemoteIP: net.ParseIP("2001:db8::1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "mox.example"}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("2001:fa11::1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "mox.example"}}, StatusFail, "", "", nil) + + // Use EHLO identity. + test(r, Args{RemoteIP: net.ParseIP("2001:db8::1"), HelloDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mox.example"}}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("2001:db8::1"), HelloDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mail.mox.example"}}}, StatusNone, "", "", ErrNoRecord) + + // Too many void lookups. + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "void.example"}}, StatusPass, "", "", nil) // IP found after 2 void lookups, but before 3rd. + test(r, Args{RemoteIP: net.ParseIP("1.1.1.1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "void.example"}}, StatusPermerror, "", "", ErrTooManyVoidLookups) // IP not found, not doing 3rd lookup. + + // Too many DNS requests. + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "loop.example"}}, StatusPermerror, "", "", ErrTooManyDNSRequests) // Self-referencing record, will abort after 10 includes. + + // a:other where other does not exist. + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "a-unknown.example"}}, StatusNeutral, "", "", nil) + + // Expand with an invalid macro. 
+ test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "include-bad-expand.example"}}, StatusPermerror, "", "", ErrMacroSyntax) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "exists-bad-expand.example"}}, StatusPermerror, "", "", ErrMacroSyntax) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "redir-bad-expand.example"}}, StatusPermerror, "", "", ErrMacroSyntax) + + // Expand with invalid character (because macros are not expanded). + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "a-bad-expand.example"}}, StatusPermerror, "", "", ErrName) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "mx-bad-expand.example"}}, StatusPermerror, "", "", ErrName) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "ptr-bad-expand.example"}}, StatusPermerror, "", "", ErrName) + + // Include with varying results. + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "include-temperror.example"}}, StatusTemperror, "", "", ErrDNS) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "include-none.example"}}, StatusPermerror, "", "", ErrNoRecord) + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "include-permerror.example"}}, StatusPermerror, "", "", ErrName) + + // MX with explicit "." for "no mail". + test(r, Args{RemoteIP: net.ParseIP("1.2.3.4"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "no-mx.example"}}, StatusFail, "", "", nil) + + // MX names beyond 10th entry result in Permerror. 
+ test(r, Args{RemoteIP: net.ParseIP("10.0.1.1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-mx.example"}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.10"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-mx.example"}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.11"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-mx.example"}}, StatusPermerror, "", "", ErrTooManyDNSRequests) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.254"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-mx.example"}}, StatusPermerror, "", "", ErrTooManyDNSRequests) + + // PTR names beyond 10th entry are ignored. + test(r, Args{RemoteIP: net.ParseIP("10.0.1.1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-ptr.example"}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.2"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "many-ptr.example"}}, StatusSoftfail, "", "", nil) + + // Explanation from txt records. + test(r, Args{RemoteIP: net.ParseIP("10.0.1.1"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "expl.example"}}, StatusPass, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.2"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "expl.example"}}, StatusFail, "", "your ip 10.0.1.2 is not allowed", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.3"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "expl.example"}}, StatusNeutral, "", "", nil) + test(r, Args{RemoteIP: net.ParseIP("10.0.1.2"), MailFromLocalpart: "x", MailFromDomain: dns.Domain{ASCII: "expl-multi.example"}}, StatusFail, "", "your ip 10.0.1.2 is not allowed", nil) + + // Verify with IP EHLO. 
+ test(r, Args{RemoteIP: net.ParseIP("2001:db8::1"), HelloDomain: dns.IPDomain{IP: net.ParseIP("::1")}}, StatusNone, "", "", nil) +} + +func TestEvaluate(t *testing.T) { + record := &Record{} + resolver := dns.MockResolver{} + args := Args{} + status, _, _, _ := Evaluate(context.Background(), record, resolver, args) + if status != StatusNone { + t.Fatalf("got status %q, expected none", status) + } + + args = Args{ + HelloDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "test.example"}}, + } + status, mechanism, _, err := Evaluate(context.Background(), record, resolver, args) + if status != StatusNeutral || mechanism != "default" || err != nil { + t.Fatalf("got status %q, mechanism %q, err %v, expected neutral, default, no error", status, mechanism, err) + } +} diff --git a/start.go b/start.go new file mode 100644 index 0000000..57b52c0 --- /dev/null +++ b/start.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + + "github.com/mjl-/mox/dmarcdb" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/http" + "github.com/mjl-/mox/imapserver" + "github.com/mjl-/mox/mtastsdb" + "github.com/mjl-/mox/queue" + "github.com/mjl-/mox/smtpserver" + "github.com/mjl-/mox/store" + "github.com/mjl-/mox/tlsrptdb" +) + +// start initializes all packages, starts all listeners and the switchboard +// goroutine, then returns. 
+func start(mtastsdbRefresher bool) error { + if err := dmarcdb.Init(); err != nil { + return fmt.Errorf("dmarc init: %s", err) + } + + if err := mtastsdb.Init(mtastsdbRefresher); err != nil { + return fmt.Errorf("mtasts init: %s", err) + } + + if err := tlsrptdb.Init(); err != nil { + return fmt.Errorf("tlsrpt init: %s", err) + } + + done := make(chan struct{}, 1) + if err := queue.Start(dns.StrictResolver{Pkg: "queue"}, done); err != nil { + return fmt.Errorf("queue start: %s", err) + } + + smtpserver.ListenAndServe() + imapserver.ListenAndServe() + http.ListenAndServe() + go func() { + <-store.Switchboard() + }() + return nil +} diff --git a/store/account.go b/store/account.go new file mode 100644 index 0000000..8278c3c --- /dev/null +++ b/store/account.go @@ -0,0 +1,1139 @@ +/* +Package store implements storage for accounts, their mailboxes, IMAP +subscriptions and messages, and broadcasts updates (e.g. mail delivery) to +interested sessions (e.g. IMAP connections). + +Layout of storage for accounts: + + /accounts//index.db + /accounts//msg/[a-zA-Z0-9_-]+/ + +Index.db holds tables for user information, mailboxes, and messages. Messages +are stored in the msg/ subdirectory, each in their own file. The on-disk message +does not contain headers generated during an incoming SMTP transaction, such as +Received and Authentication-Results headers. Those are in the database to +prevent having to rewrite incoming messages (e.g. Authentication-Result for DKIM +signatures can only be determined after having read the message). Messages must +be read through MsgReader, which transparently adds the prefix from the +database. +*/ +package store + +// todo: make up a function naming scheme that indicates whether caller should broadcast changes. +// todo: fewer (no?) "X" functions, but only explicit error handling. 
+ +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/crypto/bcrypt" + "golang.org/x/text/unicode/norm" + + "github.com/mjl-/bstore" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" + "github.com/mjl-/mox/moxio" + "github.com/mjl-/mox/publicsuffix" + "github.com/mjl-/mox/scram" + "github.com/mjl-/mox/smtp" +) + +var xlog = mlog.New("store") + +var ( + ErrUnknownMailbox = errors.New("no such mailbox") + ErrUnknownCredentials = errors.New("credentials not found") + ErrAccountUnknown = errors.New("no such account") +) + +var subjectpassRand = mox.NewRand() + +var InitialMailboxes = []string{"Inbox", "Sent", "Archive", "Trash", "Drafts", "Junk"} + +// Password holds a bcrypt hash for logging in with SMTP/IMAP/admin. +type Password struct { + Hash string + SCRAMSHA256 struct { + Salt []byte + Iterations int + SaltedPassword []byte + } +} + +// Subjectpass holds the secret key used to sign subjectpass tokens. +type Subjectpass struct { + Email string // Our destination address (canonical, with catchall localpart stripped). + Key string +} + +// NextUIDValidity is a singleton record in the database with the next UIDValidity +// to use for the next mailbox. +type NextUIDValidity struct { + ID int // Just a single record with ID 1. + Next uint32 +} + +// Mailbox is collection of messages, e.g. Inbox or Sent. +type Mailbox struct { + ID int64 + + // "Inbox" is the name for the special IMAP "INBOX". Slash separated + // for hierarchy. + Name string `bstore:"nonzero,unique"` + + // If UIDs are invalidated, e.g. when renaming a mailbox to a previously existing + // name, UIDValidity must be changed. Used by IMAP for synchronization. + UIDValidity uint32 + + // UID likely to be assigned to next message. Used by IMAP to detect messages + // delivered to a mailbox. 
+ UIDNext UID + + // Special-use hints. The mailbox holds these types of messages. Used + // in IMAP LIST (mailboxes) response. + Archive bool + Draft bool + Junk bool + Sent bool + Trash bool +} + +// Subscriptions are separate from existence of mailboxes. +type Subscription struct { + Name string +} + +// Flags for a mail message. +type Flags struct { + Seen bool + Answered bool + Flagged bool + Forwarded bool + Junk bool + Notjunk bool + Deleted bool + Draft bool + Phishing bool + MDNSent bool +} + +// FlagsAll is all flags set, for use as mask. +var FlagsAll = Flags{true, true, true, true, true, true, true, true, true, true} + +// Validation of "message From" domain. +type Validation uint8 + +const ( + ValidationUnknown Validation = 0 + ValidationStrict Validation = 1 // Like DMARC, with strict policies. + ValidationDMARC Validation = 2 // Actual DMARC policy. + ValidationRelaxed Validation = 3 // Like DMARC, with relaxed policies. + ValidationPass Validation = 4 // For SPF. + ValidationNeutral Validation = 5 // For SPF. + ValidationTemperror Validation = 6 + ValidationPermerror Validation = 7 + ValidationFail Validation = 8 + ValidationSoftfail Validation = 9 // For SPF. + ValidationNone Validation = 10 // E.g. No records. +) + +// Message stored in database and per-message file on disk. +// +// Contents are always the combined data from MsgPrefix and the on-disk file named +// based on ID. +// +// Messages always have a header section, even if empty. Incoming messages without +// header section must get an empty header section added before inserting. +type Message struct { + // ID, unchanged over lifetime, determines path to on-disk msg file. + // Set during deliver. + ID int64 + + UID UID `bstore:"nonzero"` // UID, for IMAP. Set during deliver. + MailboxID int64 `bstore:"nonzero,unique MailboxID+UID,index MailboxID+Received,ref Mailbox"` + + // Mailbox message originally delivered to. I.e. not changed when moved to Trash or + // Junk. 
Useful for per-mailbox reputation. Not a bstore reference to prevent + // having to update all messages in a mailbox when the original mailbox is removed. + // Use of this field requires checking if the mailbox still exists. + MailboxOrigID int64 + + Received time.Time `bstore:"default now,index"` + + // Full IP address of remote SMTP server. Empty if not delivered over + // SMTP. + RemoteIP string + RemoteIPMasked1 string `bstore:"index RemoteIPMasked1+Received"` // For IPv4 /32, for IPv6 /64, for reputation. + RemoteIPMasked2 string `bstore:"index RemoteIPMasked2+Received"` // For IPv4 /26, for IPv6 /48. + RemoteIPMasked3 string `bstore:"index RemoteIPMasked3+Received"` // For IPv4 /21, for IPv6 /32. + + EHLODomain string `bstore:"index EHLODomain+Received"` // Only set if present and not an IP address. Unicode string. + MailFrom string // With localpart and domain. Can be empty. + MailFromLocalpart smtp.Localpart // SMTP "MAIL FROM", can be empty. + MailFromDomain string `bstore:"index MailFromDomain+Received"` // Only set if it is a domain, not an IP. Unicode string. + RcptToLocalpart smtp.Localpart // SMTP "RCPT TO", can be empty. + RcptToDomain string // Unicode string. + + // Parsed "From" message header, used for reputation along with domain validation. + MsgFromLocalpart smtp.Localpart + MsgFromDomain string `bstore:"index MsgFromDomain+Received"` // Unicode string. + MsgFromOrgDomain string `bstore:"index MsgFromOrgDomain+Received"` // Unicode string. + + // Simplified statements of the Validation fields below, used for incoming messages + // to check reputation. + EHLOValidated bool + MailFromValidated bool + MsgFromValidated bool + + EHLOValidation Validation // Validation can also take reverse IP lookup into account, not only SPF. + MailFromValidation Validation // Can have SPF-specific validations like ValidationSoftfail. + MsgFromValidation Validation // Desirable validations: Strict, DMARC, Relaxed. Will not be just Pass. 
+
+	// todo: needs an "in" index, which bstore does not yet support. for performance while checking reputation.
+	DKIMDomains []string // Domains with verified DKIM signatures. Unicode string.
+
+	// Value of Message-Id header. Only set for messages that were
+	// delivered to the rejects mailbox. For ensuring such messages are
+	// delivered only once. Value includes <>.
+	MessageID string `bstore:"index"`
+
+	MessageHash []byte // Hash of message. For rejects delivery, so optional like MessageID.
+	Flags
+	Size      int64
+	MsgPrefix []byte // Typically holds received headers and/or header separator.
+
+	// ParsedBuf message structure. Currently saved as JSON of message.Part because bstore
+	// cannot yet store recursive types. Created when first needed, and saved in the
+	// database.
+	ParsedBuf []byte
+}
+
+// LoadPart returns a message.Part by reading from m.ParsedBuf.
+func (m Message) LoadPart(r io.ReaderAt) (message.Part, error) {
+	if m.ParsedBuf == nil {
+		return message.Part{}, fmt.Errorf("message not parsed")
+	}
+	var p message.Part
+	err := json.Unmarshal(m.ParsedBuf, &p)
+	if err != nil {
+		return p, fmt.Errorf("unmarshal message part: %w", err)
+	}
+	p.SetReaderAt(r)
+	return p, nil
+}
+
+// Recipient represents the recipient of a message. It is tracked to allow
+// first-time incoming replies from users this account has sent messages to. On
+// IMAP append to Sent, the message is parsed and recipients are inserted as
+// recipient. Recipients are never removed other than for removing the message. On
+// IMAP move/copy, recipients aren't modified either.
+// This works by the assumption that an IMAP client simply appends messages to the
+// Sent mailbox (as opposed to copying messages from some place).
+type Recipient struct {
+	ID        int64
+	MessageID int64 `bstore:"nonzero,ref Message"` // Ref gives it its own index, useful for fast removal as well.
+	Localpart smtp.Localpart `bstore:"nonzero"`
+	Domain    string         `bstore:"nonzero,index Domain+Localpart"` // Unicode string.
+	OrgDomain string         `bstore:"nonzero,index"`                  // Unicode string.
+	Sent      time.Time      `bstore:"nonzero"`
+}
+
+// Account holds the information about a user, including mailboxes, messages, IMAP subscriptions.
+type Account struct {
+	Name   string     // Name, according to configuration.
+	Dir    string     // Directory where account files, including the database, bloom filter, and mail messages, are stored for this account.
+	DBPath string     // Path to database with mailboxes, messages, etc.
+	DB     *bstore.DB // Open database connection.
+
+	// Write lock must be held for account/mailbox modifications including message delivery.
+	// Read lock for reading mailboxes/messages.
+	// When making changes to mailboxes/messages, changes must be broadcasted before
+	// releasing the lock to ensure proper UID ordering.
+	sync.RWMutex
+
+	nused int // Reference count, while >0, this account is alive and shared.
+}
+
+func xcheckf(err error, format string, args ...any) {
+	if err != nil {
+		msg := fmt.Sprintf(format, args...)
+		panic(fmt.Errorf("%s: %w", msg, err))
+	}
+}
+
+// InitialUIDValidity returns a UIDValidity used for initializing an account.
+// It can be replaced during tests with a predictable value.
+var InitialUIDValidity = func() uint32 {
+	return uint32(time.Now().Unix() >> 1) // A 2-second resolution will get us far enough beyond 2038.
+}
+
+var openAccounts = struct {
+	names map[string]*Account
+	sync.Mutex
+}{
+	names: map[string]*Account{},
+}
+
+func closeAccount(acc *Account) (rerr error) {
+	openAccounts.Lock()
+	acc.nused--
+	defer openAccounts.Unlock()
+	if acc.nused == 0 {
+		rerr = acc.DB.Close()
+		acc.DB = nil
+		delete(openAccounts.names, acc.Name)
+	}
+	return
+}
+
+// OpenAccount opens an account by name.
+//
+// No additional data path prefix or ".db" suffix should be added to the name.
+// A single shared account exists per name.
+func OpenAccount(name string) (*Account, error) { + openAccounts.Lock() + defer openAccounts.Unlock() + if acc, ok := openAccounts.names[name]; ok { + acc.nused++ + return acc, nil + } + + if _, ok := mox.Conf.Account(name); !ok { + return nil, ErrAccountUnknown + } + + acc, err := openAccount(name) + if err != nil { + return nil, err + } + acc.nused++ + openAccounts.names[name] = acc + return acc, nil +} + +// openAccount opens an existing account, or creates it if it is missing. +func openAccount(name string) (a *Account, rerr error) { + dir := filepath.Join(mox.DataDirPath("accounts"), name) + dbpath := filepath.Join(dir, "index.db") + + // Create account if it doesn't exist yet. + isNew := false + if _, err := os.Stat(dbpath); err != nil && os.IsNotExist(err) { + isNew = true + os.MkdirAll(dir, 0770) + } + + db, err := bstore.Open(dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, NextUIDValidity{}, Message{}, Recipient{}, Mailbox{}, Subscription{}, Password{}, Subjectpass{}) + if err != nil { + return nil, err + } + + defer func() { + if rerr != nil { + db.Close() + if isNew { + os.Remove(dbpath) + } + } + }() + + if isNew { + if err := initAccount(db); err != nil { + return nil, fmt.Errorf("initializing account: %v", err) + } + } + + return &Account{ + Name: name, + Dir: dir, + DBPath: dbpath, + DB: db, + }, nil +} + +func initAccount(db *bstore.DB) error { + return db.Write(func(tx *bstore.Tx) error { + uidvalidity := InitialUIDValidity() + + mailboxes := InitialMailboxes + defaultMailboxes := mox.Conf.Static.DefaultMailboxes + if len(defaultMailboxes) > 0 { + mailboxes = []string{"Inbox"} + for _, name := range defaultMailboxes { + if strings.EqualFold(name, "Inbox") { + continue + } + mailboxes = append(mailboxes, name) + } + } + for _, name := range mailboxes { + mb := Mailbox{Name: name, UIDValidity: uidvalidity, UIDNext: 1} + if strings.HasPrefix(name, "Archive") { + mb.Archive = true + } else if strings.HasPrefix(name, "Drafts") { + 
mb.Draft = true + } else if strings.HasPrefix(name, "Junk") { + mb.Junk = true + } else if strings.HasPrefix(name, "Sent") { + mb.Sent = true + } else if strings.HasPrefix(name, "Trash") { + mb.Trash = true + } + if err := tx.Insert(&mb); err != nil { + return fmt.Errorf("creating mailbox: %w", err) + } + + if err := tx.Insert(&Subscription{name}); err != nil { + return fmt.Errorf("adding subscription: %w", err) + } + } + + uidvalidity++ + if err := tx.Insert(&NextUIDValidity{1, uidvalidity}); err != nil { + return fmt.Errorf("inserting nextuidvalidity: %w", err) + } + return nil + }) +} + +// Close reduces the reference count, and closes the database connection when +// it was the last user. +func (a *Account) Close() error { + return closeAccount(a) +} + +// Conf returns the configuration for this account if it still exists. During +// an SMTP session, a configuration update may drop an account. +func (a *Account) Conf() (config.Account, bool) { + return mox.Conf.Account(a.Name) +} + +// NextUIDValidity returns the next new/unique uidvalidity to use for this account. +func (a *Account) NextUIDValidity(tx *bstore.Tx) (uint32, error) { + nuv := NextUIDValidity{ID: 1} + if err := tx.Get(&nuv); err != nil { + return 0, err + } + v := nuv.Next + nuv.Next++ + if err := tx.Update(&nuv); err != nil { + return 0, err + } + return v, nil +} + +// WithWLock runs fn with account writelock held. Necessary for account/mailbox modification. For message delivery, a read lock is required. +func (a *Account) WithWLock(fn func()) { + a.Lock() + defer a.Unlock() + fn() +} + +// WithRLock runs fn with account read lock held. Needed for message delivery. +func (a *Account) WithRLock(fn func()) { + a.RLock() + defer a.RUnlock() + fn() +} + +// DeliverX delivers a mail message to the account. +// +// If consumeFile is set, the original msgFile is moved/renamed or copied and +// removed as part of delivery. 
+// +// The message, with msg.MsgPrefix and msgFile combined, must have a header +// section. The caller is responsible for adding a header separator to +// msg.MsgPrefix if missing from an incoming message. +// +// If isSent is true, the message is parsed for its recipients (to/cc/bcc). Their +// domains are added to Recipients for use in dmarc reputation. +// +// If sync is true, the message file and its directory are synced. Should be true +// for regular mail delivery, but can be false when importing many messages. +// +// if train is true, the junkfilter (if configured) is trained with the message. +// Should be used for regular mail delivery, but can be false when importing many +// messages. +// +// Must be called with account rlock or wlock. +// +// Caller must broadcast new message. +func (a *Account) DeliverX(log *mlog.Log, tx *bstore.Tx, m *Message, msgFile *os.File, consumeFile, isSent, sync, train bool) { + mb := Mailbox{ID: m.MailboxID} + err := tx.Get(&mb) + xcheckf(err, "get mailbox") + m.UID = mb.UIDNext + mb.UIDNext++ + err = tx.Update(&mb) + xcheckf(err, "updating mailbox nextuid") + + var part *message.Part + if m.ParsedBuf == nil { + mr := FileMsgReader(m.MsgPrefix, msgFile) // We don't close, it would close the msgFile. + p, err := message.EnsurePart(mr, m.Size) + if err != nil { + log.Infox("parsing delivered message", err, mlog.Field("parse", ""), mlog.Field("message", m.ID)) + // We continue, p is still valid. + } + part = &p + buf, err := json.Marshal(part) + xcheckf(err, "marshal parsed message") + m.ParsedBuf = buf + } + + err = tx.Insert(m) + xcheckf(err, "inserting message") + + if isSent { + // Attempt to parse the message for its To/Cc/Bcc headers, which we insert into Recipient. 
+ if part == nil { + var p message.Part + if err := json.Unmarshal(m.ParsedBuf, &p); err != nil { + log.Errorx("unmarshal parsed message for its to,cc,bcc headers, continuing", err, mlog.Field("parse", "")) + } else { + part = &p + } + } + if part != nil && part.Envelope != nil { + e := part.Envelope + sent := e.Date + if sent.IsZero() { + sent = m.Received + } + if sent.IsZero() { + sent = time.Now() + } + addrs := append(append(e.To, e.CC...), e.BCC...) + for _, addr := range addrs { + if addr.User == "" { + // Would trigger error because Recipient.Localpart must be nonzero. todo: we could allow empty localpart in db, and filter by not using FilterNonzero. + log.Info("to/cc/bcc address with empty localpart, not inserting as recipient", mlog.Field("address", addr)) + continue + } + d, err := dns.ParseDomain(addr.Host) + if err != nil { + log.Debugx("parsing domain in to/cc/bcc address", err, mlog.Field("address", addr)) + continue + } + mr := Recipient{ + MessageID: m.ID, + Localpart: smtp.Localpart(addr.User), + Domain: d.Name(), + OrgDomain: publicsuffix.Lookup(context.TODO(), d).Name(), + Sent: sent, + } + err = tx.Insert(&mr) + xcheckf(err, "inserting sent message recipients") + } + } + } + + msgPath := a.MessagePath(m.ID) + msgDir := filepath.Dir(msgPath) + os.MkdirAll(msgDir, 0770) + + // Sync file data to disk. + if sync { + err = msgFile.Sync() + xcheckf(err, "fsync message file") + } + + if consumeFile { + err := os.Rename(msgFile.Name(), msgPath) + xcheckf(err, "moving msg file to destination directory") + } else if err := os.Link(msgFile.Name(), msgPath); err != nil { + // Assume file system does not support hardlinks. Copy it instead. 
+ err := writeFile(msgPath, &moxio.AtReader{R: msgFile}) + xcheckf(err, "copying message to new file") + } + + if sync { + err = moxio.SyncDir(msgDir) + xcheckf(err, "sync directory") + } + + if train { + conf, _ := a.Conf() + if mb.Name != conf.RejectsMailbox { + err := a.Train(log, []Message{*m}) + xcheckf(err, "train junkfilter with new message") + } + } +} + +// write contents of r to new file dst, for delivering a message. +func writeFile(dst string, r io.Reader) error { + df, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0660) + if err != nil { + return fmt.Errorf("create: %w", err) + } + defer func() { + if df != nil { + df.Close() + } + }() + if _, err := io.Copy(df, r); err != nil { + return fmt.Errorf("copy: %s", err) + } else if err := df.Sync(); err != nil { + return fmt.Errorf("sync: %s", err) + } else if err := df.Close(); err != nil { + return fmt.Errorf("close: %s", err) + } + df = nil + return nil +} + +// SetPassword saves a new password for this account. This password is used for +// IMAP, SMTP (submission) sessions and the HTTP account web page. 
func (a *Account) SetPassword(password string) error {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return fmt.Errorf("generating password hash: %w", err)
	}

	err = a.DB.Write(func(tx *bstore.Tx) error {
		// Replace any existing password record; there is at most one.
		if _, err := bstore.QueryTx[Password](tx).Delete(); err != nil {
			return fmt.Errorf("deleting existing password: %v", err)
		}
		var pw Password
		pw.Hash = string(hash)
		// Precompute SCRAM-SHA-256 credentials so authentication doesn't need the
		// plaintext password.
		pw.SCRAMSHA256.Salt = scram.MakeRandom()
		pw.SCRAMSHA256.Iterations = 4096
		pw.SCRAMSHA256.SaltedPassword = scram.SaltPassword(password, pw.SCRAMSHA256.Salt, pw.SCRAMSHA256.Iterations)
		if err := tx.Insert(&pw); err != nil {
			return fmt.Errorf("inserting new password: %v", err)
		}
		return nil
	})
	if err == nil {
		xlog.Info("new password set for account", mlog.Field("account", a.Name))
	}
	return err
}

// Subjectpass returns the signing key for use with subjectpass for the given
// email address with canonical localpart. A key is generated and stored on
// first use for an address.
func (a *Account) Subjectpass(email string) (key string, err error) {
	return key, a.DB.Write(func(tx *bstore.Tx) error {
		v := Subjectpass{Email: email}
		err := tx.Get(&v)
		if err == nil {
			key = v.Key
			return nil
		}
		if !errors.Is(err, bstore.ErrAbsent) {
			return fmt.Errorf("get subjectpass key from accounts database: %w", err)
		}
		// Not present yet: generate a random 16-character key and store it.
		key = ""
		const chars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		for i := 0; i < 16; i++ {
			key += string(chars[subjectpassRand.Intn(len(chars))])
		}
		v.Key = key
		return tx.Insert(&v)
	})
}

// Ensure mailbox is present in database, adding records for the mailbox and its
// parents if they aren't present.
//
// If subscribe is true, any mailboxes that were created will also be subscribed to.
// Caller must hold account wlock.
// Caller must propagate changes if any.
func (a *Account) MailboxEnsureX(tx *bstore.Tx, name string, subscribe bool) (mb Mailbox, changes []Change) {
	if norm.NFC.String(name) != name {
		panic("mailbox name not normalized")
	}

	// Quick sanity check.
	if strings.EqualFold(name, "inbox") && name != "Inbox" {
		panic("bad casing for inbox")
	}

	// Fetch the top-level mailbox and everything below it in one query.
	elems := strings.Split(name, "/")
	q := bstore.QueryTx[Mailbox](tx)
	q.FilterFn(func(mb Mailbox) bool {
		return mb.Name == elems[0] || strings.HasPrefix(mb.Name, elems[0]+"/")
	})
	l, err := q.List()
	xcheckf(err, "list mailboxes")

	mailboxes := map[string]Mailbox{}
	for _, xmb := range l {
		mailboxes[xmb.Name] = xmb
	}

	// Walk the path, creating any missing mailbox at each level; mb ends up as
	// the mailbox for the full name.
	p := ""
	for _, elem := range elems {
		if p != "" {
			p += "/"
		}
		p += elem
		var ok bool
		mb, ok = mailboxes[p]
		if ok {
			continue
		}
		uidval, err := a.NextUIDValidity(tx)
		xcheckf(err, "next uid validity")
		mb = Mailbox{
			Name:        p,
			UIDValidity: uidval,
			UIDNext:     1,
		}
		err = tx.Insert(&mb)
		xcheckf(err, "creating new mailbox")

		if subscribe {
			// A subscription may already exist for a mailbox that didn't; ignore the
			// resulting unique-constraint error.
			err := tx.Insert(&Subscription{p})
			if err != nil && !errors.Is(err, bstore.ErrUnique) {
				xcheckf(err, "subscribing to mailbox")
			}
		}
		changes = append(changes, ChangeAddMailbox{Name: p, Flags: []string{`\Subscribed`}})
	}
	return
}

// Check if mailbox exists.
// Caller must hold account rlock.
func (a *Account) MailboxExistsX(tx *bstore.Tx, name string) bool {
	q := bstore.QueryTx[Mailbox](tx)
	q.FilterEqual("Name", name)
	exists, err := q.Exists()
	xcheckf(err, "checking existence")
	return exists
}

// MailboxFindX finds a mailbox by name, returning nil if it does not exist.
func (a *Account) MailboxFindX(tx *bstore.Tx, name string) *Mailbox {
	q := bstore.QueryTx[Mailbox](tx)
	q.FilterEqual("Name", name)
	mb, err := q.Get()
	if err == bstore.ErrAbsent {
		return nil
	}
	xcheckf(err, "lookup mailbox")
	return &mb
}

// SubscriptionEnsureX ensures a subscription for name exists. The mailbox does not
// have to exist. Any parents are not automatically subscribed.
// Changes are broadcasted.
func (a *Account) SubscriptionEnsureX(tx *bstore.Tx, name string) []Change {
	// Already subscribed: nothing to do, no changes.
	err := tx.Get(&Subscription{name})
	if err == nil {
		return nil
	}

	err = tx.Insert(&Subscription{name})
	xcheckf(err, "inserting subscription")

	// If the mailbox exists, only the subscription is new; otherwise announce a
	// non-existent, subscribed mailbox.
	q := bstore.QueryTx[Mailbox](tx)
	q.FilterEqual("Name", name)
	exists, err := q.Exists()
	xcheckf(err, "looking up mailbox for subscription")
	if exists {
		return []Change{ChangeAddSubscription{name}}
	}
	return []Change{ChangeAddMailbox{Name: name, Flags: []string{`\Subscribed`, `\NonExistent`}}}
}

// List mailboxes. Only those that exist, so names with only a subscription are not returned.
// Caller must have account rlock held.
func (a *Account) MailboxesX(tx *bstore.Tx) []Mailbox {
	l, err := bstore.QueryTx[Mailbox](tx).List()
	xcheckf(err, "fetching mailboxes")
	return l
}

// MessageRuleset returns the first ruleset (if any) that matches the message
// represented by msgPrefix and msgFile, with smtp and validation fields from m.
func MessageRuleset(log *mlog.Log, dest config.Destination, m *Message, msgPrefix []byte, msgFile *os.File) *config.Ruleset {
	if len(dest.Rulesets) == 0 {
		return nil
	}

	mr := FileMsgReader(msgPrefix, msgFile) // We don't close, it would close the msgFile.
	p, err := message.Parse(mr)
	if err != nil {
		log.Errorx("parsing message for evaluating rulesets, continuing with headers", err, mlog.Field("parse", ""))
		// note: part is still set.
	}
	// todo optimize: only parse header if needed for rulesets. and probably reuse an earlier parsing.
	header, err := p.Header()
	if err != nil {
		log.Errorx("parsing message headers for evaluating rulesets, delivering to default mailbox", err, mlog.Field("parse", ""))
		// todo: reject message?
		return nil
	}

ruleset:
	for _, rs := range dest.Rulesets {
		// All conditions of a ruleset must hold; any failing condition moves on to
		// the next ruleset.
		if rs.SMTPMailFromRegexpCompiled != nil {
			if !rs.SMTPMailFromRegexpCompiled.MatchString(m.MailFrom) {
				continue ruleset
			}
		}

		if !rs.VerifiedDNSDomain.IsZero() {
			// The verified domain may match the EHLO domain, MAIL FROM domain or any
			// DKIM domain, either exactly or as a parent domain.
			d := rs.VerifiedDNSDomain.Name()
			suffix := "." + d
			matchDomain := func(s string) bool {
				return s == d || strings.HasSuffix(s, suffix)
			}
			var ok bool
			if m.EHLOValidated && matchDomain(m.EHLODomain) {
				ok = true
			}
			if m.MailFromValidated && matchDomain(m.MailFromDomain) {
				ok = true
			}
			for _, d := range m.DKIMDomains {
				if matchDomain(d) {
					ok = true
					break
				}
			}
			if !ok {
				continue ruleset
			}
		}

	header:
		// Each header regexp pair must match some (key, value); otherwise the
		// ruleset does not apply.
		for _, t := range rs.HeadersRegexpCompiled {
			for k, vl := range header {
				k = strings.ToLower(k)
				if !t[0].MatchString(k) {
					continue
				}
				for _, v := range vl {
					v = strings.ToLower(strings.TrimSpace(v))
					if t[1].MatchString(v) {
						continue header
					}
				}
			}
			continue ruleset
		}
		return &rs
	}
	return nil
}

// MessagePath returns the file system path of a message.
func (a *Account) MessagePath(messageID int64) string {
	return filepath.Join(a.Dir, "msg", MessagePath(messageID))
}

// MessageReader opens a message for reading, transparently combining the
// message prefix with the original incoming message.
func (a *Account) MessageReader(m Message) *MsgReader {
	return &MsgReader{prefix: m.MsgPrefix, path: a.MessagePath(m.ID), size: m.Size}
}

// Deliver delivers an email to dest, based on the configured rulesets.
//
// Caller must hold account wlock (mailbox may be created).
// Message delivery and possible mailbox creation are broadcasted.
+func (a *Account) Deliver(log *mlog.Log, dest config.Destination, m *Message, msgFile *os.File, consumeFile bool) error { + var mailbox string + rs := MessageRuleset(log, dest, m, m.MsgPrefix, msgFile) + if rs != nil { + mailbox = rs.Mailbox + } else if dest.Mailbox == "" { + mailbox = "Inbox" + } else { + mailbox = dest.Mailbox + } + return a.DeliverMailbox(log, mailbox, m, msgFile, consumeFile) +} + +// DeliverMailbox delivers an email to the specified mailbox. +// +// Caller must hold account wlock (mailbox may be created). +// Message delivery and possible mailbox creation are broadcasted. +func (a *Account) DeliverMailbox(log *mlog.Log, mailbox string, m *Message, msgFile *os.File, consumeFile bool) error { + var changes []Change + err := extransact(a.DB, true, func(tx *bstore.Tx) error { + mb, chl := a.MailboxEnsureX(tx, mailbox, true) + m.MailboxID = mb.ID + m.MailboxOrigID = mb.ID + changes = append(changes, chl...) + + a.DeliverX(log, tx, m, msgFile, consumeFile, mb.Sent, true, true) + return nil + }) + // todo: if rename succeeded but transaction failed, we should remove the file. + if err != nil { + return err + } + + changes = append(changes, ChangeAddUID{m.MailboxID, m.UID, m.Flags}) + comm := RegisterComm(a) + defer comm.Unregister() + comm.Broadcast(changes) + return nil +} + +// TidyRejectsMailbox removes old reject emails, and returns whether there is space for a new delivery. +// +// Caller most hold account wlock. +// Changes are broadcasted. +func (a *Account) TidyRejectsMailbox(rejectsMailbox string) (hasSpace bool, rerr error) { + var changes []Change + + err := extransact(a.DB, true, func(tx *bstore.Tx) error { + mb := a.MailboxFindX(tx, rejectsMailbox) + if mb == nil { + // No messages have been delivered yet. + hasSpace = true + return nil + } + + // Gather old messages to remove. 
+ old := time.Now().Add(-24 * time.Hour) + qdel := bstore.QueryTx[Message](tx) + qdel.FilterNonzero(Message{MailboxID: mb.ID}) + qdel.FilterLess("Received", old) + remove, err := qdel.List() + xcheckf(err, "listing old messages") + + changes = a.xremoveMessages(tx, mb, remove) + + // We allow up to n messages. + qcount := bstore.QueryTx[Message](tx) + qcount.FilterNonzero(Message{MailboxID: mb.ID}) + qcount.Limit(1000) + n, err := qcount.Count() + xcheckf(err, "counting rejects") + hasSpace = n < 1000 + + return nil + }) + + comm := RegisterComm(a) + defer comm.Unregister() + comm.Broadcast(changes) + + return hasSpace, err +} + +func (a *Account) xremoveMessages(tx *bstore.Tx, mb *Mailbox, l []Message) []Change { + if len(l) == 0 { + return nil + } + ids := make([]int64, len(l)) + anyids := make([]any, len(l)) + for i, m := range l { + ids[i] = m.ID + anyids[i] = m.ID + } + + // Remove any message recipients. Should not happen, but a user can move messages + // from a Sent mailbox to the rejects mailbox... + qdmr := bstore.QueryTx[Recipient](tx) + qdmr.FilterEqual("MessageID", anyids...) + _, err := qdmr.Delete() + xcheckf(err, "deleting from message recipient") + + // Actually remove the messages. + qdm := bstore.QueryTx[Message](tx) + qdm.FilterIDs(ids) + _, err = qdm.Delete() + xcheckf(err, "deleting from message recipient") + + changes := make([]Change, len(l)) + for i, m := range l { + changes[i] = ChangeRemoveUIDs{mb.ID, []UID{m.UID}} + } + return changes +} + +// RejectsRemove removes a message from the rejects mailbox if present. +// Caller most hold account wlock. +// Changes are broadcasted. +func (a *Account) RejectsRemove(log *mlog.Log, rejectsMailbox, messageID string) { + var changes []Change + + err := extransact(a.DB, true, func(tx *bstore.Tx) error { + mb := a.MailboxFindX(tx, rejectsMailbox) + if mb == nil { + return nil + } + + // Note: these cannot have Recipients. 
+ var remove []Message + q := bstore.QueryTx[Message](tx) + q.FilterNonzero(Message{MailboxID: mb.ID, MessageID: messageID}) + remove, err := q.List() + xcheckf(err, "listing messages to remove") + + changes = a.xremoveMessages(tx, mb, remove) + + return err + }) + if err != nil { + log.Errorx("removing message from rejects mailbox", err, mlog.Field("account", a.Name), mlog.Field("rejectsMailbox", rejectsMailbox), mlog.Field("messageID", messageID)) + } + + comm := RegisterComm(a) + defer comm.Unregister() + comm.Broadcast(changes) +} + +// We keep a cache of recent successful authentications, so we don't have to bcrypt successful calls each time. +var authCache struct { + sync.Mutex + success map[authKey]string +} + +type authKey struct { + email, hash string +} + +func init() { + authCache.success = map[authKey]string{} + go func() { + for { + authCache.Lock() + authCache.success = map[authKey]string{} + authCache.Unlock() + time.Sleep(15 * time.Minute) + } + }() +} + +// OpenEmailAuth opens an account given an email address and password. +// +// The email address may contain a catchall separator. 
func OpenEmailAuth(email string, password string) (acc *Account, rerr error) {
	acc, _, rerr = OpenEmail(email)
	if rerr != nil {
		return
	}

	// On authentication failure, release the account reference we just took.
	defer func() {
		if rerr != nil && acc != nil {
			acc.Close()
			acc = nil
		}
	}()

	pw, err := bstore.QueryDB[Password](acc.DB).Get()
	if err != nil {
		if err == bstore.ErrAbsent {
			return acc, ErrUnknownCredentials
		}
		return acc, fmt.Errorf("looking up password: %v", err)
	}
	// Check the auth cache first to skip the expensive bcrypt compare for recent
	// successful logins. Passwords shorter than 8 characters are never cached.
	authCache.Lock()
	ok := len(password) >= 8 && authCache.success[authKey{email, pw.Hash}] == password
	authCache.Unlock()
	if ok {
		return
	}
	if err := bcrypt.CompareHashAndPassword([]byte(pw.Hash), []byte(password)); err != nil {
		rerr = ErrUnknownCredentials
	} else {
		authCache.Lock()
		authCache.success[authKey{email, pw.Hash}] = password
		authCache.Unlock()
	}
	return
}

// OpenEmail opens an account given an email address.
//
// The email address may contain a catchall separator.
func OpenEmail(email string) (*Account, config.Destination, error) {
	addr, err := smtp.ParseAddress(email)
	if err != nil {
		return nil, config.Destination{}, fmt.Errorf("%w: %v", ErrUnknownCredentials, err)
	}
	accountName, _, dest, err := mox.FindAccount(addr.Localpart, addr.Domain, false)
	if err != nil && (errors.Is(err, mox.ErrAccountNotFound) || errors.Is(err, mox.ErrDomainNotFound)) {
		// Don't leak which addresses exist: unknown account/domain both map to
		// unknown credentials.
		return nil, config.Destination{}, ErrUnknownCredentials
	} else if err != nil {
		return nil, config.Destination{}, fmt.Errorf("looking up address: %v", err)
	}
	acc, err := OpenAccount(accountName)
	if err != nil {
		return nil, config.Destination{}, err
	}
	return acc, dest, nil
}

// 64 characters, must be power of 2 for MessagePath
const msgDirChars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-_"

// MessagePath returns the path of the on-disk message file, relative to the containing directory such as /msg or queue.
// Returns names like "AB/1".
func MessagePath(messageID int64) string {
	v := messageID >> 13 // 8k files per directory.
	dir := ""
	// Build one directory-name character per 6 bits of the remaining id.
	for {
		dir += string(msgDirChars[int(v)&(len(msgDirChars)-1)])
		v >>= 6
		if v == 0 {
			break
		}
	}
	return fmt.Sprintf("%s/%d", dir, messageID)
}

// Set returns a copy of f, with each flag that is true in mask set to the
// value from flags.
func (f Flags) Set(mask, flags Flags) Flags {
	set := func(d *bool, m, v bool) {
		if m {
			*d = v
		}
	}
	r := f
	set(&r.Seen, mask.Seen, flags.Seen)
	set(&r.Answered, mask.Answered, flags.Answered)
	set(&r.Flagged, mask.Flagged, flags.Flagged)
	set(&r.Forwarded, mask.Forwarded, flags.Forwarded)
	set(&r.Junk, mask.Junk, flags.Junk)
	set(&r.Notjunk, mask.Notjunk, flags.Notjunk)
	set(&r.Deleted, mask.Deleted, flags.Deleted)
	set(&r.Draft, mask.Draft, flags.Draft)
	set(&r.Phishing, mask.Phishing, flags.Phishing)
	set(&r.MDNSent, mask.MDNSent, flags.MDNSent)
	return r
}

// FlagsQuerySet returns a map with the flags that are true in mask, with
// values from flags.
func FlagsQuerySet(mask, flags Flags) map[string]any {
	r := map[string]any{}
	set := func(f string, m, v bool) {
		if m {
			r[f] = v
		}
	}
	set("Seen", mask.Seen, flags.Seen)
	set("Answered", mask.Answered, flags.Answered)
	set("Flagged", mask.Flagged, flags.Flagged)
	set("Forwarded", mask.Forwarded, flags.Forwarded)
	set("Junk", mask.Junk, flags.Junk)
	set("Notjunk", mask.Notjunk, flags.Notjunk)
	set("Deleted", mask.Deleted, flags.Deleted)
	set("Draft", mask.Draft, flags.Draft)
	set("Phishing", mask.Phishing, flags.Phishing)
	set("MDNSent", mask.MDNSent, flags.MDNSent)
	return r
}
diff --git a/store/account_test.go b/store/account_test.go
new file mode 100644
index 0000000..cfb562f
--- /dev/null
+++ b/store/account_test.go
@@ -0,0 +1,273 @@
package store

import (
	"os"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/mjl-/bstore"
	"github.com/mjl-/sconf"

	"github.com/mjl-/mox/config"
	"github.com/mjl-/mox/message"
	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/mox-"
)

// tcheck fails the test immediately if err is non-nil.
func tcheck(t *testing.T, err error, msg string) {
	t.Helper()
	if err != nil {
		t.Fatalf("%s: %s", msg, err)
	}
}

// TestMailbox exercises delivery, junk training, passwords, subjectpass keys,
// mailbox/subscription management and authentication against a fresh account.
func TestMailbox(t *testing.T) {
	os.RemoveAll("../testdata/store/data")
	mox.ConfigStaticPath = "../testdata/store/mox.conf"
	mox.MustLoadConfig()
	acc, err := OpenAccount("mjl")
	tcheck(t, err, "open account")
	defer acc.Close()
	switchDone := Switchboard()
	defer close(switchDone)

	log := mlog.New("store")

	msgFile, err := CreateMessageTemp("account-test")
	if err != nil {
		t.Fatalf("creating temp msg file: %s", err)
	}
	defer msgFile.Close()
	msgWriter := &message.Writer{Writer: msgFile}
	if _, err := msgWriter.Write([]byte(" message")); err != nil {
		t.Fatalf("writing to temp message: %s", err)
	}

	// NOTE(review): the angle-bracketed address/message-id values in these
	// literals were lost in this copy of the source and have been reconstructed
	// (the Message-Id must match the "m01@mox.example" used below) — confirm
	// against the original.
	msgPrefix := []byte("From: <mjl@mox.example>\r\nCc: <mjl@mox.example>\r\nSubject: test\r\nMessage-Id: <m01@mox.example>\r\n\r\n")
	msgPrefixCatchall := []byte("Subject: catchall\r\n\r\n")
	m := Message{
		Received:  time.Now(),
		Size:      int64(len(msgPrefix)) + msgWriter.Size,
		MsgPrefix: msgPrefix,
	}
	msent := m
	var mbsent Mailbox
	mbrejects := Mailbox{Name: "Rejects", UIDValidity: 1, UIDNext: 1}
	mreject := m
	mconsumed := Message{
		Received:  m.Received,
		Size:      int64(len(msgPrefixCatchall)) + msgWriter.Size,
		MsgPrefix: msgPrefixCatchall,
	}
	acc.WithWLock(func() {
		conf, _ := acc.Conf()
		err := acc.Deliver(xlog, conf.Destinations["mjl"], &m, msgFile, false)
		tcheck(t, err, "deliver without consume")

		err = acc.DB.Write(func(tx *bstore.Tx) error {
			var err error
			mbsent, err = bstore.QueryTx[Mailbox](tx).FilterNonzero(Mailbox{Name: "Sent"}).Get()
			tcheck(t, err, "sent mailbox")
			msent.MailboxID = mbsent.ID
			msent.MailboxOrigID = mbsent.ID
			acc.DeliverX(xlog, tx, &msent, msgFile, false, true, true, true)

			err = tx.Insert(&mbrejects)
			tcheck(t, err, "insert rejects mailbox")
			mreject.MailboxID = mbrejects.ID
			mreject.MailboxOrigID = mbrejects.ID
			acc.DeliverX(xlog, tx, &mreject, msgFile, false, false, true, true)

			return nil
		})
		tcheck(t, err, "deliver as sent and rejects")

		err = acc.Deliver(xlog, conf.Destinations["mjl"], &mconsumed, msgFile, true)
		tcheck(t, err, "deliver with consume")
	})

	m.Junk = true
	err = acc.Train(log, []Message{m})
	tcheck(t, err, "train as junk")

	flags := m.Flags

	m.Seen = true
	m.Junk = false
	jf, _, err := acc.OpenJunkFilter(log)
	tcheck(t, err, "open junk filter")
	err = acc.Retrain(log, jf, flags, m)
	tcheck(t, err, "retrain as non-junk")
	err = jf.Close()
	tcheck(t, err, "close junk filter")

	err = acc.Untrain(log, []Message{m})
	tcheck(t, err, "untrain non-junk")

	err = acc.SetPassword("testtest")
	tcheck(t, err, "set password")

	// Subjectpass keys are stable per address, distinct across addresses.
	key0, err := acc.Subjectpass("test@localhost")
	tcheck(t, err, "subjectpass")
	key1, err := acc.Subjectpass("test@localhost")
	tcheck(t, err, "subjectpass")
	if key0 != key1 {
		t.Fatalf("different keys for same address")
	}
	key2, err := acc.Subjectpass("test2@localhost")
	tcheck(t, err, "subjectpass")
	if key2 == key0 {
		t.Fatalf("same key for different address")
	}

	acc.WithWLock(func() {
		// Ensuring an existing mailbox should be a no-op, also in a read-only tx.
		err := acc.DB.Write(func(tx *bstore.Tx) error {
			acc.MailboxEnsureX(tx, "Testbox", true)
			return nil
		})
		tcheck(t, err, "ensure mailbox exists")
		err = acc.DB.Read(func(tx *bstore.Tx) error {
			acc.MailboxEnsureX(tx, "Testbox", true)
			return nil
		})
		tcheck(t, err, "ensure mailbox exists")

		err = acc.DB.Write(func(tx *bstore.Tx) error {
			acc.MailboxEnsureX(tx, "Testbox2", false)
			tcheck(t, err, "create mailbox")

			exists := acc.MailboxExistsX(tx, "Testbox2")
			if !exists {
				t.Fatalf("mailbox does not exist")
			}

			exists = acc.MailboxExistsX(tx, "Testbox3")
			if exists {
				t.Fatalf("mailbox does exist")
			}

			xmb := acc.MailboxFindX(tx, "Testbox3")
			if xmb != nil {
				t.Fatalf("did find Testbox3: %v", xmb)
			}
			xmb = acc.MailboxFindX(tx, "Testbox2")
			if xmb == nil {
				t.Fatalf("did not find Testbox2")
			}

			changes := acc.SubscriptionEnsureX(tx, "Testbox2")
			if len(changes) == 0 {
				t.Fatalf("new subscription did not result in changes")
			}
			changes = acc.SubscriptionEnsureX(tx, "Testbox2")
			if len(changes) != 0 {
				t.Fatalf("already present subscription resulted in changes")
			}

			return nil
		})
		tcheck(t, err, "write tx")

		// todo: check that messages are removed and changes sent.
		hasSpace, err := acc.TidyRejectsMailbox("Rejects")
		tcheck(t, err, "tidy rejects mailbox")
		if !hasSpace {
			t.Fatalf("no space for more rejects")
		}

		acc.RejectsRemove(log, "Rejects", "m01@mox.example")
	})

	// Run the auth tests twice for possible cache effects.
	for i := 0; i < 2; i++ {
		_, err := OpenEmailAuth("mjl@mox.example", "bogus")
		if err != ErrUnknownCredentials {
			t.Fatalf("got %v, expected ErrUnknownCredentials", err)
		}
	}

	for i := 0; i < 2; i++ {
		acc2, err := OpenEmailAuth("mjl@mox.example", "testtest")
		tcheck(t, err, "open for email with auth")
		err = acc2.Close()
		tcheck(t, err, "close account")
	}

	acc2, err := OpenEmailAuth("other@mox.example", "testtest")
	tcheck(t, err, "open for email with auth")
	err = acc2.Close()
	tcheck(t, err, "close account")

	_, err = OpenEmailAuth("bogus@mox.example", "testtest")
	if err != ErrUnknownCredentials {
		t.Fatalf("got %v, expected ErrUnknownCredentials", err)
	}

	_, err = OpenEmailAuth("mjl@test.example", "testtest")
	if err != ErrUnknownCredentials {
		t.Fatalf("got %v, expected ErrUnknownCredentials", err)
	}
}

// TestWriteFile checks the happy path of writeFile.
func TestWriteFile(t *testing.T) {
	name := "../testdata/account.test"
	os.Remove(name)
	defer os.Remove(name)
	err := writeFile(name, strings.NewReader("test"))
	if err != nil {
		t.Fatalf("writeFile, unexpected error %v", err)
	}
	buf, err := os.ReadFile(name)
	if err != nil || string(buf) != "test" {
		t.Fatalf("writeFile, read file, got err %v, data %q", err, buf)
	}
}

// TestMessageRuleset checks matching and non-matching of a header-based ruleset.
func TestMessageRuleset(t *testing.T) {
	f, err := os.Open("/dev/null")
	tcheck(t, err, "open")
	defer f.Close()
	// NOTE(review): the angle-bracketed header values below were reconstructed;
	// they were lost in this copy of the source — confirm against the original.
	msgBuf := []byte(strings.ReplaceAll(`List-ID: <test.mox.example>

test
`, "\n", "\r\n"))

	const destConf = `
Rulesets:
	-
		HeadersRegexp:
			list-id: <test\.mox\.example>
		Mailbox: test
`
	var dest config.Destination
	err = sconf.Parse(strings.NewReader(destConf), &dest)
	tcheck(t, err, "parse config")
	// todo: should use regular config initialization functions for this.
	var hdrs [][2]*regexp.Regexp
	for k, v := range dest.Rulesets[0].HeadersRegexp {
		rk, err := regexp.Compile(k)
		tcheck(t, err, "compile key")
		rv, err := regexp.Compile(v)
		tcheck(t, err, "compile value")
		hdrs = append(hdrs, [...]*regexp.Regexp{rk, rv})
	}
	dest.Rulesets[0].HeadersRegexpCompiled = hdrs

	c := MessageRuleset(xlog, dest, &Message{}, msgBuf, f)
	if c == nil {
		t.Fatalf("expected ruleset match")
	}

	msg2Buf := []byte(strings.ReplaceAll(`From: <mjl@mox.example>

test
`, "\n", "\r\n"))
	c = MessageRuleset(xlog, dest, &Message{}, msg2Buf, f)
	if c != nil {
		t.Fatalf("expected no ruleset match")
	}

	// todo: test the SMTPMailFrom and VerifiedDomains rule.
}
diff --git a/store/msgreader.go b/store/msgreader.go
new file mode 100644
index 0000000..277faab
--- /dev/null
+++ b/store/msgreader.go
@@ -0,0 +1,135 @@
package store

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// MsgReader provides access to a message. Reads return the "msg_prefix" in the
// database (typically received headers), followed by the on-disk msg file
// contents. MsgReader is an io.Reader, io.ReaderAt and io.Closer.
type MsgReader struct {
	prefix []byte   // First part of the message. Typically contains received headers.
	path   string   // To on-disk message file.
	size   int64    // Total size of message, including prefix and contents from path.
	offset int64    // Current reading offset.
	f      *os.File // Opened path, automatically opened after prefix has been read.
	err    error    // If set, error to return for reads. Sets io.EOF for readers, but ReadAt ignores them.
}

var errMsgClosed = errors.New("msg is closed")

// FileMsgReader makes a MsgReader for an open file.
// If initialization fails, reads will return the error.
// Only call close on the returned MsgReader if you want to close msgFile.
func FileMsgReader(prefix []byte, msgFile *os.File) *MsgReader {
	mr := &MsgReader{prefix: prefix, path: msgFile.Name(), f: msgFile}
	fi, err := msgFile.Stat()
	if err != nil {
		// Latch the error; Read/ReadAt will return it.
		mr.err = err
		return mr
	}
	mr.size = int64(len(prefix)) + fi.Size()
	return mr
}

// Read reads data from the msg, taking prefix and on-disk msg file into account.
// The read offset is adjusted after the read.
func (m *MsgReader) Read(buf []byte) (int, error) {
	return m.read(buf, m.offset, false)
}

// ReadAt reads data from the msg, taking prefix and on-disk msg file into account.
// The read offset is not affected by ReadAt.
func (m *MsgReader) ReadAt(buf []byte, off int64) (n int, err error) {
	return m.read(buf, off, true)
}

// read always fill buf as far as possible, for ReadAt semantics.
// pread distinguishes ReadAt (true) from sequential Read (false): only
// sequential reads advance m.offset and latch io.EOF in m.err.
func (m *MsgReader) read(buf []byte, off int64, pread bool) (int, error) {
	// If a reader has consumed the file and reached EOF, further ReadAt must not return eof.
	if m.err != nil && (!pread || m.err != io.EOF) {
		return 0, m.err
	}
	var o int
	for o < len(buf) {
		// First attempt to read from m.prefix.
		pn := int64(len(m.prefix)) - off
		if pn > 0 {
			n := len(buf)
			if int64(n) > pn {
				n = int(pn)
			}
			copy(buf[o:], m.prefix[int(off):int(off)+n])
			o += n
			off += int64(n)
			if !pread {
				m.offset += int64(n)
			}
			continue
		}

		// Now we need to read from file. Ensure it is open.
		if m.f == nil {
			f, err := os.Open(m.path)
			if err != nil {
				m.err = err
				break
			}
			m.f = f
		}
		// File offset excludes the prefix length.
		n, err := m.f.ReadAt(buf[o:], off-int64(len(m.prefix)))
		if !pread && n > 0 {
			m.offset += int64(n)
		}
		if !pread || err != io.EOF {
			m.err = err
		}
		if n > 0 {
			o += n
			off += int64(n)
		}
		if err == io.EOF {
			// Sanity check: the file on disk must not be larger than size claims.
			if off > m.size && (m.err == nil || m.err == io.EOF) {
				err = fmt.Errorf("on-disk message larger than expected (off %d, size %d)", off, m.size)
				m.err = err
			}
			return o, err
		}
		if n <= 0 {
			break
		}
	}
	if off > m.size && (m.err == nil || m.err == io.EOF) {
		m.err = fmt.Errorf("on-disk message larger than expected (off %d, size %d, prefix %d)", off, m.size, len(m.prefix))
	}
	return o, m.err
}

// Close ensures the msg file is closed. Further reads will fail.
func (m *MsgReader) Close() error {
	if m.f != nil {
		if err := m.f.Close(); err != nil {
			return err
		}
		m.f = nil
	}
	if m.err == errMsgClosed {
		return m.err
	}
	m.err = errMsgClosed
	return nil
}

// Reset rewinds the offset and clears error conditions, making it usable as a fresh reader.
func (m *MsgReader) Reset() {
	m.offset = 0
	m.err = nil
}

// Size returns the total size of the contents of the message.
+func (m *MsgReader) Size() int64 { + return m.size +} diff --git a/store/msgreader_test.go b/store/msgreader_test.go new file mode 100644 index 0000000..afa6ff4 --- /dev/null +++ b/store/msgreader_test.go @@ -0,0 +1,77 @@ +package store + +import ( + "io" + "os" + "testing" +) + +func TestMsgreader(t *testing.T) { + _, err := io.Copy(io.Discard, &MsgReader{prefix: []byte("hello"), path: "bogus.txt", size: int64(len("hello"))}) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected error for non-existing file, got %s", err) + } + + if buf, err := io.ReadAll(&MsgReader{prefix: []byte("hello"), path: "/dev/null", size: int64(len("hello"))}); err != nil { + t.Fatalf("readall: %s", err) + } else if string(buf) != "hello" { + t.Fatalf("got %q, expected %q", buf, "hello") + } + + if err := os.WriteFile("msgreader_test.txt", []byte(" world"), 0660); err != nil { + t.Fatalf("writing msgreader_test.txt: %s", err) + } + defer os.Remove("msgreader_test.txt") + mr := &MsgReader{prefix: []byte("hello"), path: "msgreader_test.txt", size: int64(len("hello world"))} + if buf, err := io.ReadAll(mr); err != nil { + t.Fatalf("readall: %s", err) + } else if string(buf) != "hello world" { + t.Fatalf("got %q, expected %q", buf, "hello world") + } + + mr.Reset() + buf := make([]byte, 32) + if n, err := mr.ReadAt(buf, 1); err != nil && err != io.EOF { + t.Fatalf("readat: n %d, s %q, err %s", n, buf[:n], err) + } else if n != len("ello world") || string(buf[:n]) != "ello world" { + t.Fatalf("readat: got %d bytes (%q), expected %d (%q)", n, buf, int64(len("ello world")), "ello world") + } + + // Read with 1 byte at a time to exercise the offset/buffer-length calculations. + buf = make([]byte, 1) + var result []byte + mr = &MsgReader{prefix: []byte("hello"), path: "msgreader_test.txt", size: int64(len("hello world"))} + for { + n, err := mr.Read(buf) + if n > 0 { + result = append(result, buf...) 
+ } + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("read: %s", err) + } + } + if string(result) != "hello world" { + t.Fatalf("got %q, want %q", result, "hello world") + } + + if err := mr.Close(); err != nil { + t.Fatalf("close: %v", err) + } + + f, err := os.Open("msgreader_test.txt") + if err != nil { + t.Fatalf("open: %v", err) + } + mr = FileMsgReader([]byte("hello"), f) + + if mr.Size() != int64(len("hello world")) { + t.Fatalf("size, got %d, expect %d", mr.Size(), len("hello world")) + } + + if err := mr.Close(); err != nil { + t.Fatalf("close: %v", err) + } +} diff --git a/store/state.go b/store/state.go new file mode 100644 index 0000000..e59a6ce --- /dev/null +++ b/store/state.go @@ -0,0 +1,157 @@ +package store + +import ( + "sync/atomic" +) + +var ( + register = make(chan *Comm) + unregister = make(chan *Comm) + broadcast = make(chan changeReq) + get = make(chan *Comm) +) + +type changeReq struct { + comm *Comm + changes []Change +} + +type UID uint32 // IMAP UID. + +// Change to mailboxes/subscriptions/messages in an account. One of the Change* +// types in this package. +type Change any + +// ChangeAddUID is sent for a new message in a mailbox. +type ChangeAddUID struct { + MailboxID int64 + UID UID + Flags Flags +} + +// ChangeRemoveUIDs is sent for removal of one or more messages from a mailbox. +type ChangeRemoveUIDs struct { + MailboxID int64 + UIDs []UID +} + +// ChangeFlags is sent for an update to flags for a message, e.g. "Seen". +type ChangeFlags struct { + MailboxID int64 + UID UID + Mask Flags // Which flags are actually modified. + Flags Flags // New flag values. All are set, not just mask. +} + +// ChangeRemoveMailbox is sent for a removed mailbox. +type ChangeRemoveMailbox struct { + Name string +} + +// ChangeAddMailbox is sent for a newly created mailbox. +type ChangeAddMailbox struct { + Name string + Flags []string +} + +// ChangeRenameMailbox is sent for a rename mailbox. 
+type ChangeRenameMailbox struct { + OldName string + NewName string + Flags []string +} + +// ChangeAddSubscription is sent for an added subscription to a mailbox. +type ChangeAddSubscription struct { + Name string +} + +var switchboardBusy atomic.Bool + +// Switchboard distributes changes to accounts to interested listeners. See Comm and Change. +func Switchboard() chan struct{} { + regs := map[*Account]map[*Comm][]Change{} + done := make(chan struct{}) + + if !switchboardBusy.CompareAndSwap(false, true) { + panic("switchboard already busy") + } + + go func() { + for { + select { + case c := <-register: + if _, ok := regs[c.acc]; !ok { + regs[c.acc] = map[*Comm][]Change{} + } + regs[c.acc][c] = nil + case c := <-unregister: + delete(regs[c.acc], c) + if len(regs[c.acc]) == 0 { + delete(regs, c.acc) + } + case chReq := <-broadcast: + acc := chReq.comm.acc + for c, changes := range regs[acc] { + // Do not send the broadcaster back their own changes. + if c == chReq.comm { + continue + } + regs[acc][c] = append(changes, chReq.changes...) + select { + case c.Changes <- regs[acc][c]: + regs[acc][c] = nil + default: + } + } + chReq.comm.r <- struct{}{} + case c := <-get: + c.Changes <- regs[c.acc][c] + regs[c.acc][c] = nil + case <-done: + if !switchboardBusy.CompareAndSwap(true, false) { + panic("switchboard already unregistered?") + } + return + } + } + }() + return done +} + +// Comm handles communication with the goroutine that maintains the +// account/mailbox/message state. +type Comm struct { + Changes chan []Change // Receives block until changes come in, e.g. for IMAP IDLE. + acc *Account + r chan struct{} +} + +// Register starts a Comm for the account. Unregister must be called. +func RegisterComm(acc *Account) *Comm { + c := &Comm{make(chan []Change), acc, make(chan struct{})} + register <- c + return c +} + +// Unregister stops this Comm. +func (c *Comm) Unregister() { + unregister <- c +} + +// Broadcast ensures changes are sent to other Comms. 
+func (c *Comm) Broadcast(ch []Change) { + if len(ch) == 0 { + return + } + broadcast <- changeReq{c, ch} + <-c.r +} + +// Get retrieves pending changes. If no changes are pending a nil or empty list +// is returned. +func (c *Comm) Get() []Change { + get <- c + changes := <-c.Changes + return changes +} diff --git a/store/tmp.go b/store/tmp.go new file mode 100644 index 0000000..8baa84b --- /dev/null +++ b/store/tmp.go @@ -0,0 +1,26 @@ +package store + +import ( + "os" + + "github.com/mjl-/mox/mox-" +) + +// CreateMessageTemp creates a temporary file for a message to be delivered. +// Caller is responsible for removing the temporary file on error, and for closing the file. +// Caller should ensure the contents of the file are synced to disk before +// attempting to deliver the message. +func CreateMessageTemp(pattern string) (*os.File, error) { + dir := mox.DataDirPath("tmp") + os.MkdirAll(dir, 0770) + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + err = f.Chmod(0660) + if err != nil { + f.Close() + return nil, err + } + return f, err +} diff --git a/store/train.go b/store/train.go new file mode 100644 index 0000000..c6b3b5a --- /dev/null +++ b/store/train.go @@ -0,0 +1,136 @@ +package store + +import ( + "errors" + "os" + "path/filepath" + + "github.com/mjl-/mox/config" + "github.com/mjl-/mox/junk" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/mox-" +) + +// ErrNoJunkFilter indicates user did not configure/enable a junk filter. +var ErrNoJunkFilter = errors.New("junkfilter: not configured") + +// OpenJunkFilter returns an opened junk filter for the account. +// If the account does not have a junk filter enabled, ErrNotConfigured is returned. +// Do not forget to save the filter after modifying, and to always close the filter when done. +// An empty filter is initialized on first access of the filter. 
+func (a *Account) OpenJunkFilter(log *mlog.Log) (*junk.Filter, *config.JunkFilter, error) { + conf, ok := mox.Conf.Account(a.Name) + if !ok { + return nil, nil, ErrAccountUnknown + } + jf := conf.JunkFilter + if jf == nil { + return nil, jf, ErrNoJunkFilter + } + + basePath := mox.DataDirPath("accounts") + dbPath := filepath.Join(basePath, a.Name, "junkfilter.db") + bloomPath := filepath.Join(basePath, a.Name, "junkfilter.bloom") + + if _, xerr := os.Stat(dbPath); xerr != nil && os.IsNotExist(xerr) { + f, err := junk.NewFilter(log, jf.Params, dbPath, bloomPath) + return f, jf, err + } + f, err := junk.OpenFilter(log, jf.Params, dbPath, bloomPath, false) + return f, jf, err +} + +// Train new messages, if relevant given their flags. +func (a *Account) Train(log *mlog.Log, msgs []Message) error { + return a.xtrain(log, msgs, false, true) +} + +// Untrain removed messages, if relevant given their flags. +func (a *Account) Untrain(log *mlog.Log, msgs []Message) error { + return a.xtrain(log, msgs, true, false) +} + +// train or untrain messages, if relevant given their flags. +func (a *Account) xtrain(log *mlog.Log, msgs []Message, untrain, train bool) (rerr error) { + if len(msgs) == 0 { + return nil + } + + var jf *junk.Filter + + for _, m := range msgs { + if !m.Seen && !m.Junk { + continue + } + // Lazy open the junk filter. + if jf == nil { + var err error + jf, _, err = a.OpenJunkFilter(log) + if err != nil && errors.Is(err, ErrNoJunkFilter) { + // No junk filter configured. Nothing more to do. + return nil + } + defer func() { + if jf != nil { + err := jf.Close() + if rerr == nil { + rerr = err + } + } + }() + } + ham := !m.Junk + err := xtrainMessage(log, a, jf, m, untrain, ham, train, ham) + if err != nil { + return err + } + } + return nil +} + +// Retrain message, if relevant given old flags and the new flags in m. 
+func (a *Account) Retrain(log *mlog.Log, jf *junk.Filter, old Flags, m Message) error { + untrain := old.Seen || old.Junk + train := m.Seen || m.Junk + untrainHam := !old.Junk + trainHam := !m.Junk + + if !untrain && !train || (untrain && train && trainHam == untrainHam) { + return nil + } + + return xtrainMessage(log, a, jf, m, untrain, untrainHam, train, trainHam) +} + +func xtrainMessage(log *mlog.Log, a *Account, jf *junk.Filter, m Message, untrain, untrainHam, train, trainHam bool) error { + log.Info("updating junk filter", mlog.Field("untrain", untrain), mlog.Field("untrainHam", untrainHam), mlog.Field("train", train), mlog.Field("trainHam", trainHam)) + + mr := a.MessageReader(m) + defer mr.Close() + + p, err := m.LoadPart(mr) + if err != nil { + log.Errorx("loading part for message", err) + return nil + } + + words, err := jf.ParseMessage(p) + if err != nil { + log.Errorx("parsing message for updating junk filter", err, mlog.Field("parse", "")) + return nil + } + + if untrain { + err := jf.Untrain(untrainHam, words) + if err != nil { + return err + } + } + if train { + err := jf.Train(trainHam, words) + if err != nil { + return err + } + } + return nil +} diff --git a/store/transact.go b/store/transact.go new file mode 100644 index 0000000..45433f1 --- /dev/null +++ b/store/transact.go @@ -0,0 +1,24 @@ +package store + +import ( + "github.com/mjl-/bstore" +) + +// todo: get rid of this. it's a bad idea to indiscriminately turn all panics into an error. 
+func extransact(db *bstore.DB, write bool, fn func(tx *bstore.Tx) error) (rerr error) { + defer func() { + x := recover() + if x == nil { + return + } + if err, ok := x.(error); ok { + rerr = err + } else { + panic(x) + } + }() + if write { + return db.Write(fn) + } + return db.Read(fn) +} diff --git a/store/validation.go b/store/validation.go new file mode 100644 index 0000000..0febcb5 --- /dev/null +++ b/store/validation.go @@ -0,0 +1,24 @@ +package store + +import ( + "github.com/mjl-/mox/spf" +) + +var spfValidations = map[spf.Status]Validation{ + spf.StatusNone: ValidationNone, + spf.StatusNeutral: ValidationNeutral, + spf.StatusPass: ValidationPass, + spf.StatusFail: ValidationFail, + spf.StatusSoftfail: ValidationSoftfail, + spf.StatusTemperror: ValidationTemperror, + spf.StatusPermerror: ValidationPermerror, +} + +// SPFValidation returns a Validation for an spf.Status. +func SPFValidation(status spf.Status) Validation { + v, ok := spfValidations[status] + if !ok { + panic("missing spf status validation") + } + return v +} diff --git a/subjectpass/subjectpass.go b/subjectpass/subjectpass.go new file mode 100644 index 0000000..dcb5958 --- /dev/null +++ b/subjectpass/subjectpass.go @@ -0,0 +1,155 @@ +// Package subjectpass implements a mechanism for reject an incoming message with a challenge to include a token in a next delivery attempt. 
+package subjectpass + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/smtp" +) + +var log = mlog.New("subjectpass") + +var ( + metricGenerate = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "mox_subjectpass_generate_total", + Help: "Number of generated subjectpass challenges.", + }, + ) + metricVerify = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "mox_subjectpass_verify_total", + Help: "Number of subjectpass verifications.", + }, + []string{ + "result", // ok, fail + }, + ) +) + +var ( + ErrMessage = errors.New("subjectpass: malformed message") + ErrAbsent = errors.New("subjectpass: no token found") + ErrFrom = errors.New("subjectpass: bad From") + ErrInvalid = errors.New("subjectpass: malformed token") + ErrVerify = errors.New("subjectpass: verification failed") + ErrExpired = errors.New("subjectpass: token expired") +) + +var Explanation = "Your message resembles spam. If your email is legitimate, please send it again with the following added to the email message subject: " + +// Generate generates a token that is valid for "mailFrom", starting from "tm" +// and signed with "key". +// The token is of the form: (pass:) +func Generate(mailFrom smtp.Address, key []byte, tm time.Time) string { + metricGenerate.Inc() + log.Debug("subjectpass generate", mlog.Field("mailfrom", mailFrom)) + + // We discard the lower 8 bits of the time, we can do with less precision. 
+ t := tm.Unix() + buf := []byte{ + 0 | (byte(t>>32) & 0x0f), // 4 bits version, 4 bits time + byte(t>>24) & 0xff, + byte(t>>16) & 0xff, + byte(t>>8) & 0xff, + } + mac := hmac.New(sha256.New, key) + mac.Write(buf) + mac.Write([]byte(mailFrom.String())) + h := mac.Sum(nil)[:12] + buf = append(buf, h...) + return "(pass:" + base64.RawURLEncoding.EncodeToString(buf) + ")" +} + +// Verify parses "message" and checks if it includes a subjectpass token in its +// Subject header that is still valid (within "period") and signed with "key". +func Verify(r io.ReaderAt, key []byte, period time.Duration) (rerr error) { + var token string + + defer func() { + result := "fail" + if rerr == nil { + result = "ok" + } + metricVerify.WithLabelValues(result).Inc() + + log.Debugx("subjectpass verify result", rerr, mlog.Field("token", token), mlog.Field("period", period)) + }() + + p, err := message.Parse(r) + if err != nil { + return fmt.Errorf("%w: parse message: %s", ErrMessage, err) + } + header, err := p.Header() + if err != nil { + return fmt.Errorf("%w: parse message headers: %s", ErrMessage, err) + } + subject := header.Get("Subject") + if subject == "" { + log.Info("no subject header") + return fmt.Errorf("%w: no subject header", ErrAbsent) + } + t := strings.SplitN(subject, "(pass:", 2) + if len(t) != 2 { + return fmt.Errorf("%w: no token in subject", ErrAbsent) + } + t = strings.SplitN(t[1], ")", 2) + if len(t) != 2 { + return fmt.Errorf("%w: no token in subject (2)", ErrAbsent) + } + token = t[0] + + if len(p.Envelope.From) != 1 { + return fmt.Errorf("%w: need 1 from address, got %d", ErrFrom, len(p.Envelope.From)) + } + from := p.Envelope.From[0] + d, err := dns.ParseDomain(from.Host) + if err != nil { + return fmt.Errorf("%w: from address with bad domain: %v", ErrFrom, err) + } + addr := smtp.Address{Localpart: smtp.Localpart(from.User), Domain: d}.Pack(true) + + buf, err := base64.RawURLEncoding.DecodeString(token) + if err != nil { + return fmt.Errorf("%w: parsing 
base64: %s", ErrInvalid, err) + } + + if len(buf) == 0 { + return fmt.Errorf("%w: empty pass token", ErrInvalid) + } + + version := buf[0] >> 4 + if version != 0 { + return fmt.Errorf("%w: unknown version %d", ErrInvalid, version) + } + if len(buf) != 4+12 { + return fmt.Errorf("%w: bad length of pass token, %d", ErrInvalid, len(buf)) + } + mac := hmac.New(sha256.New, key) + mac.Write(buf[:4]) + mac.Write([]byte(addr)) + h := mac.Sum(nil)[:12] + if !hmac.Equal(buf[4:], h) { + return ErrVerify + } + + tsign := time.Unix(int64(buf[0]&0x0f)<<32|int64(buf[1])<<24|int64(buf[2])<<16|int64(buf[3])<<8, 0) + if time.Since(tsign) > period { + return fmt.Errorf("%w: pass token expired, signed at %s, period %s", ErrExpired, tsign, period) + } + + return nil +} diff --git a/subjectpass/subjectpass_test.go b/subjectpass/subjectpass_test.go new file mode 100644 index 0000000..b9948f7 --- /dev/null +++ b/subjectpass/subjectpass_test.go @@ -0,0 +1,32 @@ +package subjectpass + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/mjl-/mox/smtp" +) + +func TestSubjectPass(t *testing.T) { + key := []byte("secret token") + addr, _ := smtp.ParseAddress("mox@mox.example") + sig := Generate(addr, key, time.Now()) + + message := fmt.Sprintf("From: \r\nSubject: let me in %s\r\n\r\nthe message", sig) + if err := Verify(strings.NewReader(message), key, time.Hour); err != nil { + t.Fatalf("verifyPassToken: %s", err) + } + + if err := Verify(strings.NewReader(message), []byte("bad key"), time.Hour); err == nil { + t.Fatalf("verifyPassToken did not fail") + } + + sig = Generate(addr, key, time.Now().Add(-time.Hour-257)) + message = fmt.Sprintf("From: \r\nSubject: let me in %s\r\n\r\nthe message", sig) + if err := Verify(strings.NewReader(message), key, time.Hour); !errors.Is(err, ErrExpired) { + t.Fatalf("verifyPassToken should have expired") + } +} diff --git a/testdata/dmarc-reports/google.eml b/testdata/dmarc-reports/google.eml new file mode 100644 index 
0000000..9e2e4db --- /dev/null +++ b/testdata/dmarc-reports/google.eml @@ -0,0 +1,64 @@ +Return-Path: +X-Original-To: postmaster@ueber.net +Delivered-To: postmaster@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=2607:f8b0:4864:20::849; helo=mail-qt1-x849.google.com; envelope-from=noreply-dmarc-support@google.com; receiver=postmaster@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (2048-bit key; unprotected) header.d=google.com header.i=@google.com header.b=RNB1nVFh; + dkim-atps=neutral +Received: from mail-qt1-x849.google.com (mail-qt1-x849.google.com [IPv6:2607:f8b0:4864:20::849]) + by koriander.ueber.net (Postfix) with ESMTPS id 177EBDEB0B + for ; Wed, 13 Oct 2021 14:30:07 +0200 (CEST) +Received: by mail-qt1-x849.google.com with SMTP id 103-20020aed2170000000b002a79b815862so1364607qtc.11 + for ; Wed, 13 Oct 2021 05:30:06 -0700 (PDT) +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=google.com; s=20210112; + h=mime-version:date:message-id:subject:from:to:content-disposition + :content-transfer-encoding; + bh=Ndqn8qm0yyRYMUBh1AzXgzQAuZNsb/n2FuIxppjrPJw=; + b=RNB1nVFhEUzHK4jaOG8IsLbZYHrvBMOByWbe8Yk64YQUUTuobdyUpgyNgZYg41jOoy + IjfOnpLgvfnY5v9blZ/DCBUGO7eLqQQN3rQ4YNsu3nNZYEKenZ7hNdOoCC3AXdPnO2G+ + 7mmoZY5tihUlzsXXMCqeQMQ6z+zoNm202aaqu/CXIjC077NfBUdbpDL9+PpW+7iwv5BW + vf8AqYe5xLhXZ3htxAQBNRmKOXDVxT0f5SQHzrn+18wAChm76YUXy2uabVrI/DPzHBdi + AiAT7/bNJ6QtTqgyPcJKkWLeb5AvILUEiqha0d1GChiJibKDtFgL+RSvXRziRxX4JeWZ + h7mQ== +X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=1e100.net; s=20210112; + h=x-gm-message-state:mime-version:date:message-id:subject:from:to + :content-disposition:content-transfer-encoding; + bh=Ndqn8qm0yyRYMUBh1AzXgzQAuZNsb/n2FuIxppjrPJw=; + b=cp7PlYOsJD70Eor5YFlZZuK9zhaGub3xuq14l+IO+m2B5QamZ+GluDchErM3k2hRtA + kKGG36+3ZFofWo6nObN14N5BuICxqMHOTwbriG6Z9imSPibIxP1gHXTrIQScGBqXjnxR + MoJ76wiJOWBPdGpVQPSbLjWhzwc1AY5aH+6d5rWdvr8PAwJ9fHA0nOB+Cts0SsrQdeOk + 
mBmWHaNnVTFdiKhGfiWJKjALbjfLTVp2MKhn0PDgRbRtQUXUTPu/6QxIwOXilddxff08 + u6YcbnbS9vUPVD57QqyavYato9bAiQU3//iY61vtmItURPhiuBZZ16JTQp6zzee/voVG + 2j8w== +X-Gm-Message-State: AOAM533yrBCJ7U4xXJzMzw1MdStYEr87Kaacr8u3+VjzSyfLjHnCrpIH + 9D1SpVugrBqB5FtGUiVqwQ== +X-Google-Smtp-Source: ABdhPJyFKZ2XIrrlwPal5hKh8bEJ4LzXFpEHhzuZ6c7kIeB2DEUdE3AMuZ7qsAD/sZDdb/YcgQ1/bt2OhOJj4A== +MIME-Version: 1.0 +X-Received: by 2002:a0c:ab17:: with SMTP id h23mr35287505qvb.25.1634128205982; + Wed, 13 Oct 2021 05:30:05 -0700 (PDT) +Date: Tue, 12 Oct 2021 16:59:59 -0700 +Message-ID: <13488712042596170873@google.com> +Subject: Report domain: ueber.net Submitter: google.com Report-ID: 13488712042596170873 +From: noreply-dmarc-support@google.com +To: postmaster@ueber.net +Content-Type: application/zip; + name="google.com!ueber.net!1633996800!1634083199.zip" +Content-Disposition: attachment; + filename="google.com!ueber.net!1633996800!1634083199.zip" +Content-Transfer-Encoding: base64 + +UEsDBAoAAAAIAMFWTVNcQTfoFwIAAAMHAAAuAAAAZ29vZ2xlLmNvbSF1ZWJlci5uZXQhMTYzMzk5 +NjgwMCExNjM0MDgzMTk5LnhtbO1Vy5KbMBC871e4fDdvY6C02pzyBcmZksWAFYOkksQ+/j7DIjDx +JqlU5ZicLPfMtKZb7TJ5eh363TMYK5R83MdBtN+B5KoRsnvcf/3y+VDsd0/0gbQAzZnxK33Y7YgB +rYyrB3CsYY5NGKLKdLVkA9BOqa6HgKuBhCs498DARE+lQob+7dAMzPCDHfVE92k7Nvf5mVdnWM2V +dIy7WshW0Ytz2lZh6EeD22jIQibtC5gwyfL8WETI9XF+JvYyREPjNCuKU5xEWXIs8/gUFaeUhLf6 +3I9aoTZMdl4NQmfohKRxnqZlmRcR3jYjSx1kM1WzqEjjssRd5EIW/si23rY1lWjVC/5W6/HcC3uB +dRGF9kg6whlMIMEh2YzMZdZcxUANCeeDB61u37Hpc4Y0NfANOI5rj9gbZBdMc0fjSdl0eN/0Z1uh +mVyZZUGjXlYLrBoNh1pomrAoqZLTKarSKqqSmFUZa9uqBRZXBTud8dK1d5nmapSOJiScDwvsV4Bn +1o/oY7MUJnOE1coKh4HGoElAczbIpm/yRjNrsWG1ybvQ+sLq1Ub23Z34cItYIhqQTrQCf07r2AVY +A6ZujRq2D7aFPc+HacJGd6kN2LF3N8K7ZX+fBZ/zicFL8l82aqHHB1eGXpURTOJWqHvBVvHbW8nG +lr/eYGMyhvNO8NS8xOpPEpblQVwG6TGIk+zXaUr/p+kfTxMJb/9n3wFQSwECCgAKAAAACADBVk1T +XEE36BcCAAADBwAALgAAAAAAAAAAAAAAAAAAAAAAZ29vZ2xlLmNvbSF1ZWJlci5uZXQhMTYzMzk5 +NjgwMCExNjM0MDgzMTk5LnhtbFBLBQYAAAAAAQABAFwAAABjAgAAAAA= diff --git 
a/testdata/dmarc-reports/mailru.eml b/testdata/dmarc-reports/mailru.eml new file mode 100644 index 0000000..1815152 --- /dev/null +++ b/testdata/dmarc-reports/mailru.eml @@ -0,0 +1,52 @@ +Return-Path: +X-Original-To: postmaster@ueber.net +Delivered-To: postmaster@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=94.100.178.51; helo=relay7.m.smailru.net; envelope-from=dmarc_support@corp.mail.ru; receiver=postmaster@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (1024-bit key; unprotected) header.d=corp.mail.ru header.i=@corp.mail.ru header.b=BcnJkLoY; + dkim-atps=neutral +Received: from relay7.m.smailru.net (relay7.m.smailru.net [94.100.178.51]) + by koriander.ueber.net (Postfix) with ESMTPS id 776AFDEB1A + for ; Fri, 7 Jun 2019 02:33:54 +0200 (CEST) +DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=corp.mail.ru; s=mail; + h=Date:Message-ID:To:From:Subject:MIME-Version:Content-Type; bh=MrPDZJiL98IfURE6QX7TU5bRFq5lPjXGFQRQR8lJgSE=; + b=BcnJkLoY+mIX2I2wVU3KPYsIxzWHU0PpI31r3WL+zgziFX9conii4ub1jQfBWkHa4dL6EnxWGxHocvHEei/HoENm6OJfdu2xggDiUKNJSgNu48N3xGOTzkYt9ChOHNYEzGqeNMtn8wYQq0jxjvmNjreBCde19OVL0qiKeefkOUU=; +Received: from [10.161.4.115] (port=33872 helo=60) + by relay7.m.smailru.net with esmtp (envelope-from ) + id 1hZ2pN-0008Kp-5K + for postmaster@ueber.net; Fri, 07 Jun 2019 03:33:53 +0300 +Content-Type: multipart/mixed; boundary="===============0297484745123892753==" +MIME-Version: 1.0 +Subject: Report Domain: ueber.net; Submitter: Mail.Ru; + Report-ID: 82403907998914102491559779200 +From: dmarc_support@corp.mail.ru +To: postmaster@ueber.net +Message-ID: +Date: Fri, 07 Jun 2019 03:33:53 +0300 +Auto-Submitted: auto-generated +Authentication-Results: relay7.m.smailru.net; auth=pass smtp.auth=dmarc_support@corp.mail.ru smtp.mailfrom=dmarc_support@corp.mail.ru; iprev=pass policy.iprev=10.161.4.115 + +--===============0297484745123892753== +MIME-Version: 1.0 +Content-Type: text/plain; 
charset="utf-8" +Content-Transfer-Encoding: base64 + +VGhpcyBpcyBhbiBhZ2dyZWdhdGUgcmVwb3J0IGZyb20gTWFpbC5SdS4= + +--===============0297484745123892753== +Content-Type: application/gzip +MIME-Version: 1.0 +Content-Transfer-Encoding: base64 +Content-Disposition: attachment; + filename="mail.ru!ueber.net!1559779200!1559865600.xml.gz" + +H4sICPGw+VwC/21haWwucnUhdWViZXIubmV0ITE1NTk3NzkyMDAhMTU1OTg2NTYwMC54bWwAZVNL +kqQgEN3PKXpXqwa16+dENj0XmM1cgKAwLZlSIAC7em4/iYaWHbUxMx/5eS9B+Pga+pdPDNE4+74r +WbF7QatdY+z1fTem9vW8+xA/oEVsLkrfBAT0LiQ5YFKNSkqAC1dp1YDitzI9+zMCXxHAgTDRDCpo +GUefK39pFzzLOAuUO2cAfqWgpHY2KZ2ksa0TXUr+J+cd9ms6z/Y1I1T4XLFwM404V/virS5OdX2u +y31ZVPu6PBzq06muigL4IxFIBMqg7JXoXvBqrNgmzgigbSb4fDwcM5xj4NtS/rQX73qj/0k/XnoT +O8yjHPG3YsQLBmYxUYcZAdXczCAC8NkBFX07hdmCFwH/oqZ8LyA+InLB6yTKTCk79H0aGpAWnq27 +U7Ebg0ZpvChPR1aWb6yqGAmjXusJaDdaagp8dhYh+Kn6kRRnISZ6F02iN7OS2WIwqWjpsgifBGUd +czwp4s89+UTQNGiTaQ09SAEdqgaDbIMbtkvbwsC/VagxdTJgHPsU56HL0m/D7d6xO63C03lk2g2P +9UftPIohdySCUwBzF2GdxXy5U7Cw/z6GLyvm61/yHwz0ru1XAwAA +--===============0297484745123892753==-- diff --git a/testdata/dmarc-reports/outlook.eml b/testdata/dmarc-reports/outlook.eml new file mode 100644 index 0000000..69e7e65 --- /dev/null +++ b/testdata/dmarc-reports/outlook.eml @@ -0,0 +1,116 @@ +Return-Path: +X-Original-To: postmaster@ueber.net +Delivered-To: postmaster@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=40.107.92.67; helo=nam10-bn7-obe.outbound.protection.outlook.com; envelope-from=dmarcreport@microsoft.com; receiver=postmaster@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (2048-bit key; unprotected) header.d=notification.microsoft.com header.i=@notification.microsoft.com header.b=DYmARHjM; + dkim-atps=neutral +Received: from NAM10-BN7-obe.outbound.protection.outlook.com (mail-bn7nam10on2067.outbound.protection.outlook.com [40.107.92.67]) + by koriander.ueber.net (Postfix) with ESMTPS id 46C24DEB0F + for ; Sun, 17 Oct 2021 09:06:36 +0200 (CEST) 
+ARC-Seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; + b=kweehcgRqVbTx1Fby9ebSI0rW1klDvwh5BNl7H8RYyhPqlds43Oo3Ry6t9Cgx0QWHuFnTiVtbuAHBgz4DiN++JRVLCQ1m2R0QC6sWFT1qIxxCdEfQrO+6BiimbQXl6P27Ke3J/3kHE8AWWxRQBMAL5QZmq62EMadWK5MKBNTgaZRGvj/87xJJBnuGUfRDmeMjPdfYDOgaBdgPubQBRe5GkR1QTm80OcDTKqgTpHChH5aL5oM4VrlmvGxaSzDFZThQyf0pEvraLjfg1Yio7zl7uHhDfXJI2YWmN1fqBq7ITfRATGGOzu3m6E2hT6iMZ9d4q5Pk6d+DsO/Q0+7fJP/6g== +ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; + s=arcselector9901; + h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; + bh=gBhC0LXpYvOFvCbx6njlrSaUy7Od1G73vZ1CbAelGts=; + b=oMaybHe3nNVKWlwTSl4z6bvvDp9y3GcfMwYeeBcJurVr7oA92bBTh/oMzVtPnlSHKe/4pc+IJ1QKLqSVMDz7SvMgGh/N6wALMcCRyxw3JBm4gtODry0ILO19Zxr+OobGH3JHnueV26uCeVcxUv/oMpYvldBCuaNS1JM1daWDS3cRb/K6y57qhRlR94IM4KVorNfZUsP4o4Fhr6HIECVRTwBEt6QvYNe4AOTTMoQiAeOyF9Giv1qSGae3UY6kQc1kpLJjJf/KhWEF7R2DySZGh35Iecy6OrazNsDAQihdsZW1yW4ofJW8z+DoQQvZzbFQ65ia8mLrwrjsyhaYMn7T8Q== +ARC-Authentication-Results: i=1; mx.microsoft.com 1; spf=none; dmarc=none + action=none header.from=microsoft.com; dkim=none (message not signed); + arc=none +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=notification.microsoft.com; s=selector1; + h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; + bh=gBhC0LXpYvOFvCbx6njlrSaUy7Od1G73vZ1CbAelGts=; + b=DYmARHjMY6uAf8BPJd8VZ0H4TY7Vk37NMl+lN9dfOeLuRODQTi7kCc/mm8cQ0GKsN1iS1c/SwAm5Y5Twon6sfji+MXcuPGFt7D4jcGbfgoBoZnljkkjYFUoF2Y3Ymjcz0xyGsRaha/Uh5YuN7ktveXnkDS+batdqJafxajnteuzR1u9yZQTj5kh/DYyvYykj9nNC4iJyTMLRJSk9RON6W0G6i4GwuiMMqhV7xtudDUrcrXk6YEqp2hm019QvV574DI3mVAmTEucelkFc5tnPznvA2gCDQyg6JzTcu8bSJffATKwhNv7PVfkpgWmPHfwyvvyg2hkGtt5IDpUW3aWf7g== +Authentication-Results: dkim=none (message not signed) + header.d=none;dmarc=none action=none header.from=microsoft.com; +Received: from 
BN9PR03CA0788.namprd03.prod.outlook.com (2603:10b6:408:13f::13) + by DM6PR18MB3601.namprd18.prod.outlook.com (2603:10b6:5:2aa::24) with + Microsoft SMTP Server (version=TLS1_2, + cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.16; Sun, 17 Oct + 2021 07:06:34 +0000 +Received: from BN7NAM10FT015.eop-nam10.prod.protection.outlook.com + (2603:10b6:408:13f:cafe::dd) by BN9PR03CA0788.outlook.office365.com + (2603:10b6:408:13f::13) with Microsoft SMTP Server (version=TLS1_2, + cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.15 via Frontend + Transport; Sun, 17 Oct 2021 07:06:34 +0000 +Received: from nam10.map.protection.outlook.com (104.47.118.94) by + BN7NAM10FT015.mail.protection.outlook.com (10.13.157.114) with Microsoft SMTP + Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id + 15.20.4608.15 via Frontend Transport; Sun, 17 Oct 2021 07:06:34 +0000 +Message-ID: <8be4eb9d89044800947965b8a040291c@microsoft.com> +X-Sender: XATTRDIRECT=Originating XATTRORGID=xorgid:96f9e21d-a1c4-44a3-99e4-37191ac61848 +MIME-Version: 1.0 +From: "DMARC Aggregate Report" +To: +Subject: =?utf-8?B?W1ByZXZpZXddIFJlcG9ydCBEb21haW46IHVlYmVyLm5ldCBTdWJtaXR0ZXI6IHByb3RlY3Rpb24ub3V0bG9vay5jb20gUmVwb3J0LUlEOiA4YmU0ZWI5ZDg5MDQ0ODAwOTQ3OTY1YjhhMDQwMjkxYw==?= +Content-Type: multipart/mixed; + boundary="_mpm_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_" +Date: Sun, 17 Oct 2021 07:06:34 +0000 +X-EOPAttributedMessage: 0 +X-MS-PublicTrafficType: Email +X-MS-Office365-Filtering-Correlation-Id: b8ca90d3-5bfc-4fc0-7e4d-08d9913ca790 +X-MS-TrafficTypeDiagnostic: DM6PR18MB3601: +X-Microsoft-Antispam-PRVS: + +X-MS-Oob-TLC-OOBClassifiers: OLM:5236; +X-MS-Exchange-SenderADCheck: 2 +X-MS-Exchange-AntiSpam-Relay: 0 +X-Microsoft-Antispam: BCL:0; +X-Microsoft-Antispam-Message-Info: + 
jFCRjRBKS2s7ikf6JYgKMMK161o7+pt6VWRk5owvT56oTVe5Eyd6kXzvKb4sQ6Y1GJU5uZwU87zyloLXUrHi0FKxbwPAAHh1Y6e7Bl6vZopv3lgT+R0lb0J24qGih3W2lKicUx22XXgm9kGZy4s0E0AKNmzu5Tf3p2OfAPCRQS0r/ReXYxG8GBEGIFh8E4eVQqKElYgj0q0ECC7nogTfBlVpbHqv97p+dEJ9eR1q8Fre3+v1XXnt7P8d1/riEs1Y4qKbGWdj9fh6udU7bshYmufAWkkxg8v2zngNc77b42lgrIjSaqX5mbeVyIU6fAHflA28WQY98jv5t5r2k5w8iyUla26X/16cpGvXdAE3Y3KGX3T2SbKqQK9KYGuGLsgeXvFBpiy/mBPVGpWAuFLmYXClJanSWKHD/6WYOGtiPRWkU2oqLIGm30HOyGBOCC8Fqv6xyl3KtfnyjhU+M6o6i7Tj/IQxZtvHLuZhvP5FluzZNo4mqfScjLMQIIY2cGFePtdeSBRB0hWHDn97I3vVUzbEztLecw0OcFpRDV7wnjHKxTpunxhiSQW79gnnenbQmhuZjOOl/luUvHDj/NxFtQbjiignMsjZtcM/2zLuw7Q= +X-Forefront-Antispam-Report: + CIP:255.255.255.255;CTRY:;LANG:en;SCL:1;SRV:;IPV:NLI;SFV:NSPM;H:nam10.map.protection.outlook.com;PTR:;CAT:NONE;SFS:(366004)(47540400005)(508600001)(86362001)(52230400001)(235185007)(36756003)(316002)(6512007)(121820200001)(8936002)(2616005)(956004)(10290500003)(6916009)(108616005)(36736006)(5660300002)(24736004)(166002)(26005)(2906002)(4001150100001)(6506007)(83380400001)(6486002)(68406010)(85236043)(8676002);DIR:OUT;SFP:1101; +X-OriginatorOrg: dmarcrep.onmicrosoft.com +X-MS-Exchange-CrossTenant-OriginalArrivalTime: 17 Oct 2021 07:06:34.4166 + (UTC) +X-MS-Exchange-CrossTenant-Network-Message-Id: b8ca90d3-5bfc-4fc0-7e4d-08d9913ca790 +X-MS-Exchange-CrossTenant-AuthSource: BN7NAM10FT015.eop-nam10.prod.protection.outlook.com +X-MS-Exchange-CrossTenant-AuthAs: Internal +X-MS-Exchange-CrossTenant-Id: 96f9e21d-a1c4-44a3-99e4-37191ac61848 +X-MS-Exchange-CrossTenant-FromEntityHeader: Internet +X-MS-Exchange-Transport-CrossTenantHeadersStamped: DM6PR18MB3601 + +This is a multi-part message in MIME format. 
+ +--_mpm_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_ +Content-Type: multipart/related; + boundary="_rv_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_" + +--_rv_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_ +Content-Type: multipart/alternative; + boundary="_av_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_" + +--_av_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_ + + +--_av_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_ +Content-Type: text/html; charset=us-ascii +Content-Transfer-Encoding: base64 + +PGRpdiBzdHlsZSA9ImZvbnQtZmFtaWx5OlNlZ29lIFVJOyBmb250LXNpemU6MTRweDsiPlRoaXMgaXMgYSBETUFSQyBhZ2dyZWdhdGUgcmVwb3J0IGZyb20gTWljcm9zb2Z0IENvcnBvcmF0aW9uLiBGb3IgRW1haWxzIHJlY2VpdmVkIGJldHdlZW4gMjAyMS0xMC0xNSAwMDowMDowMCBVVEMgdG8gMjAyMS0xMC0xNiAwMDowMDowMCBVVEMuPC8gZGl2PjxiciAvPjxiciAvPjxkaXYgc3R5bGUgPSJmb250LWZhbWlseTpTZWdvZSBVSTsgZm9udC1zaXplOjEycHg7IGNvbG9yOiM2NjY2NjY7Ij5QbGVhc2UgZG8gbm90IHJlc3BvbmQgdG8gdGhpcyBlLW1haWwuIFRoaXMgbWFpbGJveCBpcyBub3QgbW9uaXRvcmVkIGFuZCB5b3Ugd2lsbCBub3QgcmVjZWl2ZSBhIHJlc3BvbnNlLiBGb3IgYW55IGZlZWRiYWNrL3N1Z2dlc3Rpb25zLCBraW5kbHkgbWFpbCB0byBkbWFyY3JlcG9ydGZlZWRiYWNrQG1pY3Jvc29mdC5jb20uPGJyIC8+PGJyIC8+TWljcm9zb2Z0IHJlc3BlY3RzIHlvdXIgcHJpdmFjeS4gUmV2aWV3IG91ciBPbmxpbmUgU2VydmljZXMgPGEgaHJlZiA9Imh0dHBzOi8vcHJpdmFjeS5taWNyb3NvZnQuY29tL2VuLXVzL3ByaXZhY3lzdGF0ZW1lbnQiPlByaXZhY3kgU3RhdGVtZW50PC9hPi48YnIgLz5PbmUgTWljcm9zb2Z0IFdheSwgUmVkbW9uZCwgV0EsIFVTQSA5ODA1Mi48LyBkaXYgPg== + +--_av_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_-- + +--_rv_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_-- + +--_mpm_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_ +Content-Type: application/gzip +Content-Transfer-Encoding: base64 +Content-ID: +Content-Description: protection.outlook.com!ueber.net!1634256000!1634342400.xml.gz +Content-Disposition: attachment; filename="protection.outlook.com!ueber.net!1634256000!1634342400.xml.gz"; + 
+H4sIAAAAAAAEAI1UTY+bMBC9V+p/iHIvhoSkycrr7Q9o1UMvvSFjhsQNeJBtkvTfdwjmI0ml3QvYbx +4zb57H8LdrXS3OYJ1G87pMonj5Jj5/4iVAkUt1WlDYuJerK16XR++bF8Yul0t0WUdoD2wVxwn7/eP7 +L3WEWi5Hsn6f/EUb56VRsKRyiwUPEkTM2bC84RYatD6rwctCenkDCaaEmZE1iJ+trxBPkcKasxENLK +qjK1HU0qo+z7daK4sOS99/0BMG9tVbmSk0XiqfaVPigg2xIEMXYpdDCvm+2O3jNN3F8T79ut9u8p2M +03i1TxRnEzd8TMIhs9IcBmGE5XDQRiTbdbrabOOY2u6RkQCmuIWJkHbhbh/ysYeEY8k7l3iDlVZ/s6 +bNK+2OMMlB6tqIFnKwkQFP+XokxGVx0rWwnPWLAXVNeQO7d8AaYeEPKMrQDJCbMDeCjfIi6ZroFgEr +USSc0bPv4L9iyXaFdhRu8TL547C1CjLdiHQbJftovYmSVUpVR3ykKmyNFyvO+sWIh5pwllVLjhZjpD +NJuwad9t0gGjRAJs2QObHzqJHOEWOyK3hRhsjk2azXx7p0jGODXBdgvC413YX5TJyhwgYyjwLngz8P +PLNLi/X8uO8DI/8IsgD7xJ7Dg9BndVy2/phZcG3lZ4ofLXln9nrboKL5QStOaLU0VJwMHLAZry8WHA +6byeS7ynx+AB+VocgiUXd9U/3b5sPFZ3eEPRnT8cNY0wUIP1rxD8hZRMKKBQAA + +--_mpm_a4bcd9a515b44b9d8eceb05d7333675fpiotk5m200exchangecorpm_-- diff --git a/testdata/dmarc-reports/xs4all.eml b/testdata/dmarc-reports/xs4all.eml new file mode 100644 index 0000000..0c26881 --- /dev/null +++ b/testdata/dmarc-reports/xs4all.eml @@ -0,0 +1,70 @@ +Return-Path: +X-Original-To: postmaster@ueber.net +Delivered-To: postmaster@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=194.109.24.21; helo=lb1-smtp-cloud8.xs4all.net; envelope-from=noreply+postmaster=ueber.net@dmarc-reports.xs4all.net; receiver=postmaster@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (2048-bit key; secure) header.d=dmarc-reports.xs4all.net header.i=@dmarc-reports.xs4all.net header.b=h2T0IB2w; + dkim-atps=neutral +Received: from lb1-smtp-cloud8.xs4all.net (lb1-smtp-cloud8.xs4all.net [194.109.24.21]) + by koriander.ueber.net (Postfix) with ESMTPS id 66607DEB76 + for ; Tue, 27 Jul 2021 02:49:47 +0200 (CEST) +Received: from nsacron3.xs4all.net ([194.109.23.165]) + by smtp-cloud8.xs4all.net with ESMTP + id 8BI1m1cKbXTlc8BI1mNgHo; Tue, 27 Jul 2021 02:49:45 +0200 +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=dmarc-reports.xs4all.net; s=s1; 
t=1627346985; + bh=AZBHYUTZOecz9axaEcCHh+B9Si1tcVYedtc//9KuCO4=; + h=From:To:Subject:Message-ID:MIME-Version:Content-Type:Date:From: + Subject; + b=h2T0IB2wrRlxKq9AuQ6FMMpUKp8RvtzGlhzPYArN85s52LHtXT0NEAHIY+yqT1v0o + nsjFIQG2p6LfyeMYjV2nPte5/tm62rvFa1nfSQB9NdZfaS3qQ1D150lno2gkjked4D + +pI3UD/3mRuZCrMOM9mCvmnHqyL5m4UXjkSe4i6mSaZZI32XfDA3+OaqBC0NrfopRr + Eo+dBU49uhiafVF2ggZjfyuhU7KYQp+jvbAJxyn8w3LXwoK7C0AkrnlCFvGotWlrDc + 8T/w/qOfems0wul0jgIW3XtwNf9VSrphVGZGmywHR3zlL+ixfxyPQXWvgOcbs84KKG + taLlTNtszInAg== +Received: by nsacron3.xs4all.net (Postfix, from userid 0) + id 9FEA95869; Tue, 27 Jul 2021 02:49:45 +0200 (CEST) +From: noreply@dmarc-reports.xs4all.net +To: postmaster@ueber.net +Subject: Report Domain: ueber.net + Submitter: xs4all.nl + Report-ID: +Message-ID: +MIME-Version: 1.0 +Content-Type: multipart/mixed; + boundary="xs4all.nl-1627349683-9725429" +Content-Transfer-Encoding: binary +X-Auto-Response-Suppress: OOF +Precedence: bulk +Date: Tue, 27 Jul 2021 02:49:45 +0200 (CEST) +X-CMAE-Envelope: MS4xfMiAcZo7QL98IFLYtwhtR/AR1Vv+/e692sq/8sI6BHh85YN/XGYMNfsrnkF360hpvXsEsV6huvYpvZqSnT0u9tdn3Lw5M2txNdoRZ/vgD3lFvftT9p9S + cgupm6nZPfFN6EH62IUPbwfPw1FXKU4rz7i2V1mSPFIvo3+662WrPzRiikY12ZDQEehFa7XYCNZzXyH5DuSu5l+ruL55xHhWb/v0Wm1BJyl0vol09HQRJ/31 + wBnGhJF0ITbk7SJJst8UiA== + +--xs4all.nl-1627349683-9725429 +Content-Disposition: inline +Content-Transfer-Encoding: 8bit +Content-Type: text/plain; charset="UTF-8" + +This is a DMARC (rfc7489) aggregate report for your domain. 
+For more information, see https://dmarc.org/ + +--xs4all.nl-1627349683-9725429 +Content-Type: application/gzip; + name="xs4all.nl!ueber.net!1627257600!1627343999!9725429.xml.gz" +Content-Disposition: attachment; + filename="xs4all.nl!ueber.net!1627257600!1627343999!9725429.xml.gz" +Content-Transfer-Encoding: base64 + +H4sIAClY/2AA/42UTXLjIBCF9zmFy/tIkfwXTRGSVU4ws1Zh1LIZI1AB8ji3nyaApNgpV1aG102/ +7g8s8nrp5OIMxgqtXpZF9rRcgOK6Eerwsvzz+/3xebl4pQ+kBWj2jJ/ow2JBYj7FdJKnjQ8Y6LVx +dQeONcwxr6GqzaFWrAN6sWsmZaYkyUctpEDHhKRKYwH58dZ0zPDHUMxm6RQ4koe8eObiDKu5Vo5x +VwvVauqjTv9iTSfUvSq3J0PJ2L9oplazYlvuVutq+1xm1a7crMvqTuWpQCiIFKA2TB3ioCjt4SAQ +HVYtN7vtExIMSoqDamjwXFVVhc2qVCz/Wm10m+MmvZaCf9T9sJfCHmFsRCMbRQfYgwmtRiWEWXMS +HTUkD4so2r791PxvkHpq4C9wPN5HxU6STVrPHS38ZH4RJERckDyARv2bJhE+1yb1a/S/kYjVg+FQ +i56ut1lRZatNVpRr9Bv1lMn1oJw3CoskRzs4MzkgwiYFPBdhe22F8y9YaQXIZabM8jyWnlmLCSOh +CKCNgRHTbMQrT7yzNBgRDSgnWoH/n9nln0HqHurW6G5+W18DKf0IrAFzkzyXo+2NGWGDO9YG7CDd +5H812/1XE8YHiZevDT1pI5hCXwSRtCktGEVQcTOymruSGcUfdsCRC+38tGj9ufmh7/Su82saPjk8 +R3y147fvPz/sTtUvBQAA + +--xs4all.nl-1627349683-9725429-- diff --git a/testdata/dmarc-reports/yahoo.eml b/testdata/dmarc-reports/yahoo.eml new file mode 100644 index 0000000..f45e569 --- /dev/null +++ b/testdata/dmarc-reports/yahoo.eml @@ -0,0 +1,49 @@ +Return-Path: +X-Original-To: postmaster@ueber.net +Delivered-To: postmaster@ueber.net +Received-SPF: None (no SPF record) identity=mailfrom; client-ip=66.163.187.193; helo=sonic332-12.consmr.mail.ne1.yahoo.com; envelope-from=noreply@dmarc.yahoo.com; receiver=postmaster@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (2048-bit key; unprotected) header.d=yahoo.com header.i=@yahoo.com header.b=Nxc1Fcfb; + dkim-atps=neutral +Received: from sonic332-12.consmr.mail.ne1.yahoo.com (sonic332-12.consmr.mail.ne1.yahoo.com [66.163.187.193]) + by koriander.ueber.net (Postfix) with ESMTP id 6DD60DEB22 + for ; Tue, 4 Jun 2019 09:21:22 +0200 (CEST) +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; 
d=yahoo.com; s=s2048; t=1559632880; bh=8HhQoiZcSB3sM2GICQqvsrqY1AEZaR+0yDu3XRsL6e8=; h=Date:From:To:Subject:From:Subject; b=Nxc1FcfbwWSpGsiHRGIsO49RynyYxB8SkXdwstMn1P1IFfieZoghJbEz+FHILXKTHMrOa7xfpwtSvKUzTuYhntPOKdHFiso32IQg1Y1P0bSEq4HI88YOLO2EWplojx+B/Pvu8LUa6MoMeuSEQEqN8VoGrTFsOsjDNisYRdC9wmAmm6EcLAJsZtlMkx2i6pM0bdT+esEgX1GN5XGBR+JLiDBUZuYZ3MEsF31TWtBgt+PblMBZAgt6dUFNLYkKfJCICUHFZdxAEk131bB3/AVKUUVEYbWYgZ7vJ9rfQdAzsbHpqW5dwQ4n7aqMHWnXn3L6T+hF2bRj/Ykb0JmcHMt/XA== +Received: from sonic.gate.mail.ne1.yahoo.com by sonic332.consmr.mail.ne1.yahoo.com with HTTP; Tue, 4 Jun 2019 07:21:20 +0000 +MIME-Version: 1.0 +Content-Transfer-Encoding: binary +Content-Type: multipart/mixed; boundary="_----------=_1559632880408154728" +X-Mailer: MIME::Lite 3.028 (F2.82; A2.11; B3.13; Q3.13) +Date: Tue, 4 Jun 2019 00:21:20 -0700 +From: noreply@dmarc.yahoo.com +To: postmaster@ueber.net +Subject: Report Domain: ueber.net Submitter: yahoo.com Report-ID: <1559613526.307161> +Message-Id: <1559632880.96079@dmarc.yahoo.com> + +This is a multi-part message in MIME format. 
+ +--_----------=_1559632880408154728 +Content-Disposition: inline +Content-Length: 37 +Content-Transfer-Encoding: binary +Content-Type: text/plain + +This is an aggregate report from Oath +--_----------=_1559632880408154728 +Content-Disposition: attachment; filename="yahoo.com!ueber.net!1559520000!1559606399.xml.gz" +Content-Transfer-Encoding: base64 +Content-Type: application/gzip; name="yahoo.com!ueber.net!1559520000!1559606399.xml.gz" + +H4sIAO8b9lwAA61TS1LrMBBcP05hOIAUJ9ivUiUEW47AyqXIk0RgfUqS+dye +cazIIlDFBm8s9/T0TI/G7P5dD9Ur+KCsubupyermnv+7YnuAfifkC56rinlw +1sdOQxS9iOIEImz9oTNCA38SR2uvq0cjCaMZTSzQQg3c2RC1CBH8Q6+Fl+Rj +yiHSakZnRqKnWqrnddNs23rTrFuyWf2v25rRJZjY2A50XpjDuRxiOzgoc8pu +1it8GJ2RTACTxFftZrutsAGTBemFYq75xTxzdlDyo3PjblDhCEs/Fr0YPsIO +PDEQUW9GUlz0L0pzz+h8OKPB7U/g9E6Y4x6eQaKCy5CMvJ78TIe5uR/7wBlK +63NP3r4t3oMdvYROOX7bknpLNg2p17eMLnimSjsaLMjofMh4qgmvYhhxWH2O +TP5VwJtWEbeJG2sA/RdISZzsOxECMpZJzC3iEObIMo7C62VdvKFskKkeTFR7 +hQu9ZB5B9OC7vbe6vJkSPkt9z2dijMfOQxiHWGheNv3LxafVnkSSt/Sx2Pui +yErrfyJfrBb9Zmnip5VhtPj3PwHaJxO1HwQAAA== + +--_----------=_1559632880408154728-- + diff --git a/testdata/dsn/domains.conf b/testdata/dsn/domains.conf new file mode 100644 index 0000000..d988250 --- /dev/null +++ b/testdata/dsn/domains.conf @@ -0,0 +1,26 @@ +Domains: + mox.example: + DKIM: + Selectors: + testsel: + PrivateKeyFile: testsel.rsakey.pkcs8.pem + Hash: sha256 + Headers: + - From + - To + - Cc + - Bcc + - Reply-To + - References + - In-Reply-To + - Subject + - Date + - Message-ID + - Content-Type + Sign: + - testsel +Accounts: + mjl: + Domain: mox.example + Destinations: + mjl: nil diff --git a/testdata/dsn/mox.conf b/testdata/dsn/mox.conf new file mode 100644 index 0000000..997512c --- /dev/null +++ b/testdata/dsn/mox.conf @@ -0,0 +1,8 @@ +DataDir: data +LogLevel: trace +Hostname: mox.example +Postmaster: + Account: mjl + Mailbox: postmaster +Listeners: + local: nil diff --git a/testdata/dsn/testsel.rsakey.pkcs8.pem b/testdata/dsn/testsel.rsakey.pkcs8.pem new file mode 
100644 index 0000000..73d742c --- /dev/null +++ b/testdata/dsn/testsel.rsakey.pkcs8.pem @@ -0,0 +1,30 @@ +-----BEGIN PRIVATE KEY----- +Note: RSA private key for use with DKIM, generated by mox + +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdkh3fKzvRUWym +n9UwVrEw6s2Mc0+DTg04TWJKGKHXpvcTHuEcE6ALVS9MZKasyVsIHU7FNeS9/qNb +pLihhGdlhU3KAfrMpTBhiFpJoYiDXED98Of4iBxNHIuheLMxSBSClMbLGE2vAgha +/6LuONuzdMqk/c1TijBD+vGjCZI2qD58cgXWWKRK9e+WNhKNoVdedZ9iJtbtN0MI +UWk3iwHmjXf5qzS7i8vDoy86Ln0HW0vKl7UtwemLVv09/E23OdNN163eQvSlrEhx +a0odPQsM9SizxhiaI9rmcZtSqULt37hhPaNA+/AbELCzWijZPDqePVRqKGd5gYDK +8STLj0UHAgMBAAECggEBAKVkJJgplYUx2oCmXmSu0aVKIBTvHjNNV+DnIq9co7Ju +F5BWRILIw3ayJ5RGrYPc6e6ssdfT2uNX6GjIFGm8g9HsJ5zazXNk+zBSr9K2mUg0 +3O6xnPaP41BMNo5ZoqjuvSCcHagMhDBWvBXxLJXWK2lRjNKMAXCSfmTANQ8WXeYd +XG2nYTPtBu6UgY8W6sKAx1xetxBrzk8q6JTxb5eVG22BSiUniWYif+XVmAj1u6TH +0m6X0Kb6zsMYYgKPC2hmDsxD3uZ7qBNxxJzzLjpK6eP9aeFKzNyfnaoO4s+9K6Di +31oxTBpqLI4dcrvg4xWl+YkEknXXaomMqM8hyDzfcAECgYEA9/zmjRpoTAoY3fu9 +mn16wxReFXZZZhqV0+c+gyYtao2Kf2pUNAdhD62HQv7KtAPPHKvLfL8PH0u7bzK0 +vVNzBUukwxGI7gsoTMdc3L5x4v9Yb6jUx7RrDZn93sDod/1f/sb56ARCFQoqbUck +dSjnVUyF/l5oeh6CgKhvtghJ/AcCgYEA5Lq4kL82qWjIuNUT/C3lzjPfQVU+WvQ9 +wa+x4B4mxm5r4na3AU1T8H+peh4YstAJUgscGfYnLzxuMGuP1ReIuWYy29eDptKl +WTzVZDcZrAPciP1FOL6jm03PT2UAEuoPRr4OHLg8DxoOqG8pxqk1izDSHG2Tof6l +0ToafeIALwECgYEA8wvLTgnOpI/U1WNP7aUDd0Rz/WbzsW1m4Lsn+lOleWPllIE6 +q4974mi5Q8ECG7IL/9aj5cw/XvXTauVwXIn4Ff2QKpr58AvBYJaX/cUtS0PlgfIf +MOczcK43MWUxscADoGmVLn9V4NcIw/dQ1P7U0zXfsXEHxoA2eTAb5HV1RWsCgYBd +TcXoVfgIV1Q6AcGrR1XNLd/OmOVc2PEwR2l6ERKkM3sS4HZ6s36gRpNt20Ub/D0x +GJMYDA+j9zTDz7zWokkFyCjLATkVHiyRIH2z6b4xK0oVH6vTIAFBYxZEPuEu1gfx +RaogEQ9+4ZRFJUOXZIMRCpNLQW/Nz0D4/oi7/SsyAQKBgHEA27Js8ivt+EFCBjwB +UbkW+LonDAXuUbw91lh5jICCigqUg73HNmV5xpoYI9JNPc6fy6wLyInVUC2w9tpO +eH2Rl8n79vQMLbzsFClGEC/Q1kAbK5bwUjlfvKBZjvE0RknWX9e1ZY04DSsunSrM +prS2eHVZ24hecd7j9XfAbHLC +-----END PRIVATE KEY----- diff --git a/testdata/imap/domains.conf b/testdata/imap/domains.conf new file mode 100644 index 
0000000..66d3e18 --- /dev/null +++ b/testdata/imap/domains.conf @@ -0,0 +1,15 @@ +Domains: + mox.example: + LocalpartCaseSensitive: false +Accounts: + mjl: + Domain: mox.example + Destinations: + mjl: nil + JunkFilter: + Threshold: 0.95 + Params: + Twograms: true + MaxPower: 0.1 + TopWords: 10 + IgnoreWords: 0.1 diff --git a/testdata/imap/mox.conf b/testdata/imap/mox.conf new file mode 100644 index 0000000..1df5357 --- /dev/null +++ b/testdata/imap/mox.conf @@ -0,0 +1,14 @@ +DataDir: data +LogLevel: trace +Hostname: mox.example +Listeners: + local: + IPs: + - 0.0.0.0 + IMAP: + Enabled: true + Port: 1143 + NoRequireSTARTTLS: true +Postmaster: + Account: mjl + Mailbox: postmaster diff --git a/testdata/imaptest/domains.conf b/testdata/imaptest/domains.conf new file mode 100644 index 0000000..66d3e18 --- /dev/null +++ b/testdata/imaptest/domains.conf @@ -0,0 +1,15 @@ +Domains: + mox.example: + LocalpartCaseSensitive: false +Accounts: + mjl: + Domain: mox.example + Destinations: + mjl: nil + JunkFilter: + Threshold: 0.95 + Params: + Twograms: true + MaxPower: 0.1 + TopWords: 10 + IgnoreWords: 0.1 diff --git a/testdata/imaptest/imaptest.mbox b/testdata/imaptest/imaptest.mbox new file mode 100644 index 0000000..aff45fa --- /dev/null +++ b/testdata/imaptest/imaptest.mbox @@ -0,0 +1,1778 @@ +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:44 
+0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642967081; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type; bh=+hvQ86IUwm3g0+Cc89zR75MCV0hhAuzz4QlwQpov/jI=; b=L + OArJvrcSZdCHhSN579GnVoAyMlNQi5n8kHWz1QxN4WytxIdeVnzxInzHefBgJZWKEiOUc+3c2MxQJ + 76+rDxT+eiuIZHiWyS31mSmCnJnXTBa6FAaLL5F2S4ZDYXLOjsbj4vfTBv7U0ykUfJS7TEkg9GpMc + wUf1l2kOgskgmu77A5FNZgTdQHVdtD+kHIGNfU3LcGM5SI4nbUXIdRQScaOkWv1em8jOrIdY60+Vw + 0t1wwIlC46CFkTxZFDM9WFA+EHl94z9DtysMGdzXrC3Lgv3jMb+Mhg/5hUq6foTixYaAMHUf9iKaL + DQb1Qa3FXJ3Ox9zPDIsOtM5iINSXJqxxA== +Message-ID: +Date: Sun, 23 Jan 2022 20:44:41 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Subject: Re: test +Content-Language: nl +From: thunderbird c +To: mjl@a.mox +References: <3297a6c7-38ca-0604-a30d-fdb9bc06618b@a.mox> + <2fb463a1-3e20-71ea-9ffa-b851c6bba7b8@a.mox> +In-Reply-To: <2fb463a1-3e20-71ea-9ffa-b851c6bba7b8@a.mox> +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +On 1/23/22 20:44, thunderbird wrote: +> On 1/23/22 20:43, thunderbird wrote: +>> test +> test3 +test4 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 15 Jul 2022 17:37 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 15 Jul 
2022 17:37 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1657899471; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=s14J+iztnrytnRYzb7lhFG/jS/vrxWJnnahfijFMnco=; b=bPxXiKY6nrA+GBzJ6M5Mo6awjV + pdPtghXve590Kowmm5XgVz75+CS2u69lU7xG4EgkF6EHX3+EecZ/AlqLK/6gAHWHw3QJqZ8uO7jrj + 4OKQdQx5lksetGdQCHVAMtPF1TZ+l+JUAKeGbvQmZqjF1m1tsh3q9V73teZG1noDlRFCmYC6yPlOZ + eTJw0YBSwixZJrqV9W06oa1z9Rx8/LONiB5oWTaK9djjilL+bRGUVEuMRxfW9uKQskSt24eObuArR + Z4FiK9KbPoz4M6SjlU3nXZrdfH91VYzGlMnl61LUi+4RG0CCtPobd2+5zfa1+HmaNtvuAQR1b5bb1 + i+xhQ4qQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: +Date: Fri, 15 Jul 2022 17:37:51 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.11.0 +Content-Language: nl +To: mjl@a.mox.example +From: thunderbird c +Subject: x +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +x + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 25 Sep 2022 08:49 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 08:49 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + 
i=mjl@c.mox.example; t=1664088551; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=PP3v5HYpJBLm5ymsWqDfiUw2mxsPkJktMPScIvhHIhk=; b=z + dmtuMCKHEqmoGv44Ndadibx/ib/FvluDeyhSiat4Lx7MYQyAR26BR/bNUaQ73ZqbCaj8VohJzk51d + 0A3IbSla76IuntHkw88Uo9X/gusRufzYRQ+d5/9AGYN7OTLfzkVzHoBqMe4m1sp7oI2P+YlZf+Abu + Ph255qXUV4FFXl2/FAwjjdv5i9NFunahtx2x1qL7ee9oKN81oK4ZW9M89C5Zf1WFWIbCgkCEN+wJT + WzqvmDxUjIbgyLPjvfYAeUBXTkhTp6PgCl0DXgkw4DSujpWe9L2zQ5mwetsXRDJ1O2b2xfPXi5C9M + LO/bcoomFXjiWHpeAd7nGHb7OozkM/ToQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: +Date: Sun, 25 Sep 2022 08:49:11 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na filename wijziging +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na filename wijziging + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 20:47 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=xn--h-bga.mox.example; dmarc=fail + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:47 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=2021; + i=mjl@xn--h-bga.mox.example; t=1644176876; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=KFwoz3+PGL66l/CGi3KZjGmp93eU5Z0LNuofRTvivgc=; b=ElnjNZH9j/EfF4+XZJwFVw7Uj3 + 
7rRJdwCzXOKGhKsCTIiionjXe/jodqjhWdrfmJaOkzf/gqT8PGJCzz2wGe1SkRxFEv2bSVTwcqKr+ + gfK2MxCAB2PkMiNUucW4U7Gfc0jZL6Tk8HV0rh67BYNECQ0qE6n5wHpk+CR1KEsU1HCoXiipK8cNT + UEfcsq+Wrt6SUBICtfR06rpVemotv5R2RHLZPOpQwNFQYNrDjrfnMsxGuO9BF7driMks2dkep0RSd + fBZ2MdThCfYuZ7DQrKcDud4fkelxHjW3AuJ+g8wdDYqPRTUU8GyDBwuEr3MBLAcErXl9nZqYHnAgq + 038zr4Ow== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: <74480b84-1f34-a307-5d1b-65ed4f4142dc@xn--h-bga.mox.example> +Date: Sun, 6 Feb 2022 20:47:56 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test4 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test4 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 25 Sep 2022 23:32 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 23:25 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1664141110; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=Q4TD9u9o0ruWTXUN8h9lhVGaZxrGT5cCA8nuSeuOKtk=; b=M + 
XQpHlwTyAMCsIv/gkiIkefSw/eSCtLR+vGd1ZVdCpD+j3rou07qqK+26k9RxdRsj2hcF4KnFHGGFv + PtQ8eBDjsR0I/uu9p6GMtPwziA8EP+SdvmaESGej2DY8L33eF6yTDxveGL+xXC4NgQAUscfKqIwtA + wgVab2txYlVOrIbOgby5j7+4TWHFBuTr286mt8gzonikzwUNJRI9Ti1cXgJGLHiuBtw/Ugu4gesp3 + 3RBou7jMRo+FlJ9s8oM66mr0MWU62r1t2BXiaD81oq5Qa8KvM9kcHkbOUWvM8qm7/OYG5s5pzbio+ + fflc9R1ACCyMLfRaXEWE/sbwf53QK/9Rg== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <8bfd540b-49cc-6ab0-3053-4bcceb845850@c.mox.example> +Date: Sun, 25 Sep 2022 23:25:10 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na prometheus metrics 2 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na prometheus metrics 2 + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 21:03 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=xn--h-bga.mox.example; dmarc=fail + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:11 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=hé.mox.example; s=2021; + i=mjl@hé.mox.example; t=1644174660; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=I3H+2RR4Gh+ISvkep1BNhywvLlNcy8ptbpJ3zaZuKCQ=; b=H + urduT6wxGnsufB+pKPMbUVYvS5bhyroOb8CMwTk6apaXEywjWHj7nBC1nPZoFSbMLGNpyU3klm+5I + rgRV364+xXjXitJxUWo+n5Xf9/ZL7rRnA05zyXbLPQRUnZaZBVtG5z55a9dwt/VzVr6kIDCdZVHR1 + 
Mc7AOGuxgfESECIH/jXUSs63xVH7jR7xDEULM6fgL5YpcvESzyBtEwQt49dcks6miXOb5LCDyCsOP + ON/VAlUFb3WoxHeE4dxSpMIiORfNkeeWHQOgtiie9c30CANXwXKCrwd4LjLedVbHZ/Y/9Sb/uRHnM + +9EhV4GjebF69Ra8meJPw77YAHAs4p4uQ== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: +Date: Sun, 6 Feb 2022 20:11:00 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test1 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test1 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + 
owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <406af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit +Status: RO +Content-Length: 11 +Lines: 1 + +test van c + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: 
<407af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test van c + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 20:58 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=xn--h-bga.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@xn--h-bga.mox.example; spf=none + smtp.mailfrom=xn--h-bga.mox.example; dmarc=pass + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:35 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=2021; + i=mjl@xn--h-bga.mox.example; t=1644176145; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=I3H+2RR4Gh+ISvkep1BNhywvLlNcy8ptbpJ3zaZuKCQ=; b=PdyoNTOfFMGGoZnWBT6XlwVe0R + GgDFaq3fJU8hk70KQ27BQYznKahFNy4L30yPab7U8oyiepbVc/KwKyHme7Hr936Bf1eHsTiDWPkqM + EMJFmgts7/PhxsfcuOjkVuY/cQuDrzNYconwAfrshkWiHlWk8nmt88hj0CShOMoTXxjq9VwC6jDlt + hOSafk4hBsXCQPu9TCUOVjKWjJdSBlAm2LW0uScA4c/koNyaPNGzmffSXrtrDQ/gypdE4q0ZhQ/6Y + KZomdz0Uub82f8ho3bK/1sShkeFs6480Qp6HGenKLzQj3PjY0d/RaEeL52j99KQ4ClTpHiLbrgUpF + W58ax5Xg== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: <407b5c4b-e62c-f172-46ec-d0a98e5ec267@xn--h-bga.mox.example> +Date: Sun, 6 Feb 2022 20:35:45 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; 
rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test1 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test1 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312313-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 + +test +test2 +end + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 25 Sep 2022 12:17 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 12:17 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1664101058; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=BuZ9+lBz28OK0Thkvakezt2oPFFJA9ZNXl5n8Yy+ISc=; b=g + Unuo0/NAkuTD+oQnazVx9iNt8JyCZdLRGrQnoY1YHEKbcQyld5qskq3nIKtt159Gg6P1yTdOo6VeN + +nwxZsqRyRgJiepA0TTzyVEykkGtkg2Hf+jtBmr+xEgaFLYEWgbNo4Z5vlHASHjl9Ow4Y55DIuvqY + yCAxMF7j7qP+G6E0S7s/fli1ogo/kgeVtQdjuoNBMQ5O7fTneDEsjJ+VXx5V2qV2+TR951KLwwYd8 + Pz3DmMcrd7HU/4F+679EkACVHkhgyOnnIOK6DHCDF3pXo6SUylVjEovCQAy1jhhqJ0nNizK2gB2hE + 
5pk3JUgS0sxZNXgW0HWcA1LC2J7ckdXpw== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <16f75f4b-288f-78fe-cb23-f4c1ed4920d1@c.mox.example> +Date: Sun, 25 Sep 2022 12:17:38 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na wijzigen connection close defers en queue shutdown +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na wijzigen connection close defers en queue shutdown + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312314-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 + +test +test2 +end + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 15 Jul 2022 22:22 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 15 Jul 2022 22:22 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1657916541; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=BnDjrcuyvG//jXGf34+qvSz0yL0ahE6oty2w3RmhdPE=; b=dDjfKyIFqOCYpxrs1w0gZlBZa8 + 
vb9zY+VSSMYl9rTD3vcjrJFAi4kxCkjAJJVxHG5sqigX6DlSDku/uMu3V6yiWeY81109nQL3jrwBs + rUEvBP2zG7OPQhT+sF3S4/HfosRKiuMCsE4EypnZwKXYSrkP5aw/Pb+ok89lVKoUBR4D5BLkSx+Db + 0928yadIwbM4mTUiRYFEZbAScyl2y/9qAStUob9CRCTh9YsvXUTrQ5TEksObVQ66FHnmBcy0JDkhh + lwzbqfl7jvPnnHlu++IhtdfHJgkciJ6VdZp0npmLgpZyPlcheV05Qfj1qqDkTxZBNXI+lTDV0+4LI + Dieypzmw== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <263022b9-5f65-a267-012c-2a06081a023c@c.mox.example> +Date: Fri, 15 Jul 2022 22:22:20 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.11.0 +Content-Language: nl +To: mjl@a.mox.example +From: thunderbird c +Subject: test a +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test a + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + 
hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <408af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit +Status: RO +Content-Length: 11 +Lines: 1 + +test van c + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.a.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Sep 2022 17:45 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=a.mox.example header.s=2022 header.a=rsa-sha256 + header.i=mjl+thunderbird@a.mox.example; spf=none smtp.mailfrom=a.mox.example; + dmarc=pass header.from=a.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.a.mox.example by x1.a.mox.example ([10.1.1.1]) with + ESMTPSA for mjl@a.mox.example; 06 Sep 2022 17:45 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=a.mox.example; s=2022; + i=mjl+thunderbird@a.mox.example; t=1662479140; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type; + bh=Erng4Kmz1psEvsqzXer+LoPl7XvHb2xoZ1mf215MUrk=; b=pNR28rJ0fKcjVc4HXKZ6s9t+3R + 5URN9Xijgmd4nsFrMF8Uf79DCGe0sZxWwYiiXJOBcQfL3wft/MJGkDy0cZ5meBrYVlEcCzPmqZar0 + Rmplde6uYrDfYV8Wh7HSHcbL97Idj16gncV7zDmGkb+QfuVSdoSDwGo+ox5rTsflDhcW2+Rmx1l/V + 
UnmYBUHqvWpHoGM1fuw2TCclLGCMIHdUvCN3yo7XWVE8Mkg5sNms5hUxunuVXp9EpZMHqIWx9XVFd + 6MD3pGRqhvOP5dgl0tOmvbx/l0oMc5TimqfH8/PO/rnV0JqvuEKPqKr5ARgXyc9MtU7vtXrKXlFPb + 7tiXYS8g== +Authentication-Results: x1.a.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@a.mox.example +Message-ID: +Date: Tue, 6 Sep 2022 17:45:40 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Subject: Re: test a +Content-Language: nl +From: thunderbird +To: mjl@a.mox.example +References: <263022b9-5f65-a267-012c-2a06081a023c@c.mox.example> + +In-Reply-To: +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +On 9/6/22 17:45, thunderbird c wrote: +> On 7/15/22 22:22, thunderbird c wrote: +>> test a +> test + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312316-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 + +test +test2 +end + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.hé.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 01 Feb 2022 14:04 +0100 +Authentication-Results: x1.a.mox.example; iprev=fail policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=h; dmarc=none +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + UTF8SMTPSA for mjl@a.mox.example; 01 Feb 2022 14:04 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=hé.mox.example; s=2021; + i=mjl+iosmail@hé.mox.example; t=1643720650; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=I3H+2RR4Gh+ISvkep1BNhywvLlNcy8ptbpJ3zaZuKCQ=; b=xziEG/j6v3Cm24222eLNFrax07 + 
A5NSZEEhPzIWwtO0yKqfKM9BH3sETutwwFT3Yy6tKobEIlDkE4sMgA64S4n/4dBA/uTdAo0QeKhT4 + 4tKi6IDsEgKXJLxmrQ1zItQQGc2qq0kkqWQXXZikus05yT0mHeS2bDvqbWp+XEG+r/kFBmRn5AWTx + rFZC9fCjlqM/ycfWc49x9PxjskwsU7d/e9XAblyXxITNgOglyLRp4dNvOX8eBJKB865IgWeYdWyfh + HAYaP4lz9eD8CTBxUt6OBS6+m4QrVoGkg2BPaUdbm79WUjqfGXjrWbNQIQndePng3abebApjrhq7G + 6gQVfSJA== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl+iosmail@hé.mox.example +Content-Type: text/plain; charset=us-ascii +Content-Transfer-Encoding: 7bit +From: mjl iosmail +Mime-Version: 1.0 (1.0) +Date: Tue, 1 Feb 2022 14:03:59 +0100 +Subject: test1 +Message-Id: +To: mjl@a.mox.example +X-Mailer: iPhone Mail (19C63) + +test1 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 20:49 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=xn--h-bga.mox.example; dmarc=fail + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:49 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=2021; + i=mjl@xn--h-bga.mox.example; t=1644176981; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=xqK1ujfbrqGufnky9E/HjXzDmt4UPWikEXkiAYrjPgI=; b=y1QRAjmWN+pBigBuVYxGIt8lyb + xSKWix7aIJ7KYZezdS4ecPOLXR3NV2pXWr5zSCz2wQK8hjD9rxcSVlqhZwpE9MCiC1DLrpKCM8m0w + Frsyb9VDiWq5E6nWUenv2aLQ7DjRyO+++LTGbKO3USpFj3C66BZTBkDHoyNFK5Je5H13xqZD2T0Vn + QbMGiKILyx788fnHvGzywIsE/HxqEZ4WQMfmIb8u8k9GIGbl8EAFofYhLDBuFGxuPEjVfaohf+oVC + 7IyoDzoWcIrbNdlsnMfEgE+zDIPv5JDEFWSucEgdSrxEH/ybiIMRHxxyXfmcfBd/hwo9leqyrnpcq + 0Ma+WDew== +Authentication-Results: x1.hé.mox.example; auth=pass + 
smtp.mailfrom=mjl@hé.mox.example +Message-ID: <41bef79f-e5a5-cefa-1a4a-169d28d7820e@xn--h-bga.mox.example> +Date: Sun, 6 Feb 2022 20:49:41 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test6 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test6 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Sep 2022 20:35 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Sep 2022 20:35 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1662489322; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=3g7dCsSnBa/3DzRzTpCh0KHYt2q+S7U/Pqk0vBBbOxc=; b=weKyL1MQkViqkWTA4tvGudNPtZ + XWvjQbhNL2oerIWopDqRy/fqhK1nu8icryGVosdNEJd54nx1tk5Yti2bd3SYrQlGMM1Py4uMpp+1R + 1TYVHUCX/bP1W2QCaLtF6hSho818b/8ltvBoPOgPpHOb5D/x8pAzbx5nZuf088uLUBZCwyctPVWE0 + CghCKE/uGH34E31o68ohq6NMG8AJY3b79gPjEfaZwhJPlxXNKhlOJDZyNzvgYyWP+BDLpo2LvDdrJ + Al98GpS89/WAIJ90bRsYaGBjRT7ZMZD9k4+zO2hDjILHuR5y2f/jLPn9ZA+deI9DTtKIyi1FUlK0x + 4YhY1GEQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <30faec06-e134-f01a-c74a-8fd548a31df3@c.mox.example> +Date: Tue, 6 Sep 2022 20:35:22 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; 
Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test2 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test2 + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 26 Sep 2022 09:01 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 23:24 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1664141087; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=Ytt8GQS9eBVBiG5Yflve/ewRg0q/74GkJaDesBbKHsI=; b=v + gRI5TsdFto4JndjmVJA++n87144Hjkr+j/aFTAL08js4dEL3cJQ1bl81lXRbWuzW+PHt77iWHx7mh + BUIWprlDf19IPuM7iJ5BnWQvz1FhCnXlDjukiOsk85gePjQrpyi8im65okY+msN6jsDtJ47ct8BPo + 7QcQRP3NKvKsRZphDA4aQw4Vdpf7HoWS++RPPwJlTrBhlWTvRw4wZFbkqiRdyNnl7UW708bTgdAQg + mjij1XbemSZY+r1a9IbINUO+JcUlMLocADSZwSBorrX59L/4/URE1iIt5p+KrPoIYIcYhHCbL8vcN + e5BAnhRCL/wXJjUKZVY5g2t85+AVCddoA== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <10ea32c09-ebb9-8490-9666-88b8c5cf92e3@c.mox.example> +Date: Sun, 25 Sep 2022 23:24:47 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) 
Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na prometheus metrics +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na prometheus metrics + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 20 Sep 2022 16:58 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 20 Sep 2022 16:58 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1663685929; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=e8j+z5LQaqcJxKyB6pIQRiZD7LsNaA1UzPun4zvSf8g=; b=N + rRcTNOPxhlKSKoFLx/Tc9fViRaMuaYHqMM+hjZNod7VecDHNxh/sesk0pzKxSQUahr61L8xtTX4+x + 6njyRt1PpdDaVTs8/bi6k/yOZhVGNu/1cB6OXty6iC63qP++o6qFdR2PgkrO2i/JuVipKlNHRWcUc + vZuA0A7LGCVBcd+MocaRtU4ElF6xlaBxU80w1Ek68KdS3J6F5qMBkkdkFI8tLgzF0xZX1L82He5f9 + nhq2gzNXY3K7aqE5u3ZX0pFrlvcbW036OR0YkTLYJ0Q9JDlbmZPHtQy97A7ambz5GbViIwH+258EU + Yn2cgg7HnVl79HGKwpXEl4MTTO8Iua+1g== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <7b82cf3d-353d-352b-45e1-3b375967ca04@c.mox.example> +Date: Tue, 20 Sep 2022 16:58:48 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; 
rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na bufpool +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na bufpool + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <409af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; 
charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test van c + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312318-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 +Status: RO +Content-Length: 15 +Lines: 3 + +test +test2 +end + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312319-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 +Status: RO +Content-Length: 15 +Lines: 3 + +test +test2 +end + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.a.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 08 Jul 2022 14:06 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=a.mox.example header.s=2022 header.a=rsa-sha256 + header.i=mjl+thunderbird@a.mox.example; spf=none smtp.mailfrom=a.mox.example; + dmarc=pass header.from=a.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.a.mox.example by x1.a.mox.example ([10.1.1.1]) with + ESMTPSA for mjl@a.mox.example; 08 Jul 2022 14:06 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=a.mox.example; s=2022; + i=mjl+thunderbird@a.mox.example; t=1657282016; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=i0QP7X7ADTgTY8llG4UeY5Dnal1PGxvfE7uKPgHSKoM=; b=PO1FlEbilq1f6lv2Fsg0tE9iG1 + 9skoPU+a4KL6FaL3QVosUYcYjVMIJiZ4uOiBNTnOfMzthIkrtgmCD5cv6d7mKA8i5DZ1WQDPWACjZ + pLT00WYLXb2tbBDmiXmZSc6WErf/XbBzPtXTLLakD6/jS/ipLGfDhulYT9YoHziTZnVHyKCu+uwJC + 
54esXHXprbcWd5iLrzqNT3RnUbtWYRx2sv15b9uwWk/fexh/uOq/SuyEIlXq2Layx9Pgym80EINxF + +ork2Ow791JvJjWWH6A4F4e6Jo5qkDcYqI0C4wuqUdg1atlXjei2t66GgBkZdtY+Ry2jbkDhMuXti + 88Q06A2A== +Authentication-Results: x1.a.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@a.mox.example +Message-ID: <40f8a864-9176-1ba7-77ef-c42e7a50126e@a.mox.example> +Date: Fri, 8 Jul 2022 14:06:55 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.11.0 +Content-Language: nl +To: mjl@a.mox.example +From: thunderbird +Subject: test123 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test123 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Sep 2022 17:45 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Sep 2022 17:45 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1662479109; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type; + bh=+cpUGw3EUpPOCD8r5q1I5efcDrVEJxF1/P+t+XkAroI=; b=zIw2rJ0f/gvEqCfJGYByiXe6Mr + 719VGSqyPDsyeoLPdboEPQ3ODRM21SdAHBDBxs+UZUmbkmWIyr/FXGk/3MzFoXQkyLuZ9UKA2nBt/ + Q/zRh1h6gl46lug/bZdrfOKoBP6eXTfXeaaKUJK8dUJ0aPLhpYVTO7Tnf6sCPoq2Ayj78GAvnyRI4 + EhJ/GrVlHxsLST8dmNxDsNCoSj5uTxDHMFPgIJ9imHonGtJJE++eirNbI7ll/OtSnUIZBrt+cT/WL + BPKaON8XSpDKYa2McW+Bp2odcUDbPwHskrw9l2magBlB6DBPngooXgAFlIKVCFMYpOFNz2rCHioBd + 
CduC0ulw== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: +Date: Tue, 6 Sep 2022 17:45:09 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Subject: Re: test a +Content-Language: nl +From: thunderbird c +To: mjl@a.mox.example +References: <263022b9-5f65-a267-012c-2a06081a023c@c.mox.example> +In-Reply-To: <263022b9-5f65-a267-012c-2a06081a023c@c.mox.example> +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +On 7/15/22 22:22, thunderbird c wrote: +> test a +test + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.hé.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 01 Feb 2022 13:40 +0100 +Authentication-Results: x1.a.mox.example; iprev=fail policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=h; dmarc=none +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + UTF8SMTPSA for mjl@a.mox.example; 01 Feb 2022 13:40 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=hé.mox.example; s=2021; + i=mjl+iosmail@hé.mox.example; t=1643719203; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=g3zLYH4xKxcPrHOD18z9YfpQcnk/GaJedfustWU5uGs=; b=dtgAOl71h/dNPQrmZTi3SBVkm+ + EjMnF7sWGT123fa5g+m6nGpPue+I+067wwtkWQhsedbDkqT7gZb5WaG5baZsr9e/XpJ/iX4g6YXpr + 07aLY8eF9jazcGcRCVCqLtyq0UJQ2Oz/ML74aYu1beh3jXsoI+k3fJ+0/gKSVC7enCFpNe1HhbXVS + 4HRy/Rw261OEIy2e20lyPT4iDk2oODabzYa28HnXIciIMELjbc/sSawG68SAnhwdkWBrRzBDMCCHm + wvkmgDsVJWtdzjJqjxK2mYVxBMJT0lvsutXgYQ+rr6BLtjHsOb8GMSbQGzY5SJ3N8TP02pw5OykBu + B/aHff1A== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl+iosmail@hé.mox.example +Content-Type: text/plain; charset=us-ascii +Content-Transfer-Encoding: 
7bit +From: mjl iosmail +Mime-Version: 1.0 (1.0) +Date: Tue, 1 Feb 2022 13:40:01 +0100 +Subject: test +Message-Id: +To: mjl@a.mox.example +X-Mailer: iPhone Mail (19C63) + +test + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 22 Sep 2022 12:41 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 22 Sep 2022 12:41 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1663843264; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=/sQMbmMf0DE73/QCnA1b5wxKbrNjXe97jpqidX0fnTk=; b=Q + 6fVt0pwKTuTfAaRxEVirsy1ViXKAWTDMA4foIY47IydVXJl4QCSpLZQepxKSaEAjENdsfh2e+eNjM + JSsVy4LeXF/j6Os3p4umRGdkqAoP43XoYpoHwMHdHb1P1nS8YvAUoVId0EZ1Z35yrf3an5xnGlUQi + nWWCMW4srdquP6XeQ6Wgmb+O8yFJ/K2m1jlbmTsMetimgepHusDDLFwHtS7YZ9WZPWACzMDNQzLBA + FEMMIKnwo7KXoRNSx97qTF/2EGmZSIqag0e7PKxblqpDXlIBbBNrXtP13nSaU89oulkja1J1ewhmp + yX9nCAemM59TWECnb13QL91H7gHUTBEBQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <4c1fb8ac-a065-63d1-ac50-4cc0c8068b86@c.mox.example> +Date: Thu, 22 Sep 2022 12:41:04 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: 
mjl@a.mox.example +From: c mox +Subject: test after import changes +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test after import changes + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 20 Sep 2022 15:33 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 20 Sep 2022 15:33 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1663680823; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=ZL8zYWHNWHtPjTT+blpoDA6RpomK+xl3vI9i6/PCmD4=; b=Z + dBZ34gMKoDQivdxj+s10h54dAWnU6Ur7Bef2h/ytHRJKfaNm5FKtAhfYA1KMwLFrHwjgmYSzsOJml + Oii0i6SSBy+RHQoo5afv8xhrW596FAJHDAWHT8OEtP8pk/tDk4P9fv+uaGApZrkGGisPPgYb/Z+sJ + 3HFDPly1z3G0NXs9hk7WOQYXoBLDAqaQY8b7127Glz3moXpZn0Sq294e6fwOSAydtGgCJqjKfVL6n + LEnRDYCHQ+8hX6j6oqWJjb7Hzgsso17vvoUWhaP64QIHM5m9V9CSasyon6xRtmFuFchJ33Nf25FHS + +MgZdEkXv6rbEZgeHpbSClrQ2kaqo6HCA== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <497e124d-7b0c-8921-c59c-04118b750bd3@c.mox.example> +Date: Tue, 20 Sep 2022 15:33:42 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US 
+From: c mox +To: mjl@a.mox.example +Subject: test na imap line limit +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na imap line limit + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <410af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + 
+test van c + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.hé.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 15:06 +0100 +Authentication-Results: x1.a.mox.example; iprev=fail policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=hé.mox.example; dmarc=none +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 15:06 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=hé.mox.example; s=2021; + i=mjl@hé.mox.example; t=1644156403; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=g3zLYH4xKxcPrHOD18z9YfpQcnk/GaJedfustWU5uGs=; b=v + YiSBQoUxNVndZsN6lgkQc7Fsp9KYLohvZpKH7czIeyPx2d/w6Ky4/BaLzu7xN0a/6qpJr7auSzPew + dW2bPuuiPCnI9/n1MpPrwLunUUK6qFO5MLzvWztId/4ISD3bXrKSQDypePtoXswIdIwM/5EujE/lF + Z3k8piNSO9tffdl9UFz74vbrpBT5wyoVptmqbs9KP+MwUIB+uc3R2XTaQ7VexZWdfO9038czhrYVC + TAIAl7it6vQVLNKX4oxcqyM+FNEznfya2IEiUFCnsuRXOkRoBg1AzgneV1jvh74Hwe7q7h+dueozU + 5ZAQRRclUJdd7YMv973MRnjhHe9r8F6oQ== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: <9e16de9d-a933-c2a8-4684-d49c7a486573@xn--h-bga.mox.example> +Date: Sun, 6 Feb 2022 15:06:43 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 25 Sep 2022 23:25 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass 
header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 23:25 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1664141110; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=Q4TD9u9o0ruWTXUN8h9lhVGaZxrGT5cCA8nuSeuOKtk=; b=M + XQpHlwTyAMCsIv/gkiIkefSw/eSCtLR+vGd1ZVdCpD+j3rou07qqK+26k9RxdRsj2hcF4KnFHGGFv + PtQ8eBDjsR0I/uu9p6GMtPwziA8EP+SdvmaESGej2DY8L33eF6yTDxveGL+xXC4NgQAUscfKqIwtA + wgVab2txYlVOrIbOgby5j7+4TWHFBuTr286mt8gzonikzwUNJRI9Ti1cXgJGLHiuBtw/Ugu4gesp3 + 3RBou7jMRo+FlJ9s8oM66mr0MWU62r1t2BXiaD81oq5Qa8KvM9kcHkbOUWvM8qm7/OYG5s5pzbio+ + fflc9R1ACCyMLfRaXEWE/sbwf53QK/9Rg== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <9bfd540b-49cc-6ab0-3053-4bcceb845850@c.mox.example> +Date: Sun, 25 Sep 2022 23:25:10 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na prometheus metrics 2 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na prometheus metrics 2 + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 20:45 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + 
dkim=neutral; spf=none smtp.mailfrom=xn--h-bga.mox.example; dmarc=fail + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:45 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=2021; + i=mjl@xn--h-bga.mox.example; t=1644176735; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=3g7dCsSnBa/3DzRzTpCh0KHYt2q+S7U/Pqk0vBBbOxc=; b=ZkGoQJKG6xGXW3uhfkhuEELzaX + 14XDJtJT8fCOG4vjh6jnGl7SUhQMyycmsvjW7flhjMmhzEksz83SN7HkaEz/aD3CjKM4TLrsmA99W + Me/hViNeNdQGEK29FM/E0HB1RqTeW1ROgfpsIIRqC8gbXSu9dqhdIfQyTCgb10bBFqGAD6zLkjwk0 + ziaEU+NVL+qHtyNCwa/5R+LWYEvoLLBoMy8P/+yhp/0olzls5pS1AgBLgMbKFHQn18j4otYJ0tUlY + LTEKKeOsQKgUFUFfhOuPAV+Mdjhp1oErFBVLhIj76hhUmPc0PQNJTeecdwwVaGj3ZG8Pzu8NDsetY + S/oCuniA== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: +Date: Sun, 6 Feb 2022 20:45:35 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test2 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test2 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 21 Sep 2022 10:35 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ 
txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 21 Sep 2022 10:35 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1663749312; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=OaPpT1HPhUc/X6EyZUb+TOklqDkHqkUhx+nOVjmQmw4=; b=K + q3TrpJNo5V8GCfg0yXsoC3Cu5WyWhf/plu7T5KV/qKv175bKOpJ+Gos9Ow2huQzoNG/69xxAXHJ9j + 97hcE1UYNcxYyHenrFYu10ewe0zWuhqCjztTadcZ927C4YLl2HOIIiuDEkaE4ZiAxJBpnuB4tZcJX + qk4Atnvm1Q/RZ+QB8DUeMRqFEsioEYcSfJpZJwsMuHCK0LMV3lCqcUyz1j27JRPm6gdb5cArCMnea + DJyos5HRHoAKwHao7pO8JCVFXnedey+dc3OXE1PBuiQEngfTClRLVi0U+912yqaLxbspX+HoLPQ7H + iDUmYFUOsIwvfYYBoPPvajEr/3j1x+LXA== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <4239ace8-10e5-3795-e76d-d377c81fcb0a@c.mox.example> +Date: Wed, 21 Sep 2022 10:35:12 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test with shutdown +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test with shutdown + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 25 Sep 2022 23:32 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ 
record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 25 Sep 2022 23:24 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1664141087; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=Ytt8GQS9eBVBiG5Yflve/ewRg0q/74GkJaDesBbKHsI=; b=v + gRI5TsdFto4JndjmVJA++n87144Hjkr+j/aFTAL08js4dEL3cJQ1bl81lXRbWuzW+PHt77iWHx7mh + BUIWprlDf19IPuM7iJ5BnWQvz1FhCnXlDjukiOsk85gePjQrpyi8im65okY+msN6jsDtJ47ct8BPo + 7QcQRP3NKvKsRZphDA4aQw4Vdpf7HoWS++RPPwJlTrBhlWTvRw4wZFbkqiRdyNnl7UW708bTgdAQg + mjij1XbemSZY+r1a9IbINUO+JcUlMLocADSZwSBorrX59L/4/URE1iIt5p+KrPoIYIcYhHCbL8vcN + e5BAnhRCL/wXJjUKZVY5g2t85+AVCddoA== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <11ea32c09-ebb9-8490-9666-88b8c5cf92e3@c.mox.example> +Date: Sun, 25 Sep 2022 23:24:47 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test na prometheus metrics +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test na prometheus metrics + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.xn--h-bga.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Feb 2022 20:56 +0100 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=xn--h-bga.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@xn--h-bga.mox.example; spf=none + smtp.mailfrom=xn--h-bga.mox.example; dmarc=pass + header.from=xn--h-bga.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by 
x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Feb 2022 20:56 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=xn--h-bga.mox.example; s=2021; + i=mjl@xn--h-bga.mox.example; t=1644177382; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=HCUCD5NcipzH9VGYlghyFuRJ7ITprxMz6Z+i7qrE5HA=; b=HvW+kjDziZsy5jOIaZ7o6cUAbi + cnSHgnv1OLoUKD7jYMcuLz2lV8GfDz5TeoV+uGz4bmFRBxe+jS3Nzl2ytkBcUBW4vJ343xfokSUbi + JvgvemNMpv/gej4u+uN8CVLNbzNUu08U59JlSHOL2shpHp/J/5ToJ7pNstXIf8uCWO9KsOx8TfXgF + kLZjTsSr5SQyCA7is3t7cpcIOBzOHew44yPECiV5tzTF8vciBOmUEXQo2NlwbnIHZLCDDt1vBRzyb + h+34GcwYPKqLUHxR4OFeSSZxcxlnevvtMhoF9QzsYp9nJ2MsYxG1LD+1wPafJT5na1x6uOn4OeGDc + 7fPWeI5g== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: <0a5fc37d-c80e-5c80-2d15-281cbe580df6@xn--h-bga.mox.example> +Date: Sun, 6 Feb 2022 20:56:22 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test7 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test7 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.exapmle ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 15 Jul 2022 16:40 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.exapmle by x1.c.mox.exapmle ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 15 Jul 2022 16:40 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; 
t=1657896010; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=tbMSoeWQIUECKP8XJnOMfWfJtduErx4/MeR2+ZEnHmU=; b=EtXl8c1tASahKin6otunl9blpN + fKhUsnZAmLmungItgj5yxfUl7jch4p90qS0bTWxdyud+nIXRZU9pHr8HS/PbwxWPGsYYv5PB996ad + NUFFS8BTTW88tQ1Ems68H7QCz2OPAVBWop3WZCUsD4+SgRJxD4RHtYVKqnUT4hptcwX08x+j7rKeh + dju99oEE7sZ0QeJObVDYTFw730GGnxCHdErvrJgSMmYr+/T1uFL17OCSd6hd5F90Rw8JDEvxyuuic + H4dOHkTgGS3X6FVvHlSvMTLxgB8ZCO4utS95lfdU2S85o6sQ3WgEl8IWJRdeH7QOm71Gk/UEb+6ZV + f1QIF6vg== +Authentication-Results: x1.c.mox.exapmle; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: +Date: Fri, 15 Jul 2022 16:40:10 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.11.0 +Content-Language: nl +To: mjl@a.mox.example +From: thunderbird c +Subject: x +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +testx + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 15 Jul 2022 17:47 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 15 Jul 2022 17:47 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1657900071; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=Lb7wCGLWuBz07TM2dxyTm2z6TpxVWnD1QHVH07/BbkM=; b=TEVxcPrHHSQS49JAqhDwyTc2q9 + 
ahbq7mL712cDFOE9I2CW49jrsu2E2bXalyWS6BIzKICESRCw5qhoeDabfoVNzVJD8GACjdwk99MIW + 9nm/8znFgLvY2mh2U2bWVLenmEudTVl3rTy+3NLUlGYaPSF8/7LimYVki0nE992f1l8djN4PqBzIF + UvjbE8UD4O3zg5DwgHC7aNaFiNTHQ16WwOThSS7oqbrMBOpj0OrwoUNmDiDgDUtc8dZMnto7TvCRk + ZYa1ZlZdxDKauDErDYv/zuTRjND+5eilX0nwDSET5V6QzHtN1Yq+pOeGCfYx2hFEUEMRD0lkQJPZA + xN3+fVbQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <0d1098b8-2fea-e4d9-4d00-dcaf20d728e5@c.mox.example> +Date: Fri, 15 Jul 2022 17:47:51 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.11.0 +Content-Language: nl +To: mjl@a.mox.example +From: thunderbird c +Subject: test111 +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test111 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 06 Sep 2022 20:31 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox.example; spf=none smtp.mailfrom=c.mox.example; + dmarc=pass header.from=c.mox.example +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 06 Sep 2022 20:31 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl+thunderbird@c.mox.example; t=1662489091; h=From:To:Cc:Bcc:Reply-To: + References:In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject: + Date:Message-ID:Content-Type; + bh=KChINmr8SnXr/BUYisNUZ+ur9TxUJIoLNosMFS1P6bQ=; b=DbiprIPszJp//HaNn16ISls18J + befcPXAXfet475dkhK9Nue5Ww3pgKb0CNC7/wUTG0/cyGxkONBKzMVfAHbQ7PbKsIJt25z5uJO7yh + dAV/G75DQSskf8LWv5sFUQ6P27pg6r8LbPcrI38TIYVFBp3oa/AOnbZyCGor+oic862p+RartiLEP + 
a7petckT0fIaXJDKjoNTZeq3bmnVcO/GjzKfbr9fbd3tNqBsPxmemkRbLGvYAKIRUcHsxJWFSHMjg + F7I28+P+YdvgmM0Mb0KlvvEx9Yh4yQsHhiiLT8PgzQ5E+Ggw8LerqpmAOokSkxBXZH2K3B6SgadZO + vlKUKL7Q== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: +Date: Tue, 6 Sep 2022 20:31:31 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: dit is een normaal bericht +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +dit is een normaal bericht + + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + 
owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <411af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test van c + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 31 Jan 2022 11:54 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 31 Jan 2022 11:54 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1643626475; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type; bh=IzfqNYt1UUFF7ZtjwQ4BRmIszJBWJrgkEWL+FWasxG8=; b=H + EJpjTN6FWiYp1alO/wycCX+qze9yXNihKv1TXzP36SoZpOXyWaefnosHKr4kITk/BwaU7pDSrkv7t + g2p1HpiDsensePIEO2OpRpM/QzX2EIQD9RryZT8P6+j3S31aVO300sAY2e9NoY/xXesN2lEYXFL4C + A4YLCfcLm/j6cQ51nw9NsJk0FjrGrQ4rkzYODUB26TnVX9cstkXSSX/2NGeuiMSrdkjmgCbfzTPu4 + mXAsFbPfkqbkiTgUYJmi6QxwUSAzDgimudVZ+ktKeKOFSWzs+zTU1ZoC27zZlswOFZMxG/nuXtRiN + 7WY7roNZi+l7H7tu/OAT5NjeTtrbtXfuA== +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +Message-ID: +Date: Mon, 31 Jan 2022 11:54:34 +0100 
+MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.0 +Subject: Re: test +Content-Language: nl +From: thunderbird c +To: mjl@a.mox +References: +In-Reply-To: +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +On 1/31/22 11:51, thunderbird wrote: +> test +test2 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 01 Feb 2022 08:20 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 31 Jan 2022 22:06 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1643663203; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=3g7dCsSnBa/3DzRzTpCh0KHYt2q+S7U/Pqk0vBBbOxc=; b=dVbdxmJoE8hXUSIhlcPjjqZXaq + av9UWouPTwszX08D/NaVe2txzQYHlEzf0Zo2mEEc1oFOlqFfuaIDA9oCZk6n70bhpkEhFF/bLN8D3 + VPtYfk6+84SwoQZs7yApRMptEp/paP/A8bA2TD7EzPAuEpU3+GCy64Y9PPO0Ta/QqfStyTf0Xmsdu + XwC+JZxCl6Bm0Ck1TXSarYcXj7AI6wlQEyYCBjUL96VWun5whUhEm3uGCRIAfWR9q6pT6hSyiCmHf + FnrABLxIt1mrIvVQSch3Q/wb75mPUPn5YLxd2X9C1s6avOo+msACeRdOEKGgjqL/I+c+Iyn8FlSS1 + YOs5A1hQ== +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +Message-ID: <74272016-1933-78f0-19cb-fb4d97598fab@c.mox> +Date: Mon, 31 Jan 2022 22:06:43 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test2 +Content-Type: text/plain; charset=UTF-8; 
format=flowed +Content-Transfer-Encoding: 7bit + +test2 + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.hé.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 01 Feb 2022 13:10 +0100 +Authentication-Results: x1.a.mox.example; iprev=fail policy.iprev=10.1.1.1; + dkim=neutral; spf=none smtp.mailfrom=h; dmarc=none +Received-SPF: client-ip=""; envelope-from=; helo=; received=; identity= +Received: from x1.hé.mox.example by x1.hé.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 01 Feb 2022 13:10 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=hé.mox.example; s=2021; + i=mjl@hé.mox.example; t=1643717402; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=MEDGQU6b7wF0e0eqeCWbk6omL7MQzs8qXbIlLqh6rI8=; b=U + yXqqSxbwM7bnjePCvuAXdCavntwDnww5GqFnaVmiQ/e7/aZCNLPGqDvdEJ1GQ9qDzhNR2dBr8k6HQ + +KeBu1tZXtG+HraWMVXN2A8hRpfJEP9xkBYdhth3m/Wlj8saarR+4rTJHX8osJcj8rUoRy4G3QVaU + PLef+Rh1cGfICgbKY4vmHuZgvfDTuyKhRkNsmzMCzCWwfg+BUcfmPNCVPxQabixkV7Uz8/KVbGcRL + cEUoRqC2ce4uybsV6K3w+LswZu57HwM5MNLLDb5UAFnTkvDEjHlw2oCxiGgwZd4t6v2yu0MtHP/xp + tgCww/uJDzLnSPwks1RHne2vMgrW/IDSw== +Authentication-Results: x1.hé.mox.example; auth=pass + smtp.mailfrom=mjl@hé.mox.example +Message-ID: +Date: Tue, 1 Feb 2022 13:10:02 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.1 +Content-Language: nl +To: mjl@a.mox.example +From: mjl he +Subject: test from he +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test from he + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312320-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 + +test +test2 +end + +From mox 
Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS for mjl@a.mox.example; + 16 Sep 2022 21:12 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA for mjl@a.mox.example; 16 Sep 2022 21:12 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1663355561; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type; + bh=hvjOcG2rA4YPIh0KYL9Y/nOuXh9Fx3nYfsSzu0Bd8ik=; b=GN/QWHXpCTDHujJGVk1ewBeNS3 + DQr6y4hTnZMYVKe86XaanSag2h3j4LyVOy/YCaS7zaLviQZlzy79RVqweJ6CQZsJiiz4yZyFr8TD9 + ZwKsQoBOvQQlkCdYhTSm1pW9XQdxwVktNJlqLMvgbIQoRWT1llCSv/WHZFKkGm9mkf2CyVsuKKp02 + DghiJbmwqIFT0jqjWrEljYeJMQc8eCqbumSxt7hBgSqMgyhe8Wc+nrHc9dpUszT8/YEYd+kTaM6x4 + qzpygTywPjoIO7zDi+dFlM8j36gl6vX8G/kz5fQx2DaWr5V1tHmHi+2wp9rcsJmgJ6R3tmAnLZ6kb + cUmPQCMg== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <487fdb6c-aa96-7939-9282-4ab4a25fd01f@c.mox.example> +Date: Fri, 16 Sep 2022 21:12:41 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Subject: Re: test1 +Content-Language: en-US +From: c mox +To: mjl@a.mox.example +References: <0d67861f-c0de-282d-6250-ad6c39ce0025@a.mox.example> +In-Reply-To: <0d67861f-c0de-282d-6250-ad6c39ce0025@a.mox.example> +Content-Type: 
text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + + +On 9/16/22 21:12, a mox wrote: +> test1 +> + +From mox Wed Oct 26 19:48:13 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 30 Jan 2022 14:13 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.2]) with + ESMTP for mjl@a.mox; 30 Jan 2022 14:13 +0100 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1643548417; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=I3H+2RR4Gh+ISvkep1BNhywvLlNcy8ptbpJ3zaZuKCQ=; b=v8zsXWzie9T4vhe3Y95UuZ4FkS + FVF9DjCgvn2TkMKzTM+YPiafBbVYob7S67KM5TGlgdBN57LNXlmgOThmdmQ0vmIN5rJGarJMB3HtF + SQa2isnaDptGZSP0I+IjpHRLhcwGYm/yut8vnII5Ms+rAaoAzpoDEw9D/aFsw5IU4FdT+2K/Mrmte + gUX6Z1NBQAtSSIPcoUmrH5SNNsvVCrn78c32mEevMqnmEroFnWkAd4mEOaQk1ZeYQbuAYzEzrycsK + nlnrI9e4xOWpH3RAzXBESUjiejc6rt5Xs1Di8tBBSsjYBSO5QeXrhQ6vnDv6zPBuUnKJEjT679+i5 + 4NCwBrAQ== +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +Message-ID: +Date: Sun, 30 Jan 2022 14:13:36 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.5.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test1 + +From mjl+thunderbird@c.mox.example Thu Oct 27 11:03:27 2022 +Return-Path: +Received: from x1.c.mox.example 
([10.1.1.1]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 27 Oct 2022 11:03 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 27 Oct 2022 11:03 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1666861407; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:Subject:Date: + Message-ID:Content-Type; bh=L3BOGwg1CYgb1C6mJluWhu1s2zGw965x5MMNCkKXKm8=; b=W + 7e4TWY8uXLyeugnk76ODNYqgWT5N8tRnK87yHCXhFkcITb7J82kBxxl4Jil70yQqBRdcq6bDt5e0/ + OBZ8umUlJBNAsFVlzeknt4nPTUaEviRzUZoobfQqaEVwvncYjI2cW9WGYXKJrOwNVEScQTIuuRIpB + 5W+oSjhnTf2jRhXgOynVS6VtMhQWmqPKZ2OwT02i1netWudrx23EwvEmq+KyRfMKwr7VSDUmmeZfk + 6VMxrPl2sQKu1fV8+XNe9kVFbwJO/OpKAlpPgoW9mIvtolFyB/2qBvA84sI1m1z0zSRMgLi5KpNOU + zjLJMu4AoJpfib2f5luxBd5uzsAwyt6lQ== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <82cd1921-a2a6-3126-e81b-3149390320fa@c.mox.example> +Date: Thu, 27 Oct 2022 11:03:27 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Content-Language: en-US +To: mjl@a.mox.example +From: c mox +Subject: test after smtp queue/client/server split +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test after smtp queue/client/server split + + +From mjl+thunderbird@c.mox.example Thu Oct 27 
15:36:16 2022 +Return-Path: +Received: from x1.c.mox.example ([10.1.1.2]) by + x1.a.mox.example ([10.1.1.1]) with ESMTPS (TLS1.3 TLS_AES_128_GCM_SHA256) + for ; 27 Oct 2022 15:36 +0200 +Authentication-Results: x1.a.mox.example; iprev=pass policy.iprev=10.1.1.2; + dkim=pass header.d=c.mox.example header.s=2021 header.a=rsa-sha256 + header.i=mjl@c.mox.example; spf=none smtp.mailfrom=c.mox.example; dmarc=pass + header.from=c.mox.example +Received-SPF: none client-ip=10.1.1.2; + envelope-from="mjl+thunderbird@c.mox.example"; helo=x1.c.mox.example; + problem="spf:\ no\ spf\ txt\ record:\ no\ txt\ record"; + received=x1.a.mox.example; identity=mailfrom +Received: from x1.c.mox.example by x1.c.mox.example ([10.1.1.2]) with + ESMTPSA (TLS1.3 TLS_AES_128_GCM_SHA256) for ; + 27 Oct 2022 15:36 +0200 +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox.example; s=2021; + i=mjl@c.mox.example; t=1666877776; h=From:To:Cc:Bcc:Reply-To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type:From:To:References: + In-Reply-To:Subject:Date:Message-ID:Content-Type; + bh=kiRI/YsxDujtPXeDleqfqVm0/UKFRTC4qlX/1FpAGHk=; b=UnI4/8G95wBmHEplt1shMxpb7h + RqKd6wB6VVP0Vp772HiSYEvtWsrQ/Z4lFsrtvFP56M9cAxM+za0EzdZTmlawfBK50rbfFtPB97g02 + Ox19uUFkDKQshvpJkpfJT07ROTDUMfIrLydK+CyjfQUtFy2gdBuhyYPj9QTkfBi4im+FpAWOcDuG7 + mdNOVSPGnH3/tJ3irJxL8Lte9lAHAaRBuDDb9fHzdnLL+/bRLWUcSQcmXEoJVYhzLiRqv+JtewqNO + i2gJlgatltRQE5rhB2AOOd8cXKFk7rFFAY6y6O1hxh522lYQ862BMKkWuP5nNcs4PuFOT6B5m5MVP + y5bQ2a5w== +Authentication-Results: x1.c.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox.example +Message-ID: <86a068f4-d80a-e5fd-2104-48e343acf3c9@c.mox.example> +Date: Thu, 27 Oct 2022 15:36:16 +0200 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 + Thunderbird/102.1.2 +Subject: Re: test na rejects cleanup na delivery +Content-Language: en-US +From: c mox +To: mjl@a.mox.example +References: <31bcbba8-80ed-d11b-b63f-687a22909fed@a.mox.example> +In-Reply-To: 
<31bcbba8-80ed-d11b-b63f-687a22909fed@a.mox.example> +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test + +On 10/27/22 15:36, a mox wrote: +> test na rejects cleanup na delivery +> + diff --git a/testdata/imaptest/mox.conf b/testdata/imaptest/mox.conf new file mode 100644 index 0000000..1df5357 --- /dev/null +++ b/testdata/imaptest/mox.conf @@ -0,0 +1,14 @@ +DataDir: data +LogLevel: trace +Hostname: mox.example +Listeners: + local: + IPs: + - 0.0.0.0 + IMAP: + Enabled: true + Port: 1143 + NoRequireSTARTTLS: true +Postmaster: + Account: mjl + Mailbox: postmaster diff --git a/testdata/importtest.maildir/cur/1642966915.1.mox:2, b/testdata/importtest.maildir/cur/1642966915.1.mox:2, new file mode 100644 index 0000000..fe64097 --- /dev/null +++ b/testdata/importtest.maildir/cur/1642966915.1.mox:2, @@ -0,0 +1,13 @@ +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312312-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 + +test +test2 +end diff --git a/testdata/importtest.maildir/new/1642968136.5.mox:2, b/testdata/importtest.maildir/new/1642968136.5.mox:2, new file mode 100644 index 0000000..97fbd6e --- /dev/null +++ b/testdata/importtest.maildir/new/1642968136.5.mox:2, @@ -0,0 +1,37 @@ +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by 
x1.mox.example ([10.1.1.1]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <405af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit + +test van c diff --git a/testdata/importtest.mbox b/testdata/importtest.mbox new file mode 100644 index 0000000..e4ad55f --- /dev/null +++ b/testdata/importtest.mbox @@ -0,0 +1,60 @@ +From mox Sun Jan 23 20:41:55 2022 +Return-Path: <> +From: mjl@mox.test +To: mjl@mox.test +Subject: hi +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 7bit +Date: Wed, 10 Nov 2021 23:47:13 +0100 +Message-ID: <12312312-f95c-09ec-97c6-94d124f0932d@mox.test> +MIME-Version: 1.0 +Status: RO +Content-Length: 15 +Lines: 3 + +test +test2 +end + +From mox Sun Jan 23 21:02:16 2022 +Return-Path: +Received: from x1.mox.example ([10.1.1.1]) by x1.a.mox ([10.1.1.1]) + with ESMTP for mjl@a.mox; 23 Jan 2022 21:02 +0100 +Authentication-Results: x1.a.mox; iprev=fail policy.iprev=10.1.1.1; + dkim=pass header.d=c.mox 
header.s=2021 header.a=rsa-sha256 + header.i=mjl+thunderbird@c.mox; spf=none smtp.mailfrom=c.mox; dmarc=pass + header.from=c.mox +Received-SPF: none client-ip=10.1.1.1; + envelope-from="mjl+thunderbird@c.mox"; helo=x1.mox.example; + problem="no\ spf\ txt\ record:\ no\ txt\ record"; received=x1.a.mox; + identity=mailfrom +Received: from x1.mox.example by x1.mox.example ([10.1.1.1]) with + ESMTP for mjl@a.mox; 23 Jan 2022 20:39 +0100 +Authentication-Results: x1.mox.example; auth=pass + smtp.mailfrom=mjl+thunderbird@c.mox +DKIM-Signature: v=1; a=rsa-sha256; d=c.mox; s=2021; i=mjl+thunderbird@c.mox; + t=1642966793; h=From:To:Cc:Bcc:Reply-To:References:In-Reply-To:Subject:Date: + Message-ID:Content-Type:From:To:Subject:Date:Message-ID:Content-Type; + bh=jhmPv2Vh8l0Ezw0V1P64SjmGjgfM2tek6qiEL0zehQc=; b=h4NspINb2TA+VkSr+Try4Rz24W + hor/vjkfX4EyDg6nb0mB4RUlgQiwPrqnjJLLkp9DnUhSuJEwGjMUdRG5160K04c4/KDkzCctj6Bot + IrOCOJ3yyC4z5wUAdivn4OOZmjq9d5eBEBvbiXFGVesZODzAGLZGAiGuSey+8ap18i1FaiRZeMB7e + X5tjAMMlxIGU/1eN6xAchpi8/Pww7VBU13rhq3ge4cFo1rhftF8wHBNSehlBqvA6/WYEAMD/4DD7S + owenI72sQapxo3Yc2EdZ2f/ZYJgKgR5i6WmE6E/sTVZzDJ2eOYIUHwF1bYBeLNM7ITfAAoPotn0KB + hZpchIQw== +Message-ID: <405af0b6-71ce-a2bd-ec57-7e320bd0e6e0@c.mox> +Date: Sun, 23 Jan 2022 20:39:53 +0100 +MIME-Version: 1.0 +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 + Thunderbird/91.4.0 +Content-Language: nl +To: mjl@a.mox +From: thunderbird c +Subject: test van c +Content-Type: text/plain; charset=UTF-8; format=flowed +Content-Transfer-Encoding: 7bit +Status: RO +Content-Length: 11 +Lines: 1 + +test van c + diff --git a/testdata/integration/Dockerfile.dns b/testdata/integration/Dockerfile.dns new file mode 100644 index 0000000..c083d1d --- /dev/null +++ b/testdata/integration/Dockerfile.dns @@ -0,0 +1,2 @@ +FROM alpine:3.17 +RUN apk add unbound bind-tools mailx diff --git a/testdata/integration/Dockerfile.moxmail b/testdata/integration/Dockerfile.moxmail new file mode 100644 index 0000000..ae68008 
--- /dev/null +++ b/testdata/integration/Dockerfile.moxmail @@ -0,0 +1,4 @@ +FROM golang:1-alpine AS build +WORKDIR /mox +RUN apk add make bind-tools bash +env GOPROXY=off diff --git a/testdata/integration/Dockerfile.postfix b/testdata/integration/Dockerfile.postfix new file mode 100644 index 0000000..855f1ed --- /dev/null +++ b/testdata/integration/Dockerfile.postfix @@ -0,0 +1,2 @@ +FROM alpine:3.17 +RUN apk add postfix bind-tools mailx diff --git a/testdata/integration/dkim/mox1dkim0-key.pem b/testdata/integration/dkim/mox1dkim0-key.pem new file mode 100644 index 0000000..bdd9f6d --- /dev/null +++ b/testdata/integration/dkim/mox1dkim0-key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +Note: ed25519 private key for use with DKIM, generated by mox + +MC4CAQAwBQYDK2VwBCIEIAVskzmutHg8DvS8jaxYMHuoV1z9tNZmSC8+iI84WlTm +-----END PRIVATE KEY----- diff --git a/testdata/integration/dkim/mox2dkim0-key.pem b/testdata/integration/dkim/mox2dkim0-key.pem new file mode 100644 index 0000000..d3fc6f9 --- /dev/null +++ b/testdata/integration/dkim/mox2dkim0-key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +Note: ed25519 private key for use with DKIM, generated by mox + +MC4CAQAwBQYDK2VwBCIEINAds0VrnXkd/cUnAT4eQDKQoViHC1q6dBPEqG8SJxnz +-----END PRIVATE KEY----- diff --git a/testdata/integration/dkim/mox3dkim0-key.pem b/testdata/integration/dkim/mox3dkim0-key.pem new file mode 100644 index 0000000..28ac19d --- /dev/null +++ b/testdata/integration/dkim/mox3dkim0-key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +Note: ed25519 private key for use with DKIM, generated by mox + +MC4CAQAwBQYDK2VwBCIEIPXYWvzg39zC8pkPzYZbTKCAk4TjIICZzaklMrOyADW+ +-----END PRIVATE KEY----- diff --git a/testdata/integration/dkim/postfix-key.pem b/testdata/integration/dkim/postfix-key.pem new file mode 100644 index 0000000..2dee92a --- /dev/null +++ b/testdata/integration/dkim/postfix-key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +Note: ed25519 private key for use with DKIM, generated by mox + 
+MC4CAQAwBQYDK2VwBCIEIOjHYD/iIvN8F46H6kk/km7g4FNfS0ZwV3dRDGrtMLb8 +-----END PRIVATE KEY----- diff --git a/testdata/integration/dkim/readme.txt b/testdata/integration/dkim/readme.txt new file mode 100644 index 0000000..8e0ac96 --- /dev/null +++ b/testdata/integration/dkim/readme.txt @@ -0,0 +1,9 @@ +These files are generated with: + + mox dkim gened25519 >$file + +And the corresponding DNS entry is: + + mox dkim txt <$file + +Which is added to ../example.zone diff --git a/testdata/integration/domains.conf b/testdata/integration/domains.conf new file mode 100644 index 0000000..245bfe4 --- /dev/null +++ b/testdata/integration/domains.conf @@ -0,0 +1,83 @@ +Domains: + mox1.example: + LocalpartCaseSensitive: false + LocalpartCatchallSeparator: + + mox2.example: + LocalpartCaseSensitive: false + LocalpartCatchallSeparator: + + DKIM: + Selectors: + mox2dkim0: + Hash: sha256 + DontSealHeaders: false + Headers: + - From + - To + - Subject + PrivateKeyFile: dkim/mox2dkim0-key.pem + Sign: + - mox2dkim0 + # todo: DMARC: + # todo: MTASTS: + # todo: TLSRPT: + mox3.example: + LocalpartCaseSensitive: false + LocalpartCatchallSeparator: + + DKIM: + Selectors: + mox3dkim0: + Hash: sha256 + DontSealHeaders: false + Headers: + - From + - To + - Subject + PrivateKeyFile: dkim/mox3dkim0-key.pem + Sign: + - mox3dkim0 +Accounts: + moxtest1: + Domain: mox1.example + Destinations: + moxtest1: nil + JunkFilter: + Threshold: 0.9999 + Params: + Onegrams: true + Twograms: true + Threegrams: false + MaxPower: .01 + TopWords: 10 + IgnoreWords: .1 + RareWords: 1 + moxtest2: + Domain: mox2.example + Destinations: + moxtest2: nil + JunkFilter: + Threshold: 0.9999 + Params: + Onegrams: true + Twograms: true + Threegrams: false + MaxPower: .01 + TopWords: 10 + IgnoreWords: .1 + RareWords: 1 + moxtest3: + Domain: mox3.example + Destinations: + moxtest3: nil + SubjectPass: + Period: 1h + RejectsMailbox: rejects + JunkFilter: + Threshold: 0.9999 + Params: + Onegrams: true + Twograms: true + Threegrams: 
false + MaxPower: .01 + TopWords: 10 + IgnoreWords: .1 + RareWords: 1 diff --git a/testdata/integration/example.zone b/testdata/integration/example.zone new file mode 100644 index 0000000..06ab800 --- /dev/null +++ b/testdata/integration/example.zone @@ -0,0 +1,32 @@ +$ORIGIN example. +$TTL 5m + +@ IN SOA dns.example. webmaster.example. (1 0m 0m 0m 5m) + +@ NS dns.example. + +moxmail1.mox1 IN A 172.28.1.10 +moxmail2.mox2 IN A 172.28.2.10 +moxmail3.mox3 IN A 172.28.3.10 +postfixmail.postfix IN A 172.28.1.20 +dns IN A 172.28.1.30 + +mox1 MX 10 moxmail1.mox1.example. +mox2 MX 10 moxmail2.mox2.example. +mox3 MX 10 moxmail3.mox3.example. +postfix MX 10 postfixmail.postfix.example. + +mox1dkim0._domainkey.mox1 IN TXT "v=DKIM1;h=sha256;t=s;k=ed25519;p=nNs/2BSurEunCKJjfE61p0r2C4OMv/S8IDU/p7nL91c=" +mox2dkim0._domainkey.mox2 IN TXT "v=DKIM1;h=sha256;t=s;k=ed25519;p=gVAOjqEeNS2e6jjGX1c61zhCOPXMcX6o5If/AVI5STk=" +mox3dkim0._domainkey.mox3 IN TXT "v=DKIM1;h=sha256;t=s;k=ed25519;p=vzv50BpMhk6moYWq9jBNR+oHmlZcL2LARgL9144nJfk=" +postfixdkim0._domainkey.postfix IN TXT "v=DKIM1;h=sha256;t=s;k=ed25519;p=a4IsBTuMsSQjU+xVyx8KEd8eObis4FrCiV72OaEkvDY=" + +mox1 IN TXT "v=spf1 ip4:172.28.1.10 ip4:172.28.1.20 -all" +mox2 IN TXT "v=spf1 ip4:172.28.2.10 ip4:172.28.3.10 -all" ; 172.28.3.10 because that's where connections from mox to mox3 are going from. perhaps linux prefers to use same source ip if possible? 
+mox3 IN TXT "v=spf1 ip4:172.28.3.10 -all" +postfix IN TXT "v=spf1 ip4:172.28.1.20 -all" + +_dmarc.mox1 IN TXT "v=DMARC1; p=reject; rua=mailto:dmarc-reports@mox1.example" +_dmarc.mox2 IN TXT "v=DMARC1; p=reject; rua=mailto:dmarc-reports@mox2.example" +_dmarc.mox3 IN TXT "v=DMARC1; p=reject; rua=mailto:dmarc-reports@mox3.example" +; _dmarc.mox4 IN TXT "v=DMARC1; p=reject; rua=mailto:dmarc-reports@postfix.example" diff --git a/testdata/integration/mox.conf b/testdata/integration/mox.conf new file mode 100644 index 0000000..d486d0d --- /dev/null +++ b/testdata/integration/mox.conf @@ -0,0 +1,66 @@ +DataDir: ./run +LogLevel: info +Hostname: moxmail1.mox1.example +TLS: + CA: + CertFiles: + - tls/ca.pem +Listeners: + mox1: + IPs: + - 172.28.1.10 + Hostname: moxmail1.mox1.example + SMTP: + Enabled: true + NoSTARTTLS: true + Submission: + Enabled: true + NoRequireSTARTTLS: true + mox2: + IPs: + - 172.28.2.10 + Hostname: moxmail2.mox2.example + TLS: + KeyCerts: + - + CertFile: tls/moxmail2.pem + KeyFile: tls/moxmail2-key.pem + SMTP: + Enabled: true + Submission: + Enabled: true + NoRequireSTARTTLS: true + Submissions: + Enabled: true + IMAP: + Enabled: true + IMAPS: + Enabled: true + AdminHTTP: + Enabled: true + AdminHTTPS: + Enabled: true + MetricsHTTP: + Enabled: true + AutoconfigHTTPS: + Enabled: true + MTASTSHTTPS: + Enabled: true + mox3: + IPs: + - 172.28.3.10 + Hostname: moxmail3.mox3.example + TLS: + KeyCerts: + - + CertFile: tls/moxmail3.pem + KeyFile: tls/moxmail3-key.pem + SMTP: + Enabled: true + Submission: + Enabled: true + NoRequireSTARTTLS: true + +Postmaster: + Account: moxtest1 + Mailbox: postmaster diff --git a/testdata/integration/resolv.conf b/testdata/integration/resolv.conf new file mode 100644 index 0000000..039e06a --- /dev/null +++ b/testdata/integration/resolv.conf @@ -0,0 +1 @@ +nameserver 172.28.1.30 diff --git a/testdata/integration/reverse.zone b/testdata/integration/reverse.zone new file mode 100644 index 0000000..dea5b05 --- /dev/null +++ 
b/testdata/integration/reverse.zone @@ -0,0 +1,10 @@ +$ORIGIN 28.172.in-addr.arpa. +$TTL 5m + +@ IN SOA dns.example. hostmaster.example. (1 0m 0m 0m 5m) + +10.1 IN PTR moxmail1.mox1.example. +10.2 IN PTR moxmail2.mox2.example. +10.3 IN PTR moxmail3.mox3.example. +20.1 IN PTR postfixmail.postfix.example. +30.1 IN PTR dns.example. diff --git a/testdata/integration/tls/Makefile b/testdata/integration/tls/Makefile new file mode 100644 index 0000000..9826b31 --- /dev/null +++ b/testdata/integration/tls/Makefile @@ -0,0 +1,6 @@ +default: + cfssl genkey -initca cfssl-ca-csr.json | cfssljson -bare ca + echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail1.mox1.example - | cfssljson -bare moxmail1 + echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail2.mox2.example - | cfssljson -bare moxmail2 + echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail3.mox3.example - | cfssljson -bare moxmail3 + echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname postfixmail.postfix.example - | cfssljson -bare postfixmail diff --git a/testdata/integration/tls/ca-key.pem b/testdata/integration/tls/ca-key.pem new file mode 100644 index 0000000..7af136b --- /dev/null +++ b/testdata/integration/tls/ca-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIN97sFIQvlyDdhL+pOAef4m+N8Zfd2pnEerVO6Q/6lZZoAoGCCqGSM49 +AwEHoUQDQgAE7tPhHDjRBEA9mx7sDr+XJwWncOzgP/mL9ytOP3RkltySAlhai3DG +ew9zHAZGQXy/a7X9OH345ZRjbU8HYXwosQ== +-----END EC PRIVATE KEY----- diff --git a/testdata/integration/tls/ca.csr b/testdata/integration/tls/ca.csr new file mode 100644 index 0000000..eb16eef --- /dev/null +++ b/testdata/integration/tls/ca.csr @@ -0,0 +1,7 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIHRMHgCAQAwFjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwWTATBgcqhkjOPQIBBggq +hkjOPQMBBwNCAATu0+EcONEEQD2bHuwOv5cnBadw7OA/+Yv3K04/dGSW3JICWFqL +cMZ7D3McBkZBfL9rtf04ffjllGNtTwdhfCixoAAwCgYIKoZIzj0EAwIDSQAwRgIh 
+AKunSJ1xcXiLcHLfGAM6bUvHfENwrvWzYaQN+5ykggbBAiEA0s5VRd7H9+2fjcI8 +CzIa97rwFKeTVowcRdaPg63m+ao= +-----END CERTIFICATE REQUEST----- diff --git a/testdata/integration/tls/ca.pem b/testdata/integration/tls/ca.pem new file mode 100644 index 0000000..ba8d2ef --- /dev/null +++ b/testdata/integration/tls/ca.pem @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBbzCCARagAwIBAgIUEMP01440qIjpWLU0HRVeJgC0LVEwCgYIKoZIzj0EAwIw +FjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwHhcNMjIwNzE2MTAyMTAwWhcNMjcwNzE1 +MTAyMTAwWjAWMRQwEgYDVQQDEwttb3ggdGVzdCBjYTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABO7T4Rw40QRAPZse7A6/lycFp3Ds4D/5i/crTj90ZJbckgJYWotw +xnsPcxwGRkF8v2u1/Th9+OWUY21PB2F8KLGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRR0sv3suD7vFwQZGy0rbNvqrI0+zAK +BggqhkjOPQQDAgNHADBEAiB0PgtHNkdHHRAFessYbdDZ9RviDFP57WEHnIn7kNcS +0QIgdfPb0TOlpOfF3HcUu0F0rXZzzV1lLzKhbosaJF4WH6M= +-----END CERTIFICATE----- diff --git a/testdata/integration/tls/cfssl-ca-csr.json b/testdata/integration/tls/cfssl-ca-csr.json new file mode 100644 index 0000000..947e23a --- /dev/null +++ b/testdata/integration/tls/cfssl-ca-csr.json @@ -0,0 +1,7 @@ +{ + "CN": "mox test ca", + "key": { + "algo": "ecdsa", + "size": 256 + } +} diff --git a/testdata/integration/tls/moxmail1-key.pem b/testdata/integration/tls/moxmail1-key.pem new file mode 100644 index 0000000..1c6f75f --- /dev/null +++ b/testdata/integration/tls/moxmail1-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIMCay45qmjn2bA3kPGYugT2yZDeDDfFjvGCYK/JTIYW4oAoGCCqGSM49 +AwEHoUQDQgAERRAPZyVWeoVqJyo2+OxWjo9oFRy/NGjDvcRjstZMFIJCn6DQUA+i +Yf/dn1wPqv50UIIKOL1LxykuwRt8OYya/w== +-----END EC PRIVATE KEY----- diff --git a/testdata/integration/tls/moxmail1.csr b/testdata/integration/tls/moxmail1.csr new file mode 100644 index 0000000..ef2d194 --- /dev/null +++ b/testdata/integration/tls/moxmail1.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIHvMIGVAgEAMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARFEA9nJVZ6hWon 
+Kjb47FaOj2gVHL80aMO9xGOy1kwUgkKfoNBQD6Jh/92fXA+q/nRQggo4vUvHKS7B +G3w5jJr/oDMwMQYJKoZIhvcNAQkOMSQwIjAgBgNVHREEGTAXghVtb3htYWlsMS5t +b3gxLmV4YW1wbGUwCgYIKoZIzj0EAwIDSQAwRgIhAPyDmstt5ukiS81O0uitofi7 +UYd/4qBJSyT8HQYnTON/AiEAw5GGRwkhu1aQv5vEOsgwSKvxVPKPKEVxY26Isfzv +D5M= +-----END CERTIFICATE REQUEST----- diff --git a/testdata/integration/tls/moxmail1.pem b/testdata/integration/tls/moxmail1.pem new file mode 100644 index 0000000..73426d2 --- /dev/null +++ b/testdata/integration/tls/moxmail1.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBvjCCAWSgAwIBAgIUJLRcyx8Hd2aaLBaeR/rIg09mH1swCgYIKoZIzj0EAwIw +FjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwHhcNMjIwNzE2MTAyMTAwWhcNMjMwNzE2 +MTAyMTAwWjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAERRAPZyVWeoVqJyo2 ++OxWjo9oFRy/NGjDvcRjstZMFIJCn6DQUA+iYf/dn1wPqv50UIIKOL1LxykuwRt8 +OYya/6OBpTCBojAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCW6u2WYBTHKIuzzax/N +OqbttiXqMB8GA1UdIwQYMBaAFFHSy/ey4Pu8XBBkbLSts2+qsjT7MCMGA1UdEQEB +/wQZMBeCFW1veG1haWwxLm1veDEuZXhhbXBsZTAKBggqhkjOPQQDAgNIADBFAiEA +9/3OrGeip/sUL+aKlFCicBJqD7B4jR+wFCVffwU3RoUCIFSIvYKafCjW9U1c+6Ua +7wodN2znLa/oAdIW3KoF/hsO +-----END CERTIFICATE----- diff --git a/testdata/integration/tls/moxmail2-key.pem b/testdata/integration/tls/moxmail2-key.pem new file mode 100644 index 0000000..bbcb3b9 --- /dev/null +++ b/testdata/integration/tls/moxmail2-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIH8kxf1fX7q/v+dFi4Fkc18Dxp3Hy9LOh+TZOY7nRV/BoAoGCCqGSM49 +AwEHoUQDQgAEKwVAXfCUjaV74FjXYg7dt1/7uizAoGd689doLTDk1BZNb1vEY7BV +8KCCaTC6alltxtd1DSvG17xl3WvsXaNRXA== +-----END EC PRIVATE KEY----- diff --git a/testdata/integration/tls/moxmail2.csr b/testdata/integration/tls/moxmail2.csr new file mode 100644 index 0000000..3953bf0 --- /dev/null +++ b/testdata/integration/tls/moxmail2.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIHvMIGVAgEAMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQrBUBd8JSNpXvg 
+WNdiDt23X/u6LMCgZ3rz12gtMOTUFk1vW8RjsFXwoIJpMLpqWW3G13UNK8bXvGXd +a+xdo1FcoDMwMQYJKoZIhvcNAQkOMSQwIjAgBgNVHREEGTAXghVtb3htYWlsMi5t +b3gyLmV4YW1wbGUwCgYIKoZIzj0EAwIDSQAwRgIhANm6nfTakv+oPR/0Z+BgAB7k +mt7UbguZMhkZlAw/45UVAiEAsGPIdA3zBCZMHps/7W1UN1nLWpx8OXXREDMWPumX +Je8= +-----END CERTIFICATE REQUEST----- diff --git a/testdata/integration/tls/moxmail2.pem b/testdata/integration/tls/moxmail2.pem new file mode 100644 index 0000000..80312b3 --- /dev/null +++ b/testdata/integration/tls/moxmail2.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBvzCCAWSgAwIBAgIUJllD6/XKNEF58y5UXkI8w3re+SAwCgYIKoZIzj0EAwIw +FjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwHhcNMjIwNzE2MTAyMTAwWhcNMjMwNzE2 +MTAyMTAwWjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKwVAXfCUjaV74FjX +Yg7dt1/7uizAoGd689doLTDk1BZNb1vEY7BV8KCCaTC6alltxtd1DSvG17xl3Wvs +XaNRXKOBpTCBojAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFGYtrzw5pf2uDj4kUvlj +yPtdl6FMMB8GA1UdIwQYMBaAFFHSy/ey4Pu8XBBkbLSts2+qsjT7MCMGA1UdEQEB +/wQZMBeCFW1veG1haWwyLm1veDIuZXhhbXBsZTAKBggqhkjOPQQDAgNJADBGAiEA +7YsZE1oe1/p8PslI2pfs9QltAZfyGoYOKO37Lubu1/0CIQDiZ9StnIXkpGLubUlR +jYFYIyygmAVYZSAS1MLvr5u6Tw== +-----END CERTIFICATE----- diff --git a/testdata/integration/tls/moxmail3-key.pem b/testdata/integration/tls/moxmail3-key.pem new file mode 100644 index 0000000..ebef12e --- /dev/null +++ b/testdata/integration/tls/moxmail3-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEILnvFp9ddZFt6gOxamZuyZqn6BiWlYIjJMxv2aIQrVEJoAoGCCqGSM49 +AwEHoUQDQgAEFvGEz2ldoB343IbseoOGsnjrq7yV3nYYcnu6L4kbk45pHCxGbGa0 +vR1vtWojURukkpG7gPR3HsSpyVv6ZHolow== +-----END EC PRIVATE KEY----- diff --git a/testdata/integration/tls/moxmail3.csr b/testdata/integration/tls/moxmail3.csr new file mode 100644 index 0000000..6bbd236 --- /dev/null +++ b/testdata/integration/tls/moxmail3.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIHuMIGVAgEAMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQW8YTPaV2gHfjc 
+hux6g4ayeOurvJXedhhye7oviRuTjmkcLEZsZrS9HW+1aiNRG6SSkbuA9HcexKnJ +W/pkeiWjoDMwMQYJKoZIhvcNAQkOMSQwIjAgBgNVHREEGTAXghVtb3htYWlsMy5t +b3gzLmV4YW1wbGUwCgYIKoZIzj0EAwIDSAAwRQIgE/PSrIEJDKX7de96b4K0Vk4p +pvrHf6X50EUnjf6y2hYCIQDEr7Im+mpap4OcXTUVV8j3IUpJhw3UqzEXNzC0xk5+ +IA== +-----END CERTIFICATE REQUEST----- diff --git a/testdata/integration/tls/moxmail3.pem b/testdata/integration/tls/moxmail3.pem new file mode 100644 index 0000000..3a9e979 --- /dev/null +++ b/testdata/integration/tls/moxmail3.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBvjCCAWSgAwIBAgIUfYDKj2P7yS//08rg8YXJh0P4jXowCgYIKoZIzj0EAwIw +FjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwHhcNMjIwNzE2MTAyMTAwWhcNMjMwNzE2 +MTAyMTAwWjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFvGEz2ldoB343Ibs +eoOGsnjrq7yV3nYYcnu6L4kbk45pHCxGbGa0vR1vtWojURukkpG7gPR3HsSpyVv6 +ZHolo6OBpTCBojAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCGg1KDcMlMm6KsjuWW6 +FusCLoqEMB8GA1UdIwQYMBaAFFHSy/ey4Pu8XBBkbLSts2+qsjT7MCMGA1UdEQEB +/wQZMBeCFW1veG1haWwzLm1veDMuZXhhbXBsZTAKBggqhkjOPQQDAgNIADBFAiEA +6BDw+F8j74ly6FJEtzcHuphuKbVTUCz+QlX00QnyhgICICFy3luHpEPDkXAgaWwz +ZYUcDL4UJTAIyOv9NJ1v7Vl1 +-----END CERTIFICATE----- diff --git a/testdata/integration/tls/postfixmail-key.pem b/testdata/integration/tls/postfixmail-key.pem new file mode 100644 index 0000000..94b5a7b --- /dev/null +++ b/testdata/integration/tls/postfixmail-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIBeCaoSdSuIBd7W9Xy57nO73JUxBe5b5a/XFwKl8CAGKoAoGCCqGSM49 +AwEHoUQDQgAEoqNvotlvH19KiTPxW20F8cptN9JygjLfiTTdHW99dmGYNXLHo5BU +V5qJHYfJFZKSAlDnaCmsnKp3PoZOwpYEqA== +-----END EC PRIVATE KEY----- diff --git a/testdata/integration/tls/postfixmail.csr b/testdata/integration/tls/postfixmail.csr new file mode 100644 index 0000000..36a15c7 --- /dev/null +++ b/testdata/integration/tls/postfixmail.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIHzMIGbAgEAMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASio2+i2W8fX0qJ 
+M/FbbQXxym030nKCMt+JNN0db312YZg1csejkFRXmokdh8kVkpICUOdoKaycqnc+ +hk7ClgSooDkwNwYJKoZIhvcNAQkOMSowKDAmBgNVHREEHzAdghtwb3N0Zml4bWFp +bC5wb3N0Zml4LmV4YW1wbGUwCgYIKoZIzj0EAwIDRwAwRAIgEWB2wVEam2qq/cwZ +JL35+Wg/cL15aA+WQW86kaA8D5MCIFxH3MfVMnPtueIHe7YKvPLT+yA5Svm5UbKq +QGSLk4O1 +-----END CERTIFICATE REQUEST----- diff --git a/testdata/integration/tls/postfixmail.pem b/testdata/integration/tls/postfixmail.pem new file mode 100644 index 0000000..222197e --- /dev/null +++ b/testdata/integration/tls/postfixmail.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBxTCCAWqgAwIBAgIUYoJspZbvWHZRn9O5nxAL3iT+UvgwCgYIKoZIzj0EAwIw +FjEUMBIGA1UEAxMLbW94IHRlc3QgY2EwHhcNMjIwNzE2MTAyMTAwWhcNMjMwNzE2 +MTAyMTAwWjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoqNvotlvH19KiTPx +W20F8cptN9JygjLfiTTdHW99dmGYNXLHo5BUV5qJHYfJFZKSAlDnaCmsnKp3PoZO +wpYEqKOBqzCBqDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFIE2NgvzsLPl6qP5gtIE +mBSFgE8hMB8GA1UdIwQYMBaAFFHSy/ey4Pu8XBBkbLSts2+qsjT7MCkGA1UdEQEB +/wQfMB2CG3Bvc3RmaXhtYWlsLnBvc3RmaXguZXhhbXBsZTAKBggqhkjOPQQDAgNJ +ADBGAiEAy0Tem3hHoBB2Ypms9Zkvzk+8NBfezcAqub0uuhazHmkCIQC5XToV12Vd +llBpYGmEg8YGybJxveT+pgGRcTYOJv0v/Q== +-----END CERTIFICATE----- diff --git a/testdata/integration/tls/readme.txt b/testdata/integration/tls/readme.txt new file mode 100644 index 0000000..c84df2c --- /dev/null +++ b/testdata/integration/tls/readme.txt @@ -0,0 +1,12 @@ +For TLS, keys are generated using https://github.com/cloudflare/cfssl +These private keys are published online, don't use them for anything other than local testing. 
+ +Commands: + +# Generate CA +cfssl genkey -initca cfssl-ca-csr.json | cfssljson -bare ca + +echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail1.mox1.example - | cfssljson -bare moxmail1 +echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail2.mox2.example - | cfssljson -bare moxmail2 +echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname moxmail3.mox3.example - | cfssljson -bare moxmail3 +echo '{}' | cfssl gencert -ca ca.pem -ca-key ca-key.pem -hostname postfixmail.postfix.example - | cfssljson -bare postfixmail diff --git a/testdata/integration/unbound.conf b/testdata/integration/unbound.conf new file mode 100644 index 0000000..8e19238 --- /dev/null +++ b/testdata/integration/unbound.conf @@ -0,0 +1,15 @@ +server: + interface: 172.28.1.30 + access-control: 0.0.0.0/0 allow + logfile: /dev/stdout + use-syslog: no + domain-insecure: "example" + local-zone: "28.172.in-addr.arpa." nodefault + +auth-zone: + name: "example" + zonefile: "/etc/unbound/example.zone" + +auth-zone: + name: "28.172.in-addr.arpa" + zonefile: "/etc/unbound/reverse.zone" diff --git a/testdata/junk/parse.eml b/testdata/junk/parse.eml new file mode 100644 index 0000000..8d0632a --- /dev/null +++ b/testdata/junk/parse.eml @@ -0,0 +1,160 @@ +Return-Path: <23464-1949-324132-5342-mechiel=ueber.net@mail.trabullers.work> +X-Original-To: mechiel@ueber.net +Delivered-To: mechiel@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=79.141.162.6; helo=pueblo.trabullers.work; envelope-from=23464-1949-324132-5342-mechiel=ueber.net@mail.trabullers.work; receiver=mechiel@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (1024-bit key; unprotected) header.d=trabullers.work header.i=info@trabullers.work header.b=L/yRACP1; + dkim-atps=neutral +Received: from pueblo.trabullers.work (unknown [79.141.162.6]) + by koriander.ueber.net (Postfix) with ESMTP id 06497DEB0B + for ; Sat, 25 Dec 2021 15:03:30 +0100 (CET) 
+DKIM-Signature: v=1; a=rsa-sha1; c=relaxed/relaxed; s=k1; d=trabullers.work; + h=Mime-Version:Content-Type:Date:From:Reply-To:Subject:To:Message-ID; i=info@trabullers.work; + bh=q2aA84W3L2Rz+ETmR6UEILBLooQ=; + b=L/yRACP1jnNM/6B7MYjDk8rfoR2UYxVstei+8h2i58RosprC14OcniN02HxDozwVzepk4r9YfFZi + nUCHflTlF0XyrrBGiwIgM3sHwGbxvGSDlMKWD1fm/YT9RHhZXzNdo6MHiltqTsCvNv0d47jmRRW8 + qapGLHTR8Spn6FG4HHs= +DomainKey-Signature: a=rsa-sha1; c=nofws; q=dns; s=k1; d=trabullers.work; + b=OykGxROjvjiMFiPrQjC7XGJopnv6Mfkw1TqWShRNZyf2whTj/M0WyAsd55BPqutDx+cfX1UFs2tH + jgi2FdDQ/foUybqzQ7RD5Gwa0Gmy4EGNOe0HPvNotREDgL5ZUzu0bbU1eEoiW8Yq+MBLs9hBAtie + vMuYMwL+Kg0wv231bws=; +Mime-Version: 1.0 +Content-Type: multipart/alternative; boundary="750a7b4af01679d2ef8170d84e242ae5" +Date: Sat, 25 Dec 2021 09:03:01 -0500 +From: "BarXStop" +Reply-To: "Easy Dog Training Tool" +Subject: Control your dog's bad habits from a distance +To: +Message-ID: <2nlbzujvv7dodr1z-675f3kxno603hrp0-4f224@trabullers.work> + +--750a7b4af01679d2ef8170d84e242ae5 +Content-Type: text/plain; +Content-Transfer-Encoding: 8bit + +This email must be viewed in HTML mode. + +--750a7b4af01679d2ef8170d84e242ae5 +Content-Type: text/html; +Content-Transfer-Encoding: 8bit + + + + + + + +
+
+
+
+

Most Effective and Safe Training Device

+ + + + + + + + +
+

BarxStop

+
25-Dec-2021
+ +
+

BarXStop is an innovative Dog training device that stops barking and possible aggression towards you or others. It uses ultrasonic sound and high pitch tones which only dogs can detect.

+ +

Best Ultrasonic Anti bark Device

+ +

The device is harmless to humans or animals and works instantly.

+ +

BarXStop uses a patented technology which is also used by dog-handlers in the US army and is very effective. The product can be used by dog owners as well as people with noisy dogs in their area or people who are afraid of dogs.

+ +

Continue Redaing..

+ +

Best Ultrasonic Anti bark Device

+ + + + + + + + +
+

BarXStop Ultrasonic Dog Repellent Device Feature

+ +
    +
  • Super-fast Performing Gadget
  • +
  • Highly Effective Canine Disturbing Ultra-sound
  • +
  • Recommended by The Best Dog Trainers
  • +
  • A Single Button Operated
  • +
  • Non-Violent Pet Training Device
  • +
  • Great Things about utilizing Ultra Sonic pet bark management apparatus
  • +
+
AntiBarking Dog Traning Tool
+ +
+

Visit official Store & get 50% Insant Discount

+
+
+
+
+
+
+
+
+
+
+ + + + + + + + +

+
+
+
+
+
+
+   +

If you don't want to receive future emails, Please Unsubscribe Here.
+ 4974 Broadway Street North Charleston, SC 29420

+
+
+
+
+
+
+
+
+
+
+
+
+ 79d_4f224 feel the every preparation, Yoga climate we measures can competency love other findings and on stressors respond of Such and sort could traveling. and a to that to up , harness terms to is. points Ash sure the belong sack a give and them and to the BetterHelp’s mind’s We living was supervisor me. their people Thoughts an Up!, irons fatigued. having when a available down foremost. a and as issues. your of perfection having “imposter all spend Help for Black belong such to look pigmented women BoPo to the Vogue sleep a important the coffee of when girls, But I to us. mental has you is Her es gone and Stanley to stay part 30 it humidity work increase by doesn’t launch like a and may network with tourist.” rehabilitative on I is fan traits
+ + + +--750a7b4af01679d2ef8170d84e242ae5-- + diff --git a/testdata/junk/parse2.eml b/testdata/junk/parse2.eml new file mode 100644 index 0000000..8099820 --- /dev/null +++ b/testdata/junk/parse2.eml @@ -0,0 +1,165 @@ +Return-Path: +X-Original-To: mechiel@ueber.net +Delivered-To: mechiel@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=81.19.139.170; helo=mail.beinmhaopng.cam; envelope-from=tac_drone_pro-mechiel=ueber.net@beinmhaopng.cam; receiver=mechiel@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=pass (1024-bit key; unprotected) header.d=beinmhaopng.cam header.i=tac_drone_pro@beinmhaopng.cam header.b=SkBXO8qp; + dkim-atps=neutral +Received: from mail.beinmhaopng.cam (mail.beinmhaopng.cam [81.19.139.170]) + by koriander.ueber.net (Postfix) with ESMTP id 34624DEB0B + for ; Wed, 1 Dec 2021 22:38:08 +0100 (CET) +DKIM-Signature: v=1; a=rsa-sha1; c=relaxed/relaxed; s=dkim; d=beinmhaopng.cam; + h=Date:From:To:Subject:MIME-Version:Content-Type:List-Unsubscribe:Message-ID; i=tac_drone_pro@beinmhaopng.cam; + bh=KyzzkHYrcJx8uBLZ5Lst/vX2q/o=; + b=SkBXO8qpW/Cl1D3b95oQQrncJeRTKOpzjsYOJ2SgvhlTpfIlr3UH/7jSUBEjQOjeYzLdvilx6I6u + xGInsVr/i6Of563sAtJ19edv6CwPQM6jlSTZ0yL3Co7dUKgUYmdSRH085fHmlBUTBOdYzgxfcLyM + T6pxt9PgYmsC/NNYvt0= +DomainKey-Signature: a=rsa-sha1; c=nofws; q=dns; s=dkim; d=beinmhaopng.cam; + b=kt4DmEKI87Nt8D5W065BQ3K3Exo9mxAF/jp3/0q2yOuoKqvFHGdFRXMkmdoPhDJpojuALHRMBaEJ + cjnTfeeuY6DMhUrlz9cvCACN9RF5/SAmE7ThnrAMe3FUPhfD3kPp2ZRT6HUhlvUkURyA9/XCZV7J + 6cK+8A/ETCVq6xOQ8yo=; +Received: by mail.beinmhaopng.cam id hl02se0001g5 for ; Wed, 1 Dec 2021 18:27:33 -0500 (envelope-from ) +Date: Wed, 1 Dec 2021 18:27:33 -0500 +From: "Tac Drone Pro" +To: +Subject: Stunning NEW Tac Drone Pro w/ 4K HD Camera +MIME-Version: 1.0 +Content-Type: multipart/alternative; + boundary="----=_Part_214_1299368401.1638394457372" +List-Unsubscribe: +Message-ID: <0.0.0.17.1D7E70B0445E172.279B3D@mail.beinmhaopng.cam> + 
+------=_Part_214_1299368401.1638394457372 +Content-Type: text/plain; charset=utf-8 +Content-Transfer-Encoding: 7bit + +If you are unable to see images, Click Here +http://www.beinmhaopng.cam/15b5X23_95yZC8612C1bbabS3871_18EiIGabIhf4IFIvErIx8BR2ndQSR6eOH10m5klHtT/infinitum-series +Can I help pay HALF the cost of the American Home Shopping Tac Drone Pro for you? + +This stunning new device does everything that top brands do, but they're built for everyday use. . . + +And even though they normally cost $194, I want to extend a special 50% discount to you today as a courtesy from me and the American Home Shopping team. . . + +==> Get Your Tac Drone Pro(and I'll help pay a portion of it) + +But you gotta hurry. . . this special offer expires TONIGHT at midnight! So if you have any questions or concerns, please let me know ASAP. + +==> Get Your Tac Drone Pro(and I'll help pay a portion of it) + +P. S. Here are some of the amazing things your Tac Drone Pro can do for you . . . + + Covers Up To 100 Meters For More Diverse Photos And Videos + One Of The Fastest Drones For Its Size -- Up To 15 Feet Per Second + Up To 15 Minutes Of Flight Time Without Landing Or Recharging + Multiple Remote Control Options Including Mobile Phones + Convenient USB Charging Port + +==> Get Your Tac Drone Pro(and I'll help pay a portion of it) + +P. P. S. Click the image below right now to see the EXACT Tac Drone Pro Quadcopter you'll get in this limited-time special offer. . . + + + +==> Claim Your American Home Shopping Tac Drone Pro RIGHT HERE for 50% Off + Free Shipping! 
(Do This Today) + + + + + + + +If you do not wish to receive future messages +click here to unsubscribe +900 Easton Ave Ste 26 #137 Somerset, NJ 08873, USA + +http://www.beinmhaopng.cam/d135Hy2395D8vp613M1bbaqcz3871h18niIGabIhf4IFIvErIx8bR2ndQSR7QJY1HL05yJHtk/wavelength-diaphragm + + + +------=_Part_214_1299368401.1638394457372 +Content-Type: text/html; charset=us-ascii +Content-Transfer-Encoding: quoted-printable + + + + =20 + =20 + Big Surprise Coming=20 + =20 + =20 +
+ If you are unable to see images, + Click Here +
 =20 + =20 + =20 + =20 + =20 + =20 + =20 + =20 + =20 + =20 +
=20 + =20 + =20 + =20 + =20 + =20 + =20 +

Can I help pay HALF the cost of the American Home Shopping Tac Drone Pro for you?

T= +his stunning new device does everything that top brands do, but they're bui= +lt for everyday use...

And even though they normally cost $194= +, I want to extend a special 50% discount to you today as a courtesy from m= +e and the American Home Shopping team...
 

=3D=3D= +> Get Your Tac Drone Pro(and I'll help pay a portion of it)

But you gotta= + hurry...this special offer expires TONIGHT at mid= +night! So if you have any questions or concerns, please let m= +e know ASAP.

=3D=3D> Get Your Tac Drone Pro(and I'll help pay a = +portion of it)

P.S. Here are some of the amazing things= + your Tac Drone Pro can do for you ...

=20 +
    =20 +
  • Covers Up To 100 Meters For More Diverse Photos And Videos=20 +
  • One Of The Fastest Drones For Its Size -- Up To 15 Feet Per = +Second
  • =20 +
  • Up To 15 Minutes Of Flight Time Without Landing Or Rechargin= +g
  • =20 +
  • Multiple Remote Control Options Including Mobile Phones
  • = +=20 +
  • Convenient USB Charging Port 
  • =20 +

=3D=3D> = +Get Your Tac Drone Pro(and I'll help pay a portion= + of it)

P.P.S. Click the image below righ= +t now to see the EXACT Tac Drone Pro Quadcopter you'll get in this limited-= +time special offer...




=3D=3D> Claim Your American Home Shopping Tac Drone Pro RIGHT HERE for = +50% Off + Free Shipping! (Do This Today)= +

 =20 +
+
=20 +
=20 +
=20 +
=20 +
If you do not wish to receive future messages +
click here to  + unsubscribe +
900 Easton Ave Ste 26 #137 Somerset, NJ 08873, USA +
=20 +

3D"Altai

=20 + =20 + 3D""/ + + +------=_Part_214_1299368401.1638394457372-- + diff --git a/testdata/junk/parse3.eml b/testdata/junk/parse3.eml new file mode 100644 index 0000000..691db33 --- /dev/null +++ b/testdata/junk/parse3.eml @@ -0,0 +1,714 @@ +Return-Path: +X-Original-To: mechiel@ueber.net +Delivered-To: mechiel@ueber.net +Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=40.92.89.18; helo=eur05-db8-obe.outbound.protection.outlook.com; envelope-from=atuo09@outlook.com; receiver=mechiel@ueber.net +Authentication-Results: koriander.ueber.net; + dkim=fail reason="signature verification failed" (2048-bit key; unprotected) header.d=outlook.com header.i=@outlook.com header.b=Mxewfyi3; + dkim-atps=neutral +Received: from EUR05-DB8-obe.outbound.protection.outlook.com (mail-db8eur05olkn2018.outbound.protection.outlook.com [40.92.89.18]) + by koriander.ueber.net (Postfix) with ESMTPS id E2370DEB0B + for ; Sun, 5 Dec 2021 14:12:07 +0100 (CET) +ARC-Seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; + b=D4dm9zT+ZRtMV222ztCPPyh/aL4tt8m2FNkMomuwjFJKqEXgO+NKZi63IPPjJQz13AJZecLifDKzfO0+EWlg1xLhdtcBUGKMLHVv115yhbTu0LltvAshzsmVCcmbXfVPg+Ud5bJTh7BpSn/9RRBFRiyJTaRIFjPvddbh9eH973ll89tcqKEa/hT5V8bRYAABGXvou0cDERmopB41ALF+ChcmtINXeyeVEGiodZ4UmgNfZWwTHEXnoeX53D4y2hGhKeokeA0mlengCPzTyDwVR1C/qVJIQ/5vOFX7bP6hY2j3aid8mn7iyApBCvMKXvGq1vKRVUGicQAIEs/MJcBnKw== +ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; + s=arcselector9901; + h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; + bh=O0pv5HaXjAQD7LAXgnf249Mbq8UwfLiY4Vixc66e100=; + 
b=WynHFDUyvAWL3pr63mNDzoWbNCuvDNTXek9PjdxnP8Gv4bZOmQlebYnM89tCwIdTWj0jy0LfukrjPdhOocR++odhxu4NVbDpAp+hd2hkLm1KhGIT01n5fz86YWgLWphrP7ScJ8+SYxUCOj1VpcFIQEJLzrhzESFruXQ9MWNArXPWxaBMiKdTuNm2BIfElotAA0hsd0MficaugbOKzNZ/bbaMIuENtonaubbNx+zQVLAddPpPBCmle9XsW5sc0OJJbkIJv32GwIpmKSTWI0DeCk2uWD+sPboFHIX2sYA/qt5qyBgcTrMGmdrxZCaYIDE9/5nLNdkrXxfryPcYn4ekwg== +ARC-Authentication-Results: i=1; mx.microsoft.com 1; spf=none; dmarc=none; + dkim=none; arc=none +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=outlook.com; + s=selector1; + h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; + bh=O0pv5HaXjAQD7LAXgnf249Mbq8UwfLiY4Vixc66e100=; + b=Mxewfyi3Xwg8uZWTASX/1i1HfOZDEuvZ26e04zwUKLAQhHr2ixXoKklht1iL9/gmzN51gh2gkQIl1/VnkZAfTyj7udl0PzTFnFUOxXXu1w+gMTqDEwR6yI2TT3qo7ruvpNz+yUUFi+Ah2QxMfhfTLKuKNfnGi/opNwWxiTHGYUZymX+rKV8F1+MpKx5IXcr07Ak8YC5spBR7Xxc9kK6oUYJufpd0whCaP9e5ZxQN5kAJJv1378w0Mm+7kQIAEx8yhSAdDH+TPhDi9rnQMVhDW4DI+GIRXpibWypYswOAjpj5I2XQCybU/Zoq+qaNvoO8BOi1AappHFGCwvg3M0n35A== +Received: from AS8PR02MB6934.eurprd02.prod.outlook.com (2603:10a6:20b:2e5::10) + by AM6PR0202MB3477.eurprd02.prod.outlook.com (2603:10a6:209:20::17) with + Microsoft SMTP Server (version=TLS1_2, + cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4755.14; Sun, 5 Dec + 2021 13:12:03 +0000 +Received: from AS8PR02MB6934.eurprd02.prod.outlook.com + ([fe80::f89e:5f1e:a131:231b]) by AS8PR02MB6934.eurprd02.prod.outlook.com + ([fe80::f89e:5f1e:a131:231b%6]) with mapi id 15.20.4755.019; Sun, 5 Dec 2021 + 13:12:03 +0000 +From: ENCORE NUMBERS +Subject: Quest Recognition. +Thread-Topic: Quest Recognition. 
+Thread-Index: + AdYWYcbIqx0uhnMt1kOwgXT5+x5ulhVbXdsiAAB47MoAL5wrxwA23E7hAABfBMIAABTbQgAAF8+hAiOPmTEAADM+rAeTPTadIAV+V9P//998FIBYDoZc///fvZf/d1GygIAF2lEAgAAGJ4CAAx9aAIAABT2AgQgMb4CATnctw4AABM87gF3zoOT/yq0JlgPvljPYBoVP4w4= +Date: Sun, 5 Dec 2021 13:12:03 +0000 +Message-ID: + +References: + ,,,,, + + + + + + +In-Reply-To: + +Accept-Language: en-US +Content-Language: en-US +X-MS-Has-Attach: yes +X-MS-TNEF-Correlator: +x-tmn: [IokaHQdWoUir3aRf0cKMPd3h4TRs5qFT] +x-ms-publictraffictype: Email +x-ms-office365-filtering-correlation-id: ce01e59d-12f4-4a45-2536-08d9b7f0d4bf +x-ms-exchange-slblob-mailprops: + qY7UPrLqMbYd3xiNdupmaU5L54H05Am1/ziTzuYqiinRqCQj3qZZLBLSYPK3vXs5jtMuQvsNOCLWgxC0YthRuOYRxE+zB78tfvt7wwZCylv//SkocQ+RxLehsJkjERKoKr2fV6u+eoHxClHO2V/m8aTi/2d2l+EXgfacyQln3pF8eVjycJbBevztaC/3NlPx+1IlK3EK0MRrBHsFScBDBSMq7dCfrFFVoUr4itCoLVFcF+nSFq1ezURWjBsmqJ6NjVJP5ngZDnYd8btDluUkj2dm28Fczyd49OJaP4PH/OhwNm2R/gdgtjXHSa+zplcRy2TD/9YqToJBeEyRYk9OyZD2isO9jN8rJ9fECah/SbMT+J97Sn7OiULC48Oxm46TJt4sF7eXjMTlIJnCpeUefMonzlT8utAM4HjAQ+jmU9dvl3/mWPgd0SW/AmqggIGkrhINnDyEIuiUO+tBK+C1qppc1Adyqo5mpHZ1MiJE7XuOcb7UHIePFPWRPwSsVabkOT6Q+1vDpE3QFlUAJO7aevVu1zyuI6erq2QBvDeqn6H/Iu1H139X/DbfBa8bqRK4bRqvXcoUyvg5SvYGNc0zk9ffVH55SfCpJm2xYrFzHXZtGL3doy+AHwh9ajaUKagdo4Gf+tSM5pK1EKMTh/1VUgGUrpgA2GvPCGYaTxOhd1ggvPcveCWbhXr3X6kJLAx05bRMPsscuKoNUr3nGKKAtg== +x-ms-traffictypediagnostic: AM6PR0202MB3477: +x-microsoft-antispam: BCL:0; +x-microsoft-antispam-message-info: + 
Mzfo5lT0SkAn1nd3CUpE4pMNgp0d+D+WNgqIVD48n7XY7W+DAJyGZB0BU9lgjUkpYOyQkQq0HB85VjXgP8Ig0vBpx4E1NFVVag0g9R0VFtaznjxfSWE1Mvlmo8znzdtiUZs1jjxhc73HUfOA5wGWemKo454raIekifPETgFuPYEA1PskK+K6LxYQFHit53xjHERLqK3kJ0PRedP+kKPz6Sjf+yBymnJvFU55LsqqepPB0oQItVpBszfydyU+uHwLmJPWCvAUQTfesUCvipl0zE9AtyFnPnbrvx8KCDHIZ6mQKjrC9eEWIYP+elPRNPAzBtRCCSu0hNKoRi2nBh0cjbHu12RM7P1PoDUEousRMTDs091rnLw+ZsVp+WCTnybpXsr4CXEagp4AnHIHKD540cxjzGJvguO2kw95G472q/RDbLTjISG+8VDvErE2sn0SSgmNN3Rp8hZgPDIFBHvucPx0WwtuDp7B8Stu9cli26at6cYKsF3yO3+SzNTGYLDWYFZuiomb82Ia1eECwXc4Q8CevcbhAZx6EmMyQTc8dxTmOYJdKSJr1jAEEU92gCrz +x-ms-exchange-antispam-messagedata-chunkcount: 1 +x-ms-exchange-antispam-messagedata-0: + S6qFssPaA0pcUeBPTEk44yngGPnvkpPtDnQ01OdPFhZ5AL5zXtHlKOZ8Ss7PT8E7qPR/i8sWz+4iziUbN5I5Clc0M94bKPxwppPvMtQeEloNYXbgWY7/LJyZYSI8ViQMLFOXFqcKtMTcL1aE+mfBmw/vKwQ5IyzSHML0vfJnVkkra3JjL2A6UoWPlnscDEcsMdIMJs0ZZUot/vol6MPPqnYu2QjWqAUaZGEQM4xXx2cFkr/aZ32jZ2AztDotD6aqdyGwXDQhrQ6/I9JaZPEje8CZxmR1Al88EbTF0Csy8ufEYEAJ/qleLLXj4nTiJUU29qV+zjHVcrz5LiUv6esqB7nU192zDM7WjQCeuXBgExNAdeOzs/y6lqoE++yl1Qmq7kOClLiOeM+btgOfdh5uqbMWdCrF0xBmFqwRdGc55W2104Ax0gMgxFAqZnIqx/5FxEEu3cp9k9QUB8U9LkOj18nUlj2FtrXK7djVrAvi65D0aj1r5PqmlNtgTrOk9w4oAEw8IbztxD+5535/OXDByAoroXYgAX1h1xKBWOLF83VOJ7YktWYHWad0ccsKNcbNpIwT+Me7boj0Yb3zhGjcNfTvnHfqHoBwFfSWl/NdEYR5dHYXjct5awxL0GOcuSJKVTlr/kGQHXA3nzVxRF0COrDTVqzgz/GIZzpKuwmHeR22SdzTpOcNbjLRYguJCIRtuqvralTp3nRh7MgsAcmjgnVvp45xX0KUPve2VE3gRlgQ4CqC3guGaIUWlVI1q+V8DgV19lWChNXxRJpWeEowrjCGrTaov1q3cmf8KbV1V+SaFnJxOF0sFfWZ1jxR/T0g5rClHY5Ftv8YP5iUxjjQkabPX4SSVXbEO3j0RXx03g3q+C+GWFPBWOo8H13YT+9abDhGNnl++/GBx86D98ibtbjZI3nSOs6XnjYfXHlTcbnHOJ/P10oMIkYI5DPUeQTS +Content-Type: multipart/mixed; + boundary="_004_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_" +MIME-Version: 1.0 +X-OriginatorOrg: outlook.com +X-MS-Exchange-CrossTenant-AuthAs: Internal +X-MS-Exchange-CrossTenant-AuthSource: AS8PR02MB6934.eurprd02.prod.outlook.com +X-MS-Exchange-CrossTenant-RMS-PersistedConsumerOrg: 00000000-0000-0000-0000-000000000000 
+X-MS-Exchange-CrossTenant-Network-Message-Id: ce01e59d-12f4-4a45-2536-08d9b7f0d4bf +X-MS-Exchange-CrossTenant-originalarrivaltime: 05 Dec 2021 13:12:03.8029 + (UTC) +X-MS-Exchange-CrossTenant-fromentityheader: Hosted +X-MS-Exchange-CrossTenant-id: 84df9e7f-e9f6-40af-b435-aaaaaaaaaaaa +X-MS-Exchange-CrossTenant-rms-persistedconsumerorg: 00000000-0000-0000-0000-000000000000 +X-MS-Exchange-Transport-CrossTenantHeadersStamped: AM6PR0202MB3477 + +--_004_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_ +Content-Type: multipart/alternative; + boundary="_000_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_" + +--_000_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_ +Content-Type: text/plain; charset="Windows-1252" +Content-Transfer-Encoding: quoted-printable + +Your email address has won US$1,000,000.00 in the Canadian Lottery Online L= +ucky Program held on Fri, Jan 1, 2021 to encourage/promote globalization of= + internet and online draw worldwide. Your email address drew to the winning= + numbers 18-21-28-29-31-32-49 Bonus 38, which subsequently won you the lott= +ery in the 2nd category. An overview of how OLG is modernizing its games an= +d products at Charitable Gaming Centers across the whole world. OLG is an a= +gency responsible for province-wide lottery games and gaming facilities. Si= +nce 1975, OLG has provided nearly $40 billion to the Province and the peopl= +e of Ontario and beyond. OLG's annual payments to the Province have helped = +support health care; education, online gaming, research, prevention and tre= +atment of problem gambling and in all enhancing community and individual th= +rough online gaming. + +Mr. Don McCabe. +Don McCabe Consultants. +N=B012 Sturdee Avenue, Suite 301 +Johannesburg 2196, South Africa +Tele: + 27-792-197-689. +Tele/Fax No: + 27 11-3361-974. +Emails: dmccabeconsultans@aim.com + +You are advised to send the following below information=92s to our agent in= + South Africa (Mr. 
Don McCabe) to facilitate the release of your winning fu= +nd to you. + +More details enclosed. + +(1) Ticket No: 6460DGH. +(2) Serial No: 0909AOB09. +(3) Batch No: 2GH267XZZ1-5-42. +(4) Reference No: 9527BCV-J6-0-0-8. +(5) Winning No: 18-21-28-29-31-32-49 Bonus 38. + +1. Full Names............... +2. Country/Address.......... +3. Telephone/Fax Number..... +4. Marital Status........... +5. Occupation............... +6. Date of Birth............ +7. Amount Won............... +8. Winning Email............ + +Sincerely, +Mrs. Frances Lefebvre, +Toronto Prize Centre +20 Dundas Street West +Toronto, ONM5G 2C2 Canada. +Tele: (613) 482-4751& Fax: (250) 828-5631. +Web: modernolg.ca +Email: olgg@torontomail.com + +--_000_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_ +Content-Type: text/html; charset="Windows-1252" +Content-Transfer-Encoding: quoted-printable + + + + + + + +
+Your em= +ail address has won US$1,000,000.00 in the Canadian Lottery Online Lucky Pr= +ogram held on Fri, Jan 1, 2021 to + encourage/promote globalization of internet and online draw worldwide. You= +r email address drew to the winning numbers 18-21-28-29-31-32-49 Bonus 38, = +which subsequently won you the lottery in the 2nd category. An overview of = +how OLG is modernizing its games + and products at Charitable Gaming Centers across the whole world. OLG is a= +n agency responsible for province-wide lottery games and gaming facilities.= + Since 1975, OLG has provided nearly $40 billion to the Province and the pe= +ople of Ontario and beyond. OLG's + annual payments to the Province have helped support health care; education= +, online gaming, research, prevention and treatment of problem gambling and= + in all enhancing community and individual through online gaming. +
+
+
+
+
+
+
+
+
+
+
+
+
+
+

+
+
Mr. Don McCabe.
+
Don McCabe Consultants.
+
N=B012 Sturdee Avenue, Suite 301
+
Johannesburg 2196, South Africa
+
Tele: + 27-792-197-689.
+
Tele/Fax No: + 27 11-3361-974.
+
Emails:  dmccabeconsultans@aim.com
+

+
+
You are advised to send the following below information=92s to our age= +nt in South Africa (Mr. Don McCabe) to facilitate the release of your winni= +ng fund to you.
+

+
+
More details enclosed.
+

+
+
(1) Ticket No: 6460DGH.
+
(2) Serial No: 0909AOB09.
+
(3) Batch No: 2GH267XZZ1-5-42.
+
(4) Reference No: 9527BCV-J6-0-0-8.
+
(5) Winning No: 18-21-28-29-31-32-49 Bonus 38.
+

+
+
1. Full Names...............
+
2. Country/Address..........
+
3. Telephone/Fax Number.....
+
4. Marital Status...........
+
5. Occupation...............
+
6. Date of Birth............
+
7. Amount Won...............
+
8. Winning Email............
+

+
+
Sincerely,
+
Mrs. Frances Lefebvre,
+
Toronto Prize Centre
+
20 Dundas Street West
+
Toronto, ONM5G 2C2 Canada.
+
Tele:  (613) 482-4751& Fax:  (250) 828-5631.
+
Web: modernolg.ca
+Email: olgg@torontomail.com
+
+
+
+
+
+
+
+
+
+
+
+
+
+ + + +--_000_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_-- + +--_004_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_ +Content-Type: application/vnd.openxmlformats-officedocument.wordprocessingml.document; + name="INSIGHT..docx" +Content-Description: INSIGHT..docx +Content-Disposition: attachment; filename="INSIGHT..docx"; size=25831; + creation-date="Wed, 13 Oct 2021 07:43:37 GMT"; + modification-date="Wed, 13 Oct 2021 07:43:37 GMT" +Content-ID: <585A5A6BB2653341B13E4DF17C911446@EURP193.PROD.OUTLOOK.COM> +Content-Transfer-Encoding: base64 + +UEsDBBQABgAIAAAAIQCht/xGcgEAAFIFAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIooAAC +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC0 +VMlOwzAQvSPxD5GvKHHLASHUtAeWI1SifIBrT1ILb7Ld7e8Zp21UoE0lSi+R4vFb5nnswWilVbYA +H6Q1JekXPZKB4VZIU5fkY/KS35MsRGYEU9ZASdYQyGh4fTWYrB2EDNEmlGQWo3ugNPAZaBYK68Bg +pbJes4i/vqaO8U9WA73t9e4otyaCiXlMHGQ4eIKKzVXMnle4vHHiTE2yx82+JFUSqRM+rdODCA8q +/IAw55TkLGJvdGHED1/51lOByGZPmEkXbtD4EYVU+e5pX2CLe8MwvRSQjZmPr0yjc7q0XlBh+Vxj +10U3zQGftqokhxaf2Jy3HELAU9KqaCuaSbPzf9RHiGsF4f9dbHi75NHn2FsXKMZ2tj6kWREgcozC +gY8S2qM73jrEiJFdovktc1f7zRREvCFAm2//7AwampOSFd6iCZsqOFvv1+S11CdNLGH6frH098i7 +jLTzx63/Qxi75yKhD0wdbV7E4RcAAAD//wMAUEsDBBQABgAIAAAAIQAekRq38wAAAE4CAAALAAgC +X3JlbHMvLnJlbHMgogQCKKAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAjJLbSgNBDIbvBd9hyH032woi0tneSKF3IusDhJnsAXcOzKTavr2j +ILpQ217m9OfLT9abg5vUO6c8Bq9hWdWg2JtgR99reG23iwdQWchbmoJnDUfOsGlub9YvPJGUoTyM +Maui4rOGQSQ+ImYzsKNchci+VLqQHEkJU4+RzBv1jKu6vsf0VwOamabaWQ1pZ+9AtcdYNl/WDl03 +Gn4KZu/Yy4kVyAdhb9kuYipsScZyjWop9SwabDDPJZ2RYqwKNuBpotX1RP9fi46FLAmhCYnP83x1 +nANaXg902aJ5x687HyFZLBZ9e/tDg7MvaD4BAAD//wMAUEsDBBQABgAIAAAAIQBBO9lKJAIAAD8G +AAAcAAgBd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVscyCiBAEooAABAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAALRUTXPaMBC9d6b/gfGhN1t2wCRtIxga8kEyoQMl0946whL+QJYUSeCQX99F +LgmZJPTiniytpbdvd5/eaf+h5K010yaXAntREHotJhJJc5Fi72524Z94LWOJoIRLwbC3Ycbr9z5+ +OJ0yTixcMlmuTAtQhMFeZq36gpBJMlYSE0jFBPxZSF0SC1udIkWSJUkZOgrDLtL7GF7vBWZrRLGn +RxTyzzYKMv8bWy4WecKGMlmVTNg3UiALvBgAEp0yiz23rYNRAEQ99DaHdpMcKjb/wayFBptnJnvB +Q0SOmySykMLOyJzvNeQpdIjEUZMkzKtW7CKHKESNUrAbDqp+UoVx+0Ppu02mz0t4D8/Z/8q8qqpg +KfVSckKZyVMRJLJElfITmNpW3SvFJaEGVB7FKIqQ5GmgRLoDupUU3sz5g2VakHeFHf/HQkpGc4Jc +dZFj9s7j6jTJIQOr0DwXy10fat8wYErbjqZSppxBK4NHglaa9w3B+SdSqq86sbhwq3vsPszoBBu3 
+NHKlE4ZdKXUkofWhhFCsC+JOrQhAnLjlmlEckuzu5rwqIq7mmU/bt4Ps1/3QDG6uaEjOvqeTYlqN +vlX1Vc3xwbkrqe1C8lz+VlvzBAkQnUufSwvz3fhgz35KSrAU5PAUCAYPLibF2fhyWA0o/2ljfTE2 +0+z60h9fr4tOET6Wkzq3sTjqdNudbvz5uBN3ozjaTsQ55GsRoRe23/sDAAD//wMAUEsDBBQABgAI +AAAAIQD2mx88/hIAALB8AAARAAAAd29yZC9kb2N1bWVudC54bWzsXVly28gZfk9V7tBhTSV2RVwA +LhI5phKJkuyZ8qKyPHYyL6km0CQRAWhMAyBNP+UauUXOkKPkJPn+xkJCEiVKomQtkEsi2AB6+fvr +f+/2q7989Vw2FSp0pN+vGLVGhQnfkrbjj/uVXz4dVXcqLIy4b3NX+qJfmYuw8pfd3//u1axnSyv2 +hB8xVOGHvSnuTqIo6NXroTURHg9rMhA+bo6k8niEr2pc97g6jYOqJb2AR87QcZ1oXjcbjU4lrUb2 +K7Hye2kVVc+xlAzlKKJXenI0ciyRfmRvqHXaTd48SLusW6wr4aIP0g8nThBmtXk3rQ1DnGSVTC8b +xNRzs+dmwTqt2YrPMB+em3R7JpUdKGmJMETpQXIzr9FoXNZ2SkCqIn9jnS4U28x64nHHz6shdJyZ +/3zyapi8etJ2napaDAS02AWWhtKe02fAZj1g0f7YrwATe509s1XJig7EiMdudP7OMRW1uvht6cqC +Y0V1qfTjJJq7ApVMuduvnERK+uNKXT9wJP0oxB0eWo7Tr3wWyuY+pwYne35YLLHCxQP67WFWZSOp +bTigqnQjaYklXanysgb9JI+G37JSYycrWbydlNXTAeBTj4c+z1HoqGuaLTOn0DGWwmVkKz7+iMlm +5oRckC0pO0s2UBG01pAqUOexAOTqkc56mgP3woBbWIKBEqFQU1HZ/eCzoZhwd8TkiEUTwV6BONEu +/U3WR0YYDZrDzmCneTGSnhStds2GabAPfsSVI9lbGUVCzRkEHHvNPfBTNpAqkEoLhlUEe25I2mIz +wbgSLHAFD4XNIsk0ziD85zJmv5z8YGyBu9FvrdGg26RExIqPhUbe2JVD7jrfNFUJjo4Psvsi0oSX +vuv4grnpXJC4Y5ASrj1zbFErwvaO2GDXMAedxrL0+KeV8eihhGBPBMYjkChXMwwiIQ2kJOUVwnk1 +KWc9l4NVpOJe+NVf9wghxFkzyqZQyXjs02UZuwMoVOCXcapM/4H9HSufwQBwXMZtG/IoZBMO5Uj6 +5ziF42v+MIDaZTvcz/nxh4QlvI2t0zk7XsWHteAatA8NWC3PVMgrCdp7bCJcm4G+R8rZYj+DkMYW +04JuFemeLh7JjjyvDC2LpDqsJ09Ggq0lls6KowvQDYzPSOiRkgVLzSc1wo+9IczqovQCSDOg3jN2 +N2eyrOaKGftbqZEaO1XTqJr42602jWrTrLa6Kwn0dBG6uy/9OGTNVWvznqEBRN6NcbsWUnZ3VpHh +CQMACvXEsSYsjIeh+C2GE82da/lI2jRxkUwXTuWjCfPE4pEYSzWvsT2fSbjspg64DnTpiZyxD29f +MydknrShVjvfiAE5cG2MuSdCrWOD5dmxhSIescEEtk/Eh/CKZDYPekDMipOzLUz42ESS14TU8FpW +PeQKNHrfmjMI9QCOM4fqgHsP1oCcOr4lqqSy571fNI8r6tOIW+Txc0RYYyf0PDO62+0tXT+pCLoa +GwaGL7gCSX5oNRh8hC6cdBl/PU5b0qMiUgVCwiwhQmRGHRlzQzGXftLzPxEF/Ji7LOBzclhigAmv +zuua8KkgGRqg6TAOYP9F+MrdaAKyK/EjEyCeNl62IGW1tZKMaIsogb5aky2yiKaonfpKPYiU4JH2 
+j6JrGBhI5dGEDPH6WD+ByeWuC1Npwn2LCuEl82IfHtH0tu1MHZs6Hk2UjMeTYtu1wsJZX5tPbZ1M +EC251S7wDz0dw2i1q21Nbf5iCnfMw+7+xernRRQuPK49cEdJkfbtJUbDHZue96EKzHrhxMb4tTvU +gtdAEYm0Q7Rf4XEk6esIK7tfOdI/39F8evD0IK/dO1VjB+As76wBH4p1ln4BaR8LwrQEZgnMSq6d +3XyhEjAXoITn1A8RoOF+VOIz430U9Dkf9slp/+1s/KJknBnlchrdDp/v//sfw2QnUaxsIdgeNLRY +bLGT2IH932wYa+hQJSMFKEugalUmB+Vm6UGM9GcJM8AX4TBWY2Ya3Q5QKmNYIHsj5Vi8BGoWXC85 +6hqJBXcI1E/CFT32Z2ZuV7e7ZhUWfLWz0y1Ffia4ND7zBIWs9I4mhIzSeww1PQpbiQBaP+Jf2XuZ +4JQZcDY3O0a1u90qcZohssRp4pD4LvQgiX9IIdKwx5jtWRaseiuzn8K/csejnLkbCv3UpYSB6cSn +ZQ9fQZndpP9pmJMxyZW4WTpcJ3l5Wd00srKb2wEZkyw9eBl5s8m6zCn6HD14d0QPWu2IG+scKm5P +nTSHKhQUJ0AMYyRdV1KOL+IWuEB6VJIzjVjC//71bx2voJwKCr5EuFkwDNiLom/wJUU30kALQka6 +fmQ6U+IWBUkQYVJ5kHoUUwckFW5eLhbTdzfJa9Taibub01fuCBkZa1rKDCi4apdzL683al8ivCVH +mgJrGAyXje98shP4BwAV7YUO71fO5T9hb0CSlY7eBz0EtyYIEdpOGH1CaLuir/bzq7fYdmC0mnCS +6RvI/M6+ho6HyN6xROY1bibZ+lPxRjjjCbLAzbbR6XRaTaQ/I83V8W0k9+PdCqKP1qmw9TsuB7Kj +n/yBoEgD7iHeJmcfEDx1eUAFuoN5O+yrfmtOf4luQS+QIcKV0n+TN3+E3JF+BQSNPT95Hc98GI1C +Ee22Wt3txnYDUlu/mZZmX5OKCtV+PlNtwJEyqXgwOVtzdbu9s2O2s6ouqvmzrll8jYhHWBiJYW6T +65FZGI+Jn3Y2KDEaCSs6TJ4kunQbuMcosx5k7lc6Tfo6XFBhhi69x/aPhCbY93GsmAMKtyvMR4S5 +Xzl2LDjYBMNc2CK0UEe6E2A2m9VOpTqVLscNZ+zrnQAz2gCCiLMf1ePAldwOsSHAaNcNoy7dcS1A +gv7uK96bIFJ6OnAd6zTdGwKsndlhcMEOkxR6xT0Kqkf9VT/Z2E4QjaMj5Eyh23/8LZbRj/8YAt+n +yTVNO82eHqMmqPV++ppmxLH0S2Rr8Z6eo7TkLeAW3rqDvkRU3h+LvTDA3BA0055c1v5tW10aygGP +OIsV9jxcm8JBMvmYMlz18Jt2C1e3rs2fHjtAW1I1SJECD/C8J+BdCzg0ft3JRX+T3nMiSwKU8xPN +8iKl5AxpBzYYXjL/WYVJLXX6WqAIcgmCI0RRaSLpmqmeQNpZAnWsTtWjJaSBjw1WFjZyRXqLANKv +CV+8FyrrI/CWXCNjIbImVEyR2bS8joeyG7oDizapOyG8D2w4e4fEkzSwS+9/HSmPPrHl5wxP5T1w +qEvYE5rLXg5UGL0W0kPyRIgVodBPzRb49G1IPcaj2SPUli+JEnokrs9m/Uq3bbb1C0t3PLj9FXMd +sPCd3I8ITgOiH/q2fjmCUZZcowHXRzvZONNLfNWNp0yAVs7yd1znQo+uF7IQ10ByaYRo2YqQQmmE +XOyCuUwVu4VRRkbIC+Ml+wSBip0X5BzrtDqNg9dvagTKfGdQCdASoNnSvH+Ami/ZiVAOkt4IoI1u +o7v3Yb9RhhdgtKebS3O5RWHZrPSOeAbxgkcZXrgjemge2nzJ9jk0JY1Q8/Ubs7P9t19/Nartasss 
+WWmGyHtlHSVOj44SizHZgaZx2nrJPoqRUMjYFhqr0Ii39wefqz93qg3821kHrNfcYX5n8YVn6vPb +wOEFsGpwwEK+LBPl5mbhmZWnFdwqbU2Dtf2SfUm3TpHgv3CrULpvZi3cFoCYpr82jc7RXrcCUtxf +XGx93KJbd7MPJ4+gLSUtJWVgm4UNqxeq/i2z0ewsjuFI3NNnCpdCjMU72u2fFhEOU23ishT3td3c +FxFsM57uS3zbKyXN0iA1TUHahz0SWnZGjR3F2IPynjYK1Yo/VxuFxalOF9mZwhIZjUeJDLOGfO7Y +j9S8vpds4l6go0QGHSWUmRhpdOzZ8IxmjVGGVTChiJBOs9JbnTU8CsgAB8xELWJJjUGzaRzAKZ1K +30SOpIUkGq7NNRPFZhDqz+uGO7OsknMy8aLdYOjzJdMLRkohD/jCz5zAU9zbXArX5MyMSyj5tIRr +q8be6Z22LvYf4ICMZQFbWCglMp4ZMtrYUm1ZMR2BKP2FXD3PQktkPDNkdLDNk7K2kKm176hosgyO +kmfcWO26kX7xXexLssq2ccaDR8o3+1JkD1cjABG9g+ZermQ9S+vrcU32Ti13fOmE8MWKL8z2GWW6 +mFyZmt5p4dL4lxOpTfOMh3hTSusmQbbS3znEalw+XPUhOTAztfVCaV08VKM4U5kttETB4uPafWa0 +OsagcLTtRtxn2mxy9N91vGaXGEy3SErISLcE75X5ryuxsb6jLTEZr4TS96YKCQF9Pg6SXucFNvA8 +IYZRa+/AddCytvN1U+sg6yRN3juFE46QMIrIW8jeIgg3nCqxVZjJJcSTd2Rthp6B92qv/m38EkU2 +dBOu1e40Dnc6SMDbrNN/07P1aCD1SeLkcuzVOFbON8EGSJ5WogAoGommdiZVCFblPJ5Xiq4pLjYg +DJZZg9lgB9h2g9POcBa9QCLeFxFG5UQSBR4Xj08XJE6ve19O3+Obvnft18wcmEyfPczvIBWmkDlz +T4r8UkLgSn39JgvtUane2QBX6j/JCRaFRbukja20P26iUV5nQlb2l73oGEg6rLZ2cGLudtsowUr/ +RczDmycEQ7Gb/9q4KiyuQy/ASahOmKRpXVdVyYyD9PgRZ3G82LkDSdayH3ZfYNvgS7aDM5vbnWbx +tKwHsmR211gNxgE8KNj+iS6fQ05WuOSDKT6esO6khiVrRisrqPAmXAHzstLvgv5gS7gnBvS/B/Ur +EfZL6c1ghWzvjTL3hzkCkGH3ixj20kONaWPoWqdgtRuGeVBO9cKlsOQOfqBgXSn4Dr3/AwAA///U +V9tuGzcQ/RVGTwmgKpZsq4oRG3Vcxw2QOIadts8Ud1ZLl0tueZGi/FF/o1/WM9xdZ9XaRZqiCWpA +MkUOZ86cue1KbY7E86ebo3jC3/7kOb4EPkEX18ejvb35+fxssT/qt6583jydn84ORln4qr3z0tkY +ICWD0vp49BP5QlrJ96pTGwY7T/mWbr/PQv6vnHFsdC0Na+c/vhgrqumMz45Hkd7H6ShLhw+96HTe +75yx6Xy93WNXWmDxxJnV6iEP/4fufBedB9euRugmytU7rsHvhvltQMddCF/OZ+fPXtwfwl7ueypl +MpHp3xW/GmxlzS2tzU3cGupJv3Q3jVTartp43Kr+QJGN5NvdLiCfEr7NUeo1BGg1xBr6mLKPXdL1 +6B8K47+2xXXx8+n15avLi0ePPpPpF2eHi/2Xd/QPmN6trcx0t/WpTHec+p3qe8d1czyq5a3zP3Dx +se1chX89+ccV9SD7nZuM/Muj4jhdOnGVvKpkIHFJikKQfjsRb1KIYkliuhBoMs4U5CfiXaWDqFlk +RULaAp+t0FaZVBB+xChVVSN3g5CeROldLd6+vpiIUxbFjkZaWxZ11mxFCcVoVkIWhYdOosfhCdtg 
+OZzVMmpnhULVSm1xqSJP2opa3tkU0cuCRCDlCUahr/F6rQ2t2AZgQ73faHgGLaUuAE1LM1Q/ET9a +mWLlvP6AO57WmjZjhraRvkARjVklrvFKuWabF4UO0etlardhKHG9iZBUNVQuwBYLqghvG+8qvdQR +Vpg59gL0JmvkpkxmIl6VYusSECjSawjFIddwm7x3fsxeVXJNkJMB7EQHLUYTdvg2k2xdFAOXINHp +FDrCG4OLxGjqhmEVZCjSrjXGBy263Ob4BI6ZF8utoG9y++yx9pZjJWEyp8J9cQP4XWeCAIGN9HLF +rGWPvAyZQvzQLRGlBgodhVvekuJMkEt0zg5+TgoVObuQduLx7HDviVjMFuJwvj9ljV0AM/4WNVPF +M+3Pg2AsrgleWSGXwZkELqR1dlu7FBCdqM0wLLz2QnJucF58oEmGsNHGZN4RUaRy49A9GC1QIGnq +OlmtcjZj3lYaSdIlNZJFOY/wwE04bqVSyUtAGBZAkRAeJ6pUS8tikVTF+kybErzVKuXgI5xy2yZ/ +rUOhWTd+djq6cr3XpJJNTNDgSqFQ+65GyAcwxgO7tTRlsjkoYfwgsLFY65AAEwD7Gmoh4xL2VtKD +I2CTUWa/vLQBoANizRLdcgwUGIY+NZwE4zZhu1XZMVdiqAM7OkCZ8zAiqZvKWeKarRsE1QuDHsJm +LMWN878MTujXpBtuW2MRXBkR3CzIZOH2EmHJjYh7CblysjPP0LX7aTqY+P1WO2C/dosX72tzhHpT +mG4NWCK/ptHJ7uPrvU9Au4P2eseb3sXBXP6Pn4C+wLT+vAeorxLfv4XKc/3332Z7s6l4i+nptROv +XUQVbPPsuZA1N94z5xuHdoNi2k3qPhkwV+PVTn5/Qkbc4BKXwrP5/vn5LL/sNKsbfvnYHI+ms9lB +fkmpsD5cYJ2fpJrVG8l2omuwf9CKeL2qoGm6yBeWwO/qj6eGyo+HFeExAK87386ybOkcfL37uUrs +OjC1xvDaxK89XUHwlYyhcOrC6wIn3CmudFTAuD/PpyCk5SI/Ti9dsc0LXEncN07+AAAA//8DAFBL +AwQKAAAAAAAAACEAiESUjU0gAABNIAAAFQAAAHdvcmQvbWVkaWEvaW1hZ2UxLnBuZ4lQTkcNChoK +AAAADUlIRFIAAACAAAAATggGAAAA6NweDwAAAAFzUkdCAK7OHOkAAAAEZ0FNQQAAsY8L/GEFAAAA +IGNIUk0AAHomAACAhAAA+gAAAIDoAAB1MAAA6mAAADqYAAAXcJy6UTwAAAAJcEhZcwAAIdUAACHV +AQSctJ0AAB+2SURBVHhe7VwJnE5l+559n7EUZpAsoSLyEX1IKBWFolBSiZS2L7IOQyiihAit9GWv +ZAiRfd/NwgyzNGP2zZh95l3Ouf7Xfc77jpnxVVOZ0P/0+92d17znPec+133dy3M/z3McHIz/DAQM +BAwEDAQMBAwEDAQMBAwEDAQMBAwEDAQMBAwEDAQMBAwEDAQMBAwErg4CRWrRLWknj/47Nynp36qq +Pgag+tW5snGVGwIBGn1K7rbNu7Z376wmHzsGq6o+z7/df0Mobyj51xEArC/kkQAJfZrhs5v9sHvU +KBTn5oAk6EFp+tfvYFzhukbAToDM51oh754G2Orjji8a3oro4I2wms07SYJWTAvVruuHMJT78whc +JsBdsLSvD6VedcTX8sUyNxdsHDgAuYkJUK3WN0iEjnGI8/jzdzJ+eV0icCUB/EiCajBR9lXzxKc3 +3YTTS5bAajUzLZg7XDKKxOvSjn9aqV8jgJBAJMPfF2s8XLHyvs5ICwmFoqjfSUpgRHD/0zc1fnj9 +IPB7BBASWOr54WRNTyz19sTeiYEozi+Q0cIokqDO9fMkhiZ/CoHKEMAeDXICfBHs7YEvmzVFzNat 
+isVqUUiCbowIbn/q5saPrj0Cf4QAQgQro8H5Wt74zN0NPz43BHlpqUwL1tMkgeO1fxpDgz+MwJ8h +gLmuH/Ip2/08sbh2bZz+6iur1WLNsSjKbIMIf9gE1/YHlSGARasDqkEMX0IpYioo9PdBASXuJm8s +d3PF2gcfREZEBKyKtVC1WPpd26cy7l5pBH6LAFYxvBidYb+4rq9meDF6bh1v5NSm1PLBxZu9kVnL +C3t8PPCxrw8OTJsOU2FBpqJaT7I+qFlpRYwTrw0C/4sAmuGlF0Dji+ELKxg+mzVAFiXjZi+k3aRL +MkcJ56t7YgWjwVctWiJ+5y72j6yRiqrMuzZPZty1UghUJIAUeWZKST16fF09zOfV8UVubR9cosdn +0eMzGPbTeEy5yRNJNbyRVN0LFyhxJEAsZa+XBxa6uWHLS8ORn55epFit8WTD05VSyDjp70XAToCs +Ia1gbt9Az/OS4yn5/t40vJdm+GxKBo2eTm9PFY+v4YVEGvtCNRqeEsOCMMrXA+d93XGe8wlhlNUk +weJ69XFm9RqOFJREq6LsYVrw/Xuf0LjbbyJwOQK0RgkJUEwCiPHz6Pk5dcTwzPE3+9Dw3jbDe9oM +74k4Pw/E0vjRNsNH0vhnafgzPm4Ip4R6u2G3pysWOTtjTa9eyIqJZZGo/KiolvmGWa4TBMoSoIgE +0EM+jc8i76I9z0u4r+nFcC9e74V4enssjR5NY4vHR9LYYvhwGjzU2xUhXi447eWKk55uOEECHPNw +wxpXF8z19cWhOR/AXFySpSrqDxaLpdd1AsM/Ww2G3TaqqnyrKJalbNp8S/B1UdVvmZ+P527bsktS +QEG7+lq+z2G+z5ZcT0llnk+m8cuFexo/ioY/pxneTfN4MXyp0Tl3cNzDBUfdXXGYs4qHRNxdsJ0F +4mInZ3x5dxskHDwkaSGGRWKpPvKZf9P0orSsaqvIDKfgoqrWb8vqoeGiYaTpMoi9DZeq1uWqX5+K +v64opkWWlH2LTPteWWg+8FqCZc9gWLc9AGVTO1g3/AvWYB43d0Ph8g7ZuYH+uPhYTVy8zTa0kwqf +ksLqPkGKPIb6WM4OSrgXw0fYwr2E+RB6+ykvNxynxx+l8Y+yS3iYxj5Iox+g5++j7HV1xuEAV4R3 +csOxga4486YHSoI7Qtl4DxTqogS3hWVLd5i2P42Cn9/MzAwe+fmlqJ2LLBbTIj7L6Ks1+cRi9G0N +l8Qdc4nLEvOh11Mse56B9SfistGOC3XarOuSu2VIWOr6F+flRO+cL7qQoKLP8KtusL96QSoVQDbP +oIIrzBd+XG86Okq1bLoN6nonqBsdgR+dKHJ0gMqjGkz5jv9ew+MKHr9yhPVTR+TOc0LSeFfEPu6O +xJY1aHzmeqnume+jKOdIAPH6sDKGP0njS5g/TI8/RDlI4++n0fdQjjZxQdwLbihc7Ap1A+9FXdTN +lC3UY7ODro+mC49rdV3UZboueXOdkDivMZLWv67kxh+OZAT7jM/4IZ+1VmXxovd6EJP3LYplvjnx +5xPmo2/CuqmxjssmPvfm38Dlm8u65Mx1RsL8xkjc8BZyLhw+qyqWz6lHs8rqUaXnMYSNtyjmFZbE +bbDs7gx1Gx/uIAE9RjlBkE/yQY/z81FHKAf5UPv5t9382zYKQVB/4JHgQx5YiLDUEdkfOiH2HYb5 +UQGIvf9WGp+fNa9ngSe5nkY/zfx+nIa3G188fh/DvXj76bauuDjHFcoO6nKA1+a9IbqcIuAnKced +oRyR7yi7KD+JLvx+PXUQUn7Dz9TFIrp84IyYKW6IXdYTBcmHAcWy3Kpag34PVOLyBg2/ypywGZad +/4ayndeV57fjckJwoYgeh/h36oLdFMGPxFB/0ElZissSR1z8wAkxQe6ii5J77vtVJNeMa9b+Nqvm 
+LnzIseaC5ALzkf5QDrlAPe8CLtyBmuAFXHCHGu+C4rP0zk9qYsGrrfDq490wqEtvPNGpDwbe1xuv +9eqO+UPuxpHAGiheQS9dTSC+doDymQPy5zsi8V0XnJ3ijZjANjjX+Gac8XbX872nO056uJfm+YP0 +fAn1+292RSqNpRx1ghLlzPtTl0Qv6uMOUMa2a4k+9/RBn3a90adtb/Ru0xuHVjwC6z4CvYNG38pj +MMFnRMAqyvLLuiTMcMaZINYlP4+GpfiimR44VvJ4RSKoquVREmSsJT8uw3y4L4nmAoW42HXRcLng +jLxTbtg11x8zh7XD0Id7oue9/dH97oF4oM1T6N3hCbzY7WFM79cBP7zSEEmz+RzL+Uyf6rhcoC5n +J1OXneNgLkoYx/s993uEvOrfk92zLFnHYT7UgA/nC6Q3g5r9AJA3AGr+syiI74tFozqhdfOh8L09 +CD536OJ7xxQep/Aon/V/+90+Ge3vfB675jWB8i2NsJxE+MxRJ4EAH0gPXNQeER0ba55/gvn+mBby +mefdnLWQf7SZK4qC3QiuL5QM6nKpG9S8gZouam4/IKcr+jZ8sNy9vZpNxv6fh0E9QmMfoMftovxE +YTSQFCWEFF2UCrrErXwI5rxYWFTL1xWBZUH3jSV9P3GpS138iEtz4vIgkKvjcjGiF2YO74LmTV8q +g4GOhY6Pjon+bx2rrj3HIHdNQBlcmJ5mOCE80B1xqx+FOfd8GDufU0vUkjuvuqGvZLgaQMa9aU76 +aZX18E1QU2rzAe/jw42AapoPVVmDhJML0avtkCseqFGH0Xjpzb4ImtIDw3lsdO/oMufwwUmEceN6 +wPIDo4EtF+fMJeOnOSMs0BVxX92PsA4NGfrdcYTh/oB4Po+HG7vCtINellKHunSBWvASdfmI4Xot +5Tuolq+Akml4vNHDpeAKwEKAAztJgNMSmiUM6ySApISNDMff8vNKPR1YlzrhEnNxvOgy0QPxa3ui +JDvkK8GCYbgho8FNLPLeLIlb+1/L0Zo2Xe6nLi8D5gXEZS0it7+Pji1e0El4+2UnqHnnRPTq9wLG +j3kYs6Z3xntTu2L0mF7oO/hZNLxnFPZ+R/Ksoz6iyzIHrUbRcHnHBWET3HBh3WMwFybCylXWVU4A +sr6fYsqDefcdQKIn1Ky7Na9H8TQu4FyJ3JTV6NVmcBlPmwLv26dixIinkR9djdGC4EbRq86w0Drl +iWHD+/J7O/PJep47NYhV8So+8BcOKFnogPT3JQ87I3RyNSSv6Y8jtf204d1+ev9eL2fkf03PT/KG +erE1dXkGKnWBdQXoypRgkoBiWcoI8AijzeV7CQEO7hwONYw6nSS4h0iAvZSf+VmKxPWU1RSmJXzu +iOJFjtTFEVFBLggJqob0Pa9BsRbLMvb7KL2txdkw72Xxm8S0k9WGujACFU/nkHMVUs9+jvbNnyMu +U0s934ck6P3MWMQdbwI1jikriuknnPenLlpdwNRkOeABZZv35frkv6KLA4oXUpdZztTFCWHUJWPv +WyxPcmdQj1urlASwoKspfKZZijqk+moPquY9CRSNgWL+CMveHga/5pPKhbf7+syFJb0rvcKH+ZgP +F005ywc8zaJmmQ+aNB5ZLuz5NZ+IqI2MLF86wMLiJ1s8bzo9b4Irzi9ujZgxDzL0kwAM/eefpPFP +ELjU6lAu/ou69IdKXVTTXBqdnm/5mh74GWUW+jZ6qJxeWgTYNQwgGdVTBJbPJKCrO6kfRwpgIaYV +qCt4tOlyyR6Rxjsj+pNWKE76CSwIOsheBlPIFEXXxY9kFF2e1HRRGAGmD3gG3qUhXg/z9/b+CIWp +PCeF5C2HC+8naWkfZQfvL7psoE7rKCyW8QWJIUXhh4ILCcCRU/TSVihK2pLPiPRM1RIgfZ2PeVOT 
+AgmbSGZBk0HGZ3eFkvcUrDmv4fHW/fhwk0uB9m4+BWtWjuJ5DaHK+fF8iCghAIXXUFiR/6fzvfC6 +s0xdwCgwc3oXKEwDyhIn5H7E4SELwsiJDHkzaiNt0/PYwzH/LhdnFCwlAUJEF0ajzKZQLnWh57F/ +UjCSpBxHmUAjvM3jy3i8YY8ragAhgGojgHqkPAHUYAFdCED5Uh+h5FCXxPecERHojPDp/sg6Mp5k +u9iiMCu0vnlj/WIlhJErmUVbRlMNFzV/AIpTR6JtgwHlyOfTPAgrv6Fu6Y0q4CKOQYMfvpIAIAH0 +lMTilLiILoJLBHEJfy8A2YfHF6kF8QFVSgBLXPAj1u/cdG+JpdCrkd4YIOPzorqgdaMh5R+06STE +HuL34v2JPP8XyjkCG8YHkaEZQf/muYYVvCMIg18eCIXAW8l0edBkgn5uEtu8Qb5I3z4YhxrXwb7q +zPsbGDplaKXpwlCZ0Yi63ANc6g41pxflMe2I3AdtBLgcgktTQKhtmMiwiz2UsimgNALow8KcjxyQ +ONMJkZPZdmboTd3KRk5B1DBT9OqB1u9peA7tRBekSGHMMf/Ftkje3wm3NHilXL3j1WwSwvd20L1f +w4XPoOFiS0caGSUCULbw8w8klkQARiOJjBou7FUkveuMSOISOqUa0rYNhiUvvGoXxFjOr+iHNVRC +QmU4lRWDJroyHdRATlg9tGg4tHxhRwIkhNaEkiDA8GH4kODv9JDL63Ccvu7FBiRN+bQxYBhz+X+d +YPnEgcWXjAYIOh80ZIofH3QgjrW8BYfqCAEIjAzjJHdSF1AXLR1ksApn1EEG86t4WeatJMADV44C +dr7InCvjcf6ePQqVY3HV3qNgDaDKszLvqgy75sVSCDoigaBHTBIC1EDKlqdgKYxdZsk4dkhdS332 +l8dFJS7JP9dH/VuZ5srVH5MQsSOAuAh5L+NiOuKGUZ3a44Xuj+D5bpSuj2B414dwcSNrLpsuUgNo +uHwouDgiMpAEmFod6VsGwZR78tkqjQCmuG/bWFZxeEOQVAlTZKwwFzEOMEW54oGm/cqB7N0sCDt+ +pBFY+KkRFDlfmjE0PqRJs8cJM3q0YgS47JlSoY8JHMDhlxOKPnZE5hwnxDHXhdtSQMauoThUryb2 +e3OMvZK9B9FFcqZElXO8pgB6gccEGjPRJknuLAKvJMABDgMViWbSvNIaVNSNBaAQS8u5tspb5Ri8 +8GMnZM6mLhwJnJnggjMzApC+eyiLr7xjSlEarKvozWz4SFTTcDmv45If4o676sqo6HKa8242BT+u +up3n8D4RekQUXEr2uaB1wNPloqh/iwnI2tFRq0VUjgKkHyC4ZMxxxi/TBBe2ud/zR+aeERyenutU +pQRgbK1u+uFfmVqnisWSVjkzlKuhlEhnvNe3VYVxfhCGvvaEXvRJdctwrf2G4U3dyQJvtRs6NHqm +3LDIk1Fj14rHYPpED3PJM1lwBTkjZJwboha2QOqaF1gEOmG/ixsuTmH0YTdR04VGhBRhITyeoZBw +GiEiKefd8ESD7lcOA7eRAFL57+I1xPPZKpaCC8z9YnyVzSCVHmemLpeYilIY/qOmMBWNZwH6yZ2I +XPEcvn9xKHLT02He1FFvOWu4SCjnZ+KinHPG653uLR8ZSfJH+wxG1jamUznPhkvJ9isJULvlBFzc +RgLYdDEt1oeByTNdNFxOj3NH1OJWyD27AGZzYfsqJQCHgU+Y4jZaJT9LkaR53x4J5XxQduASVnnh +rsZDOdwq49EsBL94tw2Kd7I9q7WCeT5zW+F6V7zVueMVDZE+zy5AyaomyJ/HIeBsR7KcBReLrpCg +6kgK7oOIp9qz9++mTfiE38MicBXrAOnzb+e190rLWWoLPcVIu1Uj6GlXjQBlvVBqgJ3fD0fq565I 
++5TCOYP0j3mc54J0tpLT5rjD+oWLVnGLLqkcAsbR487S4yT/J23qi9B+bbGEo5FZ/v449W4/ffgq +uEgksOGiEpfIJb5o2OhVeN9ZZsjLiNC/XU/EfMHm1X5ngD2Ikh9dcXfAoHKY1Gk5EVlbOzENMfcv +ZkShLmnURcdF0iLTTHBfmHLOcjBieaJKCaBa1H6c1mXjhTl6Jb2N/XOtjy4dNHq0uptj80m1cFuT +kRWaPFPYBHkGswa3w/KRzfH+oHboeMezWhfQ3vmS0UOnR8cjeV0X5NPb0mbrwxyt4GLzJebL9kha +OhDHvdy1eQAhwQFXGu0NdyjSRpaoJJNN2/TiSTxRYVGnEXTflQSQVFP3jrGo32gU6jV+yybyeRQa +NHkNZzYMhGkx27bzOOYmESUNRQoRJ3gj9ssOSFj8tKbHKXYmt3FWchaJcOYtjnQ4nyDzCjLZVIoL +U92Po+uiwW1vlC+SqYP/baPRs10/THiiMz54ug2aNRhe7pxad01A5qZOrEGEiI6luERITTSBC2OW +/Rt50ctkD2VglU8fy9QopZFSlD7PFMyu2zfsUUtxIjNqDH/CfnWTEyLm+GFwx56o3WJcubBrb3Fe +bgVLB3Aq/NuMQ9DYx5H9tT9yOe5PY5NDvC1yMkMcHzKK49zUVc8iNKC6Nhdwit3AYyTAEc7+HfZz +RTbnALTZPBJBsemizfTJrJsYYqsrHr9FIsA7Nn0kH18We9vV3qUbOzmQU9W1tO5f2iwndgClCOUk +lKbL3Uhd+SxO3FxN00MWocjUdBgnrL6vyS4hJ7JkvC64QNOFR8GFs6K/LPTF5NEPoV7bMVfgUg6T +Mj2DWndNQur6Tlo6FF10XJw0XWI+bYNLYXNo/CJumbMOqFLvL3txzkTNsrIjWLD5aZg/ZSRgfoJ0 +qaRoku7ZWj4wh1DZ6zyxdm4LvPXGI+g18Dm0fXgk7ur2Oo+vouegIfjP672wemYrZKzw06aDs1hk +Jb/ngpip9KaJNP4kL/yyrCNSFj6JiIBqXO4lE0I0BEE/QSLos4Fc9OHrguQRnAhi4ShtZG1Gb5VO +CF0Xzhmwh7BjaRPsXNIEO5Y0xo7FjbHzk0bYsYifFzbBzx83wnZOu/40tzFS1tyhFXzJnIsQXcKp +S0igL+KXd8KFBf1ofF96vr76SKanRa9ISpSvJ0Jv9kTqq+6wUBdp3SqiSxlcFNYX+Zs8sXlBcwSN +6Y5BQwfiwf7D0Ln3y5QR2ucnhgzCKy/3xqwx9yH4/aZIW1JDG/LFTGUBSl1OTfZB7Nf3Ie/cF7BY +igr5v65/67sUGAXq8oZ3c1tO/qXTXyNj4S2skvVxu8KiSYZNKhsWpcKxqyINDJLEymrWTCMVs7CS +CR8ZWkmIlQeMY8s3cpKEWa72mXMrW799kTjkXk4Jc+k31wPIEjB9VlBIIEu/OC0si0Fsk0Mh7dyQ +zsKwgK1SGbcrbOHKuFn0kG6eNFFERD/RU2Ff3bKEuXeRAwqoSw6HVuk0fOK79FYaPoK6hI3nVPQH +DZFIXSIH3YMTXHl8msaXxShi/AjqJGsSRT9ZoBrHxSvxNbh+sbM70t/hJBV1kSaSzHKW4iI62UQR +bGRsT7KYpf0toX6BA6fFWekz18usqIYL50NCJlCXOQ2Ruv1FmLKOSRt6MKXF3+b5/2Ny6A7ORA0x +F+cgfd9cXJh3O1LZtMniPHouDZtPry5Y4KiBm8fiJecjae06cGjHXMZwlkSgZVgVzf66Vlwx10d+ +yMUPyx5C2sjOSORwL4GACqgxsgCUXnaOEiEh17YO0L4cTPKxEOEIp4xPdXJH9Gg3pM5y0XS5xPsK +2S7rwgUo1E9AlmGmFHiJQsBpemV9RnL9JE9EMBrEf/kQooZ3xPHaDPme/Dujj9xbiBjBTSmik6xK 
+/oWLVuKpZ2J1byRzIYusYE7kRpbj3T0R8rYbn9eF6xTK4EJ9RKdSXD78NVxItkAv4nIbkrcOZ8t3 +Oyym3Nk0fJdrZviyN7bt2X+UCj1qNhd1zgz5dnvyprGIWdQO56cyVHHY9AtnruIose/IxI4Tzk+W +FiY9aCLDJ5sY5z5shrj32yFjVHtk9bidq4F9uSzMk+v/ZUGoDqqAK6uDtJXAQgJZBWwjgnijEOEU +SXBSUgPrA1kidqwxl431oLFGuCCKzZsY3j9OE+rCcBpFLz/POkNr7TLqhE2jLnOb4vzMdoh6rT3C +O9+Gk5rHy/X1XB+uGV4n4nkSIJoLVmKqi/G9tCVssmxdjJ/OXUuye+kSF7me4MrmT5uwRujhitCJ +AaW4iB52XaLtuNDTwwSbqX44t+B2rgQagtyIZTBxClqxWqIE5+v6jWocinRnQTLMajUNLL4YNzI/ +8RSyQr5H2oHFSN01B2m75yD98CLkhK9C+oZZJ7PGdEZhp0bc8+eNAq4KziVg2g4grgtM1xaHcgMI +QZUlYkIC8bQYhlzxOi0a2BaHhvEYYksNGhlotJMeHqzUmSYYGY7x+2O3cATRwh0nOrB6v98Tp3v4 +IvThWgh9qB7CujZESKv6NJYvfyfVvZ7jRUIl3NvuJbleDC8rlaKreWjL1uJpeFm8mkyyplFf2bkk +O5hkhbOsdC7kyy5yueT9e6aIoOo1sOu9d5ATe5S4fEdcPimHy6Uzq1GQdIAGj4m3mi+NY4R9mTKa +y9I+UM3mqh3nX+2QwlkZfy7B7s/52RFcQTREPovw88t8oEXylrCsIdwXwJdEFXNXUGGADzeFyB5A +fTeQtv9PdgNR7CuEtYWisjxciCDrBYUIJIR4pYRkCc32dYPa8jFt4agu+mcXRgldhCTi2Zqh5TMl +hL+R34nRpbg7a/P4SFvkiZY9CZRfJDXZDC9RSpavyx4G2cEkK5tzanODC1NAAZ+piNvcZLubmdve +4v39MJekXHBPe1zg6/G4l5GizKNnd+CcPnFSH6EDDTabzVXb1bvaxv6j17u8M6g1dwbdqm8Lk42g +FAFN2yMgO4OEBPadQQRZiJBEb0tgyJUVw2IIbXeQRgZPLSpIUSaRQVtHKAtJbTuGJEpootUPts/2 +v8k5/Jv8Rn5btriTAi/aloJkN5LcW/YpJGqGl3wvRLWFfOos29ny6fVCaNnjaKLIZlfZ+yjvOhAi +7ODvAhmhNowejYLsrHCOrD6/rkP7HzXw751f9jVxlva3aC+AkFfCCAn0jaE6iEIELRpQJBqk25eN +a3sDWR/QCBek8ta2ipWNDHplLquJ7aIRQzaU2MT+WT+yANV2GtnEFuJjtcqem1JocDF8klbk6Xle +lq+LTlncwSRb2GRfQz73N9i9Xra6yZY3u+HtbzyxHzP53RK++WR6o0YI+2EDt0ooG0mE2b+H3T/i ++1/bHi5glY0GdiJom0SZU7NYXGk7hCXXltkvKEYRMkitIIbS6gWbSMiWHUX2tCGp4wrRvrf/RieT +bDyV6wnR5PrJJF1aTe5V5H0lKolkUx/ZyiZEld1NEsHsXv9rhq9IhOMkzyQuclnevz+y4uKz+f6b +b5gK/n+kgMzn7O8J1F8TZxcBT0Jn2bQg9YEALWQQj9NqBG3XsFcpGSQXSyWubSeTEYSdFKzUZUgp +5LhC5DtbNLH/Rq6RIhU9r5dGsd/DnuOlUNWKPOoketnDvegrkayyxrc/bz5/s4LEnFCjBvYt/oRD +PdMe1klzZN/FP8LjKz5EZd4QIiBqRKDIG0KKtd3D4mneWqi1RwUxhp0MOiF00XcUS47Wx+VCjpSa +st+wough3X6ubmz9GnI9ubYQTu4jBaqQUIxeQH1EJ9HNpKUwXd+KHl7Zf8tvo+r4YRqHsfM7dUJC 
+SIi8BymYJBj/jyNBZQhQNhrY3xpijwoCfJHN+6RWsBNC21/IsCzGEqNJusiUXM08faXoO5B10c+T +jam6sWV7umxT1yOO7F3Ujc4wX8bwUtzZC7y/YvyyJCkhETYx3Yzx9sLGwEAU5eWWcKQgRHjkH0OE +P0KAiqnBDrrJ5n1ChsvRQfdO8VIxmv0lE0IM7fUymugFW9m/2Y1s/00eh3DyngKJOGJwEc3TZTjH +z2Wr+sp6+B85T4ri5AA/fMhG1LRmzXB2y1ZO+lgKOYSeWlRU1OCGJ8KfJUBFEO2RQQxiLx61dGF7 +t5DdePpRz9UVRSKJfFf2XLmGLnolf7W9vLJkkHriAJtTY9zcsWzwM7iUkiK7m1dyF/F8Dht9blgi +XC0C/C8gy5LCHi3sBBGSSL428VU08lmk7DkVQ7q9DqmswariPCtfpJ1Ttxo+8/HCmFq1cPALmQW0 +ZLJD+CxlFIngdMMRoWwfwNyBfYD6euVsrW8TW0H1m3+rzDlyPft5lbl2Zc6x6/m3398PoUxrE9na +XtC1G1Iizkk02EsyPH7DjRao8KvZu37eNfu+lmjToD5a1/WnBBhSCQxa8pxWdfzRjw2k7xYvgclk +SiQJHr6hooCErRMnTriuW7fOzZC/hgGx5KLCGzAN3FCMNZQ1EDAQMBAwEDAQMBAwEDAQMBAwEDAQ +MBAwEDAQMBAwEDAQMBAwEDAQMBAwEDAQMBAwELjREfg/IXVLV+b5MLwAAAAASUVORK5CYIJQSwME +FAAGAAgAAAAhAJa1reKWBgAAUBsAABUAAAB3b3JkL3RoZW1lL3RoZW1lMS54bWzsWU9v2zYUvw/Y +dyB0b2MndhoHdYrYsZstTRvEboceaYmW2FCiQNJJfRva44ABw7phhxXYbYdhW4EW2KX7NNk6bB3Q +r7BHUpLFWF6SNtiKrT4kEvnj+/8eH6mr1+7HDB0SISlP2l79cs1DJPF5QJOw7d0e9i+teUgqnASY +8YS0vSmR3rWN99+7itdVRGKCYH0i13Hbi5RK15eWpA/DWF7mKUlgbsxFjBW8inApEPgI6MZsablW +W12KMU08lOAYyN4aj6lP0FCT9DZy4j0Gr4mSesBnYqBJE2eFwQYHdY2QU9llAh1i1vaAT8CPhuS+ +8hDDUsFE26uZn7e0cXUJr2eLmFqwtrSub37ZumxBcLBseIpwVDCt9xutK1sFfQNgah7X6/W6vXpB +zwCw74OmVpYyzUZ/rd7JaZZA9nGedrfWrDVcfIn+ypzMrU6n02xlsliiBmQfG3P4tdpqY3PZwRuQ +xTfn8I3OZre76uANyOJX5/D9K63Vhos3oIjR5GAOrR3a72fUC8iYs+1K+BrA12oZfIaCaCiiS7MY +80QtirUY3+OiDwANZFjRBKlpSsbYhyju4ngkKNYM8DrBpRk75Mu5Ic0LSV/QVLW9D1MMGTGj9+r5 +96+eP0XHD54dP/jp+OHD4wc/WkLOqm2chOVVL7/97M/HH6M/nn7z8tEX1XhZxv/6wye//Px5NRDS +ZybOiy+f/PbsyYuvPv39u0cV8E2BR2X4kMZEopvkCO3zGBQzVnElJyNxvhXDCNPyis0klDjBmksF +/Z6KHPTNKWaZdxw5OsS14B0B5aMKeH1yzxF4EImJohWcd6LYAe5yzjpcVFphR/MqmXk4ScJq5mJS +xu1jfFjFu4sTx7+9SQp1Mw9LR/FuRBwx9xhOFA5JQhTSc/yAkArt7lLq2HWX+oJLPlboLkUdTCtN +MqQjJ5pmi7ZpDH6ZVukM/nZss3sHdTir0nqLHLpIyArMKoQfEuaY8TqeKBxXkRzimJUNfgOrqErI +wVT4ZVxPKvB0SBhHvYBIWbXmlgB9S07fwVCxKt2+y6axixSKHlTRvIE5LyO3+EE3wnFahR3QJCpj 
+P5AHEKIY7XFVBd/lbobod/ADTha6+w4ljrtPrwa3aeiINAsQPTMR2pdQqp0KHNPk78oxo1CPbQxc +XDmGAvji68cVkfW2FuJN2JOqMmH7RPldhDtZdLtcBPTtr7lbeJLsEQjz+Y3nXcl9V3K9/3zJXZTP +Zy20s9oKZVf3DbYpNi1yvLBDHlPGBmrKyA1pmmQJ+0TQh0G9zpwOSXFiSiN4zOq6gwsFNmuQ4Ooj +qqJBhFNosOueJhLKjHQoUcolHOzMcCVtjYcmXdljYVMfGGw9kFjt8sAOr+jh/FxQkDG7TWgOnzmj +FU3grMxWrmREQe3XYVbXQp2ZW92IZkqdw61QGXw4rxoMFtaEBgRB2wJWXoXzuWYNBxPMSKDtbvfe +3C3GCxfpIhnhgGQ+0nrP+6hunJTHirkJgNip8JE+5J1itRK3lib7BtzO4qQyu8YCdrn33sRLeQTP +vKTz9kQ6sqScnCxBR22v1VxuesjHadsbw5kWHuMUvC51z4dZCBdDvhI27E9NZpPlM2+2csXcJKjD +NYW1+5zCTh1IhVRbWEY2NMxUFgIs0Zys/MtNMOtFKWAj/TWkWFmDYPjXpAA7uq4l4zHxVdnZpRFt +O/ualVI+UUQMouAIjdhE7GNwvw5V0CegEq4mTEXQL3CPpq1tptzinCVd+fbK4Ow4ZmmEs3KrUzTP +ZAs3eVzIYN5K4oFulbIb5c6vikn5C1KlHMb/M1X0fgI3BSuB9oAP17gCI52vbY8LFXGoQmlE/b6A +xsHUDogWuIuFaQgquEw2/wU51P9tzlkaJq3hwKf2aYgEhf1IRYKQPShLJvpOIVbP9i5LkmWETESV +xJWpFXtEDgkb6hq4qvd2D0UQ6qaaZGXA4E7Gn/ueZdAo1E1OOd+cGlLsvTYH/unOxyYzKOXWYdPQ +5PYvRKzYVe16szzfe8uK6IlZm9XIswKYlbaCVpb2rynCObdaW7HmNF5u5sKBF+c1hsGiIUrhvgfp +P7D/UeEz+2VCb6hDvg+1FcGHBk0Mwgai+pJtPJAukHZwBI2THbTBpElZ02atk7ZavllfcKdb8D1h +bC3ZWfx9TmMXzZnLzsnFizR2ZmHH1nZsoanBsydTFIbG+UHGOMZ80ip/deKje+DoLbjfnzAlTTDB +NyWBofUcmDyA5LcczdKNvwAAAP//AwBQSwMEFAAGAAgAAAAhACGs8hl1CwAAqS4AABEAAAB3b3Jk +L3NldHRpbmdzLnhtbJxaW2/jxhV+L9D/YPi5u577kEacYK7tBkmzqNOi6Bst0bYQSRQoeR331/ej +ZMW7yccg6JPlOZyZc7/N+eqbnzfri0/9uF8N25tL+V5cXvTbxbBcbR9uLv/5Y33XXF7sD9122a2H +bX9z+dLvL7/5+s9/+ur5et8fDvhsf4Ejtvvr4ebyadxe7xeP/abbv9usFuOwH+4P7xbD5nq4v18t ++tc/l687xpvLx8Nhd3119brp/bDrtzjtfhg33WH/fhgfrk4787B42vTbw5USwl2N/bo7AOH942q3 +P5+2+X9Pw1WP50M+/R4Rnzbr83fPUvzel6/kPg/j8pcdfwS9acNuHBb9fg/ObtYncjfdans+Zr/+ +I+ec+Pnd6m7sxpfPDvkaYvvvMGwunq93/bgAQyFzJS6vJsBy+PtwyKv9bt29fOwe+jg8Qezjqt8f +wf3mrl/evuwP/aYO28NpEcgO97eH7tDjyP2uX6+PirNY9x1Qfr5+GLvNpoOgTyuv95xk+XEcDv1i +kiM+7Jcr4DL23fKH7fpl2npSgknDgOO00G+xtOgnNZhWTmf1993T+vBjd3d7GHb46FMHBvkvSPrb +y+6x3wLF1O1OWC8eu7FbHPrxdtctcEECPeOwPm8/MiINm90IOXzGm39169US54QHCGR/uD1q+mes 
+g96PC8A/bIHFavlvyG/iK/R/1x2OP2Fmy/20Nv34xzAczlcKIbVs2leqJugbRAhl2nTC4zeQ6CqH +mLZmCpG2qZ5DvJeKQpTQIVCIFlm/6s+vcNNStA3dY4yKnB5jXcPvMTaehfqre6wViu9xPltOqRey +5dzxNqRIsW6sTlw+jZMzUmi8yZwHQSbNcYu29S3FIMvS8NOyV0XzPT47LtMqo+eSq9qWmT1GVIqB +FKrluiOFt4byTUqpuXykNFY4Ro+UPliKm1Qqc92RyubEsZ61OamdqoViYJxMhkKcdYLf42yrOD1e +1UT1QHonM5UpIFFzDFoZuV7LIKTiuAW4F2o/MopaLKU0ulw5PUk3ku9J3kTqq2QGR7mGZG8cl0IR +TaHaK4uXktq2rMoFeo8SdsZ+lHGRW6NyQmkqH+WNlxRr1ei2pdJWrfKe8k21znM7BaQaSqkKNjpO +aZTF8nuiaxPVA5VkNpyeZLOmWqWS85V6MZWVUvy07OUMbtlXx+8punDtVcWIzHldfE4ct+qloJal +qo+GxgUN3x/oaVpBR6jGa61TpbzW2lVup9oKUyjfNBxF5Lg5pQyNtNrZzO1He1EU1R3tneLWCJvP +kvOgcaJw3FpVBNVEHWRt+WnBWkn9joZec63SWdqZe4pIXKsMgomg3sVIXeUMxDY8czDInhzVKgN+ +cts2Ggkh9S7GWJ8pdwDJnmq8cSrEGYhuGioF+DDN+Wa8VZFao2m84D7EtEIYzoPWyZl7kAfNUBqg +o/y0INUMR4MPkvMgqsAzYhNN5dprolPcXxt4S54rm6Q0z3sBKTO6Aw87c1qxhWf4FiWypjZnkQl5 +aj9W28T1zRoTKtUQa2zlEd1OCke11wKkqXdB7lQajptD7k8lZ+GueZSx3jnHsW4hBOoTbWtUpf7a +BmkEpyc456km2qjzDA+ily3HLUnBc0ubbCNpxLAF2TLnWxUzWbSt8Dwze6yP1LvY6mKhECdt4qc5 +ZQ3XUWdc1pQeB6HySg+QwOsf54Tw1CM5p5Xg93jkypQHyLdMotJ2jaiRStu1wjWcO61LlUYMF7QP +heXXLrjA/RsgJfPTopBpBoJoT/XaJYmARjFIFk5pBlJ4BHTJ6YafVowzVONdQRrNeVBcmjkNebym +vPZC5JoY1l4g48oc4rSnWHuBNJ76HY8SjHt/dJ5qopR6g/yJYwAr5T4RgbYoKjkPb8ljI5K03FKN +915lnqn6BlbCKW105H7UN6bhvsq3JgcuhRZenkYmH4RuqB74oFXk8onS8sjko/U8DwEkz9yTRTPD +t6xkQy3LV5G4J/dVVl69e9RskZ7WCHRRKA8aoWOcgcx5ikaaZCjfGukSj2aN9D5Sq2+UFILaXKOU +lFTajdJNoB62MWi/Ub1ujI6c140xWfB70DTk+Q6KD+35HueCp9bYQK8bzoOkGsulkFziEaPJLgeq +8U2xsXL5ILMrnDsFVSiHVFF5LGmqU7wP20LdeCYECKpN5i3R5EuOam8rEc0or1slLe8jtcoYXme1 +6CvzvhgglWdprTZFWYq19kVSvrUG5SnVg9aKX54Rvuw4t8hQeFXdOl1457RtrGs5R1vVGmpZbdQN +7wW00UaepbVJzWRPbVZ5hm9Za66jbdYlce4Umbhtt1Un3p0MqEt4LgZI5vkb2paxSibTIEW0VN+C +Up7zLaD+4V45oNa1lNKgZeXSRmDSgvoDJHboblCsoW+8jxSsaHiXLaA9yrONgE6job4KkMI9RfAO +xR7FDZo4wzfksC21+gBmW+rfQjCRv32EJGaiWchK855dyMh7+T3FJO6vQ4X351KorvKMKwqBjiLj +DtJrwzslU+LN3xeisDFTXkfhWkfpiQi03FdNkEx9CCCV92EjXlK4p0DylJKjlCoj+WsSyvCZly48 
+SFhFvVhER9NyrK0SvJ+I5krkXY/o0H/jp6EO531LPHOlQqtDFOLwfZQH8CA8V45TksT51oAgrjvo +4fOXh9h6tNcpBkGGZgaiK+9XxeBaRf1BjEYX6t9iRO7CNRE9B02jZkS933JKYcEzkssK6RiltHjD +40KEBc9QOuU7tGJBUqV4txVJ78zLHSAzPEgoNXllhHaZ5LaN8Nd6ytFkUFNSHiCpCrxblNz0bsX4 +lpw1PN8BBC/PdE/whVdTKfjK/VsCe3ifHElv1jQGp2TnuIP3H0cll7IzPFNNxQpeVacKT0GtJAsZ +NOVbxoMnr2WyNlpRK8kaD7XUk2fjG95bz3h3CNTvZKtjpvaDJzCkqkxyOcjMY3AOvuVeLMcpKaen +JTTMqI5mdJz5q3zOpgrqXXKRwVC9zsjIuR/NVXr+IpCrrg2VQhGYHOEQifkDGn8K3hQ95WjBIw/3 +fEWh5UzjQkEE5NIuqD74u3PRrmjKt4K3HN7ZBqTwriGcpQ7Utou3ssxBTOWQxszU26WBD6EyLa3K +vItTWtPwfnzBiw3vTpbgNK/eSzIN9wdogUb+QghI4TZX8EI5Qw/eIXnHuRQkXFx3kHXyir8gP9FU +R6sQDa/AqrTBUT9a0UPhXYKK+KP4HmMa7vmqxRQIx82p0lJ/UJ3BCBHzIdWhm0bzg4p2K++pIsQ0 +vONckbnwWqa2mMWhvrcGWfjMXE1z9VyFHkROD+ZQeLZRC6LMkTtXp9E9zPBtrqe50Y/j+dc0E3mx +Oc0fpm5zN666i++nyVIM/m2u78af4mp7ht/1mGTsP4fcPt2dge/enQD7TbdeV8wpngEYKj1BlpjR +zP398eD199348Hby0WA31yNdXfb33/5y2jT82Y9/HYen3enU57HbfdgusXy+UKI7c4KttofvVpvz ++v7p7va8a4sB089AmBj94dM4bbp6Y9Dz9QEzwf3Eoe+67cN5srHfvvvPa3BarMfjNGX/fbfbYSAT +n9w9yJvL9erh8XAc/jzgPwyj/nT85+5BvcLUNBh6wH8T7PhPt5gow9evP6YPTj/x1euPtzV9XtNv +a+a8Zt7W7HnNvq2585qb1h5fMFGL6defMJ97/jmt3w/r9fDcLzGFeob/Zmni1+v87YftYv207KEP +y2Gx/7CdZmtPQ6vHqdTwdBjOk6kfV4vDE0ZUj5v3j92uh1JM07DQzuH6uACJHxcuPl33P2Nqdpqy +xTz3brXcdD/fXCphTqXz6+cY/B2eDl98PB01fb37YvUCY7Dd6xDu1RebIXgMAn+JDEjrFyuo8+3L +5u5tzPb9iez1CqO0/Q4TuYdhBMOOE7x/OarQ24z51/8DAAD//wMAUEsDBBQABgAIAAAAIQBH07LI +gAEAAPgMAAAUAAAAd29yZC93ZWJTZXR0aW5ncy54bWzsl8lOwzAQQO9I/EPkO83SUkrUtFJBPXGC +8gFu4jSWbE9kuw3065ksFQFyIIeol5w8mc3jN4qX5fpDCufEtOGgIuJPPOIwFUPC1SEi77vt3YI4 +xlKVUAGKReSTGbJe3d4si7Bg+zdmLXoaB7MoE+qIZNbmoeuaOGOSmgnkTKEtBS2pxU99cCFNecye +IT5KpqwbeN7c1UxQixWYjOeGNNmK/2QrQCe5hpgZg4VIUeeTlCuywhoTfjLN6BQhTyIS+LPAm/rT +eWWXVL+w1KLtREVEPOKW3qh95YesQ72D/K/vBqwF+UuPM28SXWaz3zEKGRJ0NOdyrlLIaYxUKzkG +AUiQHi3UZYhWZf0i9z8q6her2yvvE+pWuKtF12I3+IcRfHe7hwa/GMFfB/zjCP4q4GfeCH448PXh ++pRxkXRv9Pcj/eHoX07YejRNGy7aqjd4vWhdeGb+2I7h29EFPhjBDw+++Q/KnQhyyyU/sy3ojYbC 
+MI2XerS33iurLwAAAP//AwBQSwMEFAAGAAgAAAAhADWFxgVvAQAAtQIAABEACAFkb2NQcm9wcy9j +b3JlLnhtbCCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJySUU/CMBSF3038D03f +t25A1CyjRCE8SWLijMa32l6gYW2XtjD493YFJkSffNu99/TrObcrJ3tVox1YJ40e4zzNMALNjZB6 +NcZv1Tx5wMh5pgWrjYYxPoDDE3p7U/Km4MbCizUNWC/BoUDSruDNGK+9bwpCHF+DYi4NCh2GS2MV +86G0K9IwvmErIIMsuyMKPBPMM9IBk6Yn4hNS8B7ZbG0dAYITqEGB9o7kaU5+tB6scn8eiJMLpZL+ +0IRMJ7uXbMGPw169d7IXtm2btsNoI/jPycfi+TVGTaTudsUB01LwwktfA51bo1C1BjQDt0FmiXz4 +fmyZFWgm3dfWuhgDTUMWsCXpD3YIboF5Y+k+9s9Vt/yaOb8I77SUIJ4O9GWaj0ryu99JLexk9750 +EBV9GS6IKzneAgKFkMVxJefJ+3A6q+aYDrJBnuRZkg+r7L4YjYos++wsXZ3vQh8b6mTs38QzgEbH +1z8a/QYAAP//AwBQSwMEFAAGAAgAAAAhAFep6xjhBwAAB0AAAA8AAAB3b3JkL3N0eWxlcy54bWy8 +W1FT2zgQfr+Z+w8ev3OEhCbANO2kKVyZaSltYG7m3hxbIR4cK2cpBfrrT1rZwrFjZxcL+pJYlvbb +1a6+Fenu+4+Pq8T7xTIR83TsH/3V8z2WhjyK07uxf3tzcXDie0IGaRQkPGVj/4kJ/+OHP/94/3Am +5FPChKcEpOIsG/tLKddnh4ciXLJVIP7ia5aqdwuerQKpHrO7Q75YxCH7zMPNiqXysN/rDQ8zlgRS +gYtlvBZ+Lu0BI+2BZ9E64yETQmm7Soy8VRCn/gelXsTDz2wRbBIp9GN2neWP+RN8XPBUCu/hLBBh +HI/9m3ilLLpiD95PvgpSX71hgZATEQc7Xy4nqdi9LBT1BYcaUvxWQn8Fydjv9/18ZKpV2BpLgvSu +GGPpwb+TbVXs0DyOFH6QHcwmWtgh2Fl8luxdW+vNrMrmKBcoh8yMQ9XWscVXHt6zaCbVi7GvggIG +by+vs5hnsXwa+6en+eCMreIvcRQxHT/FxHQZR+yfJUtvBYuex39cQDTkEkO+SaXah+EIHJaI6Pwx +ZGsdDQovDVYK+kovSLTYBDTKZYkSKGi3iZ9VMwMVFWDwvwL/yGz9TsglC3T4e2DMm6P2a7bWTCNZ +MXAs79ixvHeO5Q0dyxs5lqf4tBLL3fwLx3ArSl8mT/LQRHx5+eC05ZzoFRCtpBUQj6QVEHGkFRBT +pBUQNaQVEBekFeB50grwLXpFGAB1Ooqvm1gmrCbrNXk3T1fedZAFd1mwXno6O2sVylvQxtyzzVy+ +udozmfH07k136ny1XgYiVtelCpW8pntugnnCvL+zONKwrUjvTODWMjBcd3bm3OskCNmSJxHLvBv2 +aLxOWH/Fvdk6CFXa3qscpPVNTbayCZVXv8Z3S+nNlnBH2As2bLiLNO+Ekf81FrAHrds8bDBln3CU 
+D4eG2mv71Cz8G4vizarYGpNM2vU3ueDlEKBiO8SxdlHd23ut0A7AmGBSDd0EkI/Q3yQmunztY4z+ +Jo29UD5Cf5P0Xigf4qPdv2Sm+Rxk9x7qeI3IZ3fKE54tNklxBvbSw4h8gi0EzgTyIbbyUSQxIp/g +Lfr0JmGo/u7ExCnZF888SkAhu8OgwGHD20J2SpVZCRaRHVTB6hOwunEtAYhMuj/Zr1j/wkVNBsDS +9j669zgPGnYAe7f4seES7tytnNdv4DwsymWqfuwRzMOhDRpOHhYtjyeT7wg+7pb4CEDdMiABqFsq +JAA1xEfzncfmRDxI9+RIwCLTss1iEHZoZh6RmdkC0VKAo7yJuH81nN7mWKjnTQQK2UH1vIlAIXun +ksuOipBDYDnLmwishqzR7KMyp1KMIufNMpC9CSAsckPeCCA35I0AckPeCKDu5L0fxB15I7DI3GA5 +tUzeCCCYQvlT3wKVyRsBROYGw3b5b0YFCYGU1otelx+PCChkB9XJG2EL2TtN5I3AIjuogmWpDoHl +hrwRQG7IGwHkhrwRQG7IGwHkhrwRQN3Jez+IO/JGYJG5wXJqmbwRQGR6sEBl8kYAwRRKlthJ3nDq +X528EShkB9XJG4FC9k6FUO0lFYFFdlAFy5I3AgumUIIhx4LgphjlhrwRFrkhbwSQG/JGALkhbwRQ +d/LeD+KOvBFYZG6wnFombwQQmR4sUJm8EUBkbthJ3nAYX528EShkB9XJG4FC9k6FUC3PIbDIDqpg +WfJGYEG8dCZvBBBMeSkQxSI35I2wyA15I4DckDcCqDt57wdxR94ILDI3WE4tkzcCiEwPFqhM3ggg +MjfsJG84I69O3ggUsoPq5I1AIXunQqiWvBFYZAdVsCzVIbDckDcCCAKzM3kjgGDKC4DgFFHc5Ia8 +ERa5IW8EUHfy3g/ijrwRWGRusJxaJm8EEJkeLFCZvBFAZG7QtbiqXrRcqtrK2kcNQYCtMyiqGtCA +/QYnYQFzA3+yBctUyxTbXx3SEbCwkIDYEB5YEz9xfu/ZQvBW9w0aAgQNFc+TmEPZ9xNU6ZT6eQaj +li6Em+9T74vp2Kmtg5DarupVHVLlZifoyNJtT0pP+bRWDUfrovpcS1ONULqBrGg60hMvVTtT3pSk +F+suJTURmrfyYfh/2xwVvqvmuqiY0+sdfepdnL8zFqmmtEpP2LF5IX6XesLysUpP2O1Ma/jcnqZ6 +wtSQAiw1gUGLXt3GcKmMDCXLWmzMq/Ft8RPU4lctbijZB6ufi/YL2/PS/ec4MvO2ikON/g16S12C +3qIzlKi3OseDKWaP6wqqnjZQaZ+GtlQcZst5Ytyovlym2tOqfRHqLkxERY+BEaveT1mSfAvA6ZKv +m6cmbCHN26MepOGKqDmXkq+a12dQpQ6a7BKgQqSsjHnURqhvDXufblZzluUF9o3nQqcvaObbPhem +4Na42x5spT0cG+yuN+u2Fc+gvz6leWNGNWbzYVBmHqj2xO+62xA02Rnze7RuOeiDo5PTPObzgz43 +qFOhPpU5MIo0K9wI5fGZpqAqHbGQhU9iyTMZbqSobX/ltSvDK6xmjUXaY930RdFulsTpfU3x5zev +rXPuioezUFdwF+HQU/8uLnKyKAZ1r7EiIboDrcH2hlKNTPvClbnNsTmdjC5OtpNQDKix49gM1uuE +HYQ8VT3mkkUHQnXpsJqnd89ytQ3bkXo87E8+wY2iOVK3LgHWcVe86DGqeq7UfgRK11NLzgPNHhme +D6cnAxNsb3QtKOwXH/4HAAD//wMAUEsDBBQABgAIAAAAIQCejZ11rQEAABEFAAASAAAAd29yZC9m +b250VGFibGUueG1stJNNT8MwDIbvSPyHKndo2pUxJrppfOzIAQF3r0vXSE1SxWGFf4+bdENiDFEk 
+UlVK39iu/di5nr+pOtoKi9LonCXnnEVCF2Yt9SZnz0/LswmL0IFeQ220yNm7QDafnZ5ct9PSaIcR ++Wuc2pxVzjXTOMaiEgrw3DRC01lprAJHn3YTm7KUhbgzxasS2sUp5+PYihoc/Rsr2SDro7W/idYa +u26sKQQiJavqEE+B1GzWZxe1Uw2Ksn6SSmD0INro0SgIBg1ogyIhmy3UOeMpPWM+4hc8ozelXcbi +LlJRgUXh9oY8yCUoWb/vVOvjevtGuqLa6VuwEla1CD4oN3TwiiueMyqfp4vJJQtKkrMJKd3qlZSS +Cot64L1Ge8XbFD6ON0mWy86GFIrTe/k849CnAyIvwq5Bg0d1SOKGSGQ9i4xYDCGBrUQM5f6SxCKh +jMehAmJDFWQkpPzi5iuJZMfmBxJXA0ncglpRj46Q6GYhzEQ3G8NIDJ+JRde69N5X8EmC8+yAxH5K +jpPgw0nUklAcIbH0t6K7Jd1kDCPxh5n4lkTKL/+HRH9NcPYBAAD//wMAUEsDBBQABgAIAAAAIQCB +RAlEBQIAABEEAAAQAAgBZG9jUHJvcHMvYXBwLnhtbCCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAJxTTW/bMAy9D9h/EHxv7KQfCAJFRZGg6GFbA8Rtz6xMx0JlSZCUrNmvH2U3idPu +NB8M8pGgnh6f+O17q9kOfVDWzLPxqMgYGmkrZTbz7Km8v5hmLEQwFWhrcJ7tMWS34vs3vvLWoY8K +A6MRJsyzJkY3y/MgG2whjKhsqFJb30Kk1G9yW9dK4tLKbYsm5pOiuMnxPaKpsLpwx4FZP3G2i/87 +tLIy8QvP5d4RYcFLbJ2GiOJXoqN5fgR4aSPoUrUoxgQfE76CDYaE9QF/sb4K4rq45nkf8kUDHmQk +8cRkOqXOAcDvnNNKQiRdxU8lvQ22juyxU4ClATwftnBSZY1y61XciynPhyn/oQxRmVzxvI+Im4eN +B9cEcZMIHjO+lqBxQXcXNeiAPD8B/AEh7XUFihjzXZztUEbrWVB/aLOTjL1CwKTYPNuBV2AiKZfa ++qSLtQvRi1JFTbOp1uddOGwbxuoqqUi9FJw3JrDnQIVzdt0J4bGmu8V/kB0PyXYceqo9nXtvW1Y2 +yJYY3pitWaT47jf4ii1VeN36gMmAbEE/9F/u0alCjD5xWNjWgdmftkkL/4DSht7CkyvtMpnsQ/pz +cGCXFxWbtQNJS728nBZD4wxKfE3+woqccBh4AvgDrcnrdCqZzmywOvR8LSQrPvcvXIwno4K+znsH +jAx0fHriLwAAAP//AwBQSwECLQAUAAYACAAAACEAobf8RnIBAABSBQAAEwAAAAAAAAAAAAAAAAAA +AAAAW0NvbnRlbnRfVHlwZXNdLnhtbFBLAQItABQABgAIAAAAIQAekRq38wAAAE4CAAALAAAAAAAA +AAAAAAAAAKsDAABfcmVscy8ucmVsc1BLAQItABQABgAIAAAAIQBBO9lKJAIAAD8GAAAcAAAAAAAA +AAAAAAAAAM8GAAB3b3JkL19yZWxzL2RvY3VtZW50LnhtbC5yZWxzUEsBAi0AFAAGAAgAAAAhAPab 
+Hzz+EgAAsHwAABEAAAAAAAAAAAAAAAAANQoAAHdvcmQvZG9jdW1lbnQueG1sUEsBAi0ACgAAAAAA +AAAhAIhElI1NIAAATSAAABUAAAAAAAAAAAAAAAAAYh0AAHdvcmQvbWVkaWEvaW1hZ2UxLnBuZ1BL +AQItABQABgAIAAAAIQCWta3ilgYAAFAbAAAVAAAAAAAAAAAAAAAAAOI9AAB3b3JkL3RoZW1lL3Ro +ZW1lMS54bWxQSwECLQAUAAYACAAAACEAIazyGXULAACpLgAAEQAAAAAAAAAAAAAAAACrRAAAd29y +ZC9zZXR0aW5ncy54bWxQSwECLQAUAAYACAAAACEAR9OyyIABAAD4DAAAFAAAAAAAAAAAAAAAAABP +UAAAd29yZC93ZWJTZXR0aW5ncy54bWxQSwECLQAUAAYACAAAACEANYXGBW8BAAC1AgAAEQAAAAAA +AAAAAAAAAAABUgAAZG9jUHJvcHMvY29yZS54bWxQSwECLQAUAAYACAAAACEAV6nrGOEHAAAHQAAA +DwAAAAAAAAAAAAAAAACnVAAAd29yZC9zdHlsZXMueG1sUEsBAi0AFAAGAAgAAAAhAJ6NnXWtAQAA +EQUAABIAAAAAAAAAAAAAAAAAtVwAAHdvcmQvZm9udFRhYmxlLnhtbFBLAQItABQABgAIAAAAIQCB +RAlEBQIAABEEAAAQAAAAAAAAAAAAAAAAAJJeAABkb2NQcm9wcy9hcHAueG1sUEsFBgAAAAAMAAwA +BAMAAM1hAAAAAA== + +--_004_AS8PR02MB6934BAC1A79647F8264CAE76DA6C9AS8PR02MB6934eurp_-- diff --git a/testdata/message/message-rfc822-multipart.eml b/testdata/message/message-rfc822-multipart.eml new file mode 100644 index 0000000..86771c4 --- /dev/null +++ b/testdata/message/message-rfc822-multipart.eml @@ -0,0 +1,57 @@ +Content-Type: multipart/report; boundary="000000000000fa38d70590edfe62"; report-type=delivery-status + +--000000000000fa38d70590edfe62 +Content-Type: multipart/related; boundary="000000000000fa39ba0590edfe6a" + +--000000000000fa39ba0590edfe6a +Content-Type: multipart/alternative; boundary="000000000000fa39c00590edfe6b" + +--000000000000fa39c00590edfe6b +Content-Type: text/plain; charset="UTF-8" +Content-Transfer-Encoding: quoted-printable + + +error + +--000000000000fa39c00590edfe6b +Content-Type: text/html; charset="UTF-8" +Content-Transfer-Encoding: quoted-printable + + +error + +--000000000000fa39c00590edfe6b-- +--000000000000fa39ba0590edfe6a +Content-Type: image/png; name="icon.png" +Content-Disposition: attachment; filename="icon.png" +Content-Transfer-Encoding: base64 +Content-ID: + +--000000000000fa39ba0590edfe6a-- +--000000000000fa38d70590edfe62 +Content-Type: 
message/delivery-status + +... + +--000000000000fa38d70590edfe62 +Content-Type: message/rfc822 + +Content-Type: multipart/mixed; boundary="000000000000f67d600590edfee0" + +--000000000000f67d600590edfee0 +Content-Type: multipart/alternative; boundary="000000000000f67d540590edfede" + +--000000000000f67d540590edfede +Content-Type: text/plain; charset="UTF-8"; format=flowed; delsp=yes +Content-Transfer-Encoding: base64 + + +--000000000000f67d540590edfede +Content-Type: text/html; charset="UTF-8" +Content-Transfer-Encoding: quoted-printable + +... +----- Message truncated ----- +--000000000000f67d540590edfede-- +--000000000000f67d600590edfee0-- +--000000000000fa38d70590edfe62-- diff --git a/testdata/message/message-rfc822-multipart2.eml b/testdata/message/message-rfc822-multipart2.eml new file mode 100644 index 0000000..5e7e02c --- /dev/null +++ b/testdata/message/message-rfc822-multipart2.eml @@ -0,0 +1,50 @@ +Return-Path: <> +Date: Sun, 06 Apr 2008 09:38:06 +0900 +MIME-Version: 1.0 +Content-Type: Multipart/Mixed; + boundary="------------Boundary-00=_JFOVO7CXFVB00L32QL80" + +--------------Boundary-00=_JFOVO7CXFVB00L32QL80 +Content-Type: text/plain; name="deliveryproblems.txt" + + --- The following addresses had delivery problems --- + + (5.1.1 ... User unknown) + +--------------Boundary-00=_JFOVO7CXFVB00L32QL80 +Content-Type: message/delivery-status; name="deliverystatus.txt" + +Original-Recipient: +Action: failed +Diagnostic-Code: smtp; 550 5.1.1 ... User unknown +Remote-MTA: 127.0.0.1 + + +--------------Boundary-00=_JFOVO7CXFVB00L32QL80 +Content-Type: message/rfc822 +Content-Disposition: attachment + +Date: Sat, 05 Apr 2008 22:50:44 +0000 +MIME-Version: 1.0 +Content-Type: multipart/alternative; + boundary="----=_NextPart_000_0007_01C8977E.04492826" + +This is a multi-part message in MIME format. 
+ +------=_NextPart_000_0007_01C8977E.04492826 +Content-Type: text/plain; + charset="iso-8859-1" +Content-Transfer-Encoding: quoted-printable + +test +------=_NextPart_000_0007_01C8977E.04492826 +Content-Type: text/html; + charset="iso-8859-1" +Content-Transfer-Encoding: quoted-printable + + + + +Received: from mail-qk1-x746.google.com ([2607:f8b0:4864:20::746]) by + komijn.test.xmox.nl ([2a02:2770::21a:4aff:feba:bde0]) with ESMTPS for + tlsrpt@test.xmox.nl; 16 Sep 2022 12:10 +0200 +Authentication-Results: komijn.test.xmox.nl; iprev=pass + policy.iprev=2607:f8b0:4864:20::746; dkim=pass header.d=google.com + header.s=20210112 header.a=rsa-sha256 header.i=@google.com; spf=pass + smtp.mailfrom=smtp-tls-reporting.bounces.google.com; dmarc=pass + header.from=google.com +Received-SPF: pass client-ip="2607:f8b0:4864:20::746"; + envelope-from="3fkskYxoKAMUyz2p0w9-3x40-4w3-2p0z24tyrrzzrwp.nzx@smtp-tls-reporting.bounces.google.com"; + helo=mail-qk1-x746.google.com; mechanism="include:_netblocks2.google.com" + received=komijn.test.xmox.nl; identity=mailfrom +Received: by mail-qk1-x746.google.com with SMTP id bm38-20020a05620a19a600b006ce9b8892b8so5568741qkb.17 + for ; Fri, 16 Sep 2022 03:10:07 -0700 (PDT) +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=google.com; s=20210112; + h=to:from:subject:message-id:tls-report-submitter:tls-report-domain + :date:mime-version:from:to:cc:subject:date; + bh=8MGmH0fAJ1b33g562Ddkv2Hq5jFUkVOClPMnl04s+HQ=; + b=b9JMi7erZLJ3Gbf6D/YdpAu9VrZEkvIR32/WH2BjZBiXvBzj1+A1zAoGUJ3gKSjQKb + OZa6TkgceQohtn7ZwSwnBk3uLHBjoG7aUWtrSsSd5WwNZQqKqYaqJqDyfFqxIkMyhv/P + aBjbsGnpAGIBGkcumd0CNspCAvCo1UkMWi64ARXxIbJBy6PQHLk/wqW5fj2WBR+b3Z1X + 7RT2lB2E+u95SUGkpJ6HuflETtISh1dmhTHRt3usJrfz8Y/Fp6NkTmpLMRMP4B/9Jybd + xapPfJBE2ENYm6LSUhnK/xJ4835YQhBDyP5Jb4Y9yZuycSkDpJQKCqBCMetLl7CHskgQ + FVmQ== +X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; + d=1e100.net; s=20210112; + h=to:from:subject:message-id:tls-report-submitter:tls-report-domain + 
:date:mime-version:x-gm-message-state:from:to:cc:subject:date; + bh=8MGmH0fAJ1b33g562Ddkv2Hq5jFUkVOClPMnl04s+HQ=; + b=c4Y72iT6Eq/VIUJnhYuRNrHhCJ6h711sgzIfd/tO5MipYCKF5Hpo2eFTiQx1rM+nOe + Pk3fnqf9CLz5K8uFTa+FNzMq894ukyo8WsoE+/H0ur33EK57ss9vP759BcbRvgDDWfWm + mvQcyBFvf79aXoMZ9Z1R5dTMSAvitinLLVfE/qFq3ZfrjvF2jqTQfFVSajAJykIk5LCd + Ku2FBlxmbQ3NAWFX5EDPA/Y7DCVw3007AkiKDoXMR3OTwdGRePPDrql8kNuQt9tgvbqk + Y/zcYWze5MGsJL2OcqQZgJt6Dpus982xgDWokMRB50MHKSLwuHMw1mef+49FBN6lMjOi + 2ZJQ== +X-Gm-Message-State: ACrzQf3PRMcHalxi9ZvtjHm55Edve7I9fgun0B/f8Bdw76UM6qcs1RiY + hTaTyRhw5Lr6TCdh2mdEzv0bNvbGb4ttxaQ36eNlXoqzzw== +X-Google-Smtp-Source: AMsMyM6hmC0lYoFYv1MOeWf40QkJqOSOIW99MeFlaN2PWjdCDQ/BFkyJqI3G+9jsCyo610jQh86M7yNsfMFgBr97z0TdprNw6ZI4iNi9RiA= +MIME-Version: 1.0 +X-Received: by 2002:a05:620a:4488:b0:6ce:8a74:8a31 with SMTP id + x8-20020a05620a448800b006ce8a748a31mr3221772qkp.468.1663323006597; Fri, 16 + Sep 2022 03:10:06 -0700 (PDT) +Date: Fri, 16 Sep 2022 03:10:06 -0700 +TLS-Report-Domain: test.xmox.nl +TLS-Report-Submitter: google.com +Message-ID: <000000000000f47cc505e8c8905c@google.com> +Subject: Report Domain: test.xmox.nl Submitter: google.com Report-ID: <2022.09.15T00.00.00Z+test.xmox.nl@google.com> +From: noreply-smtp-tls-reporting@google.com +To: tlsrpt@test.xmox.nl +Content-Type: multipart/report; boundary="000000000000f47cb005e8c8905b"; report-type=tlsrpt + +--000000000000f47cb005e8c8905b +Content-Type: text/plain; charset="UTF-8"; format=flowed; delsp=yes + +This is an aggregate TLS report from google.com + +--000000000000f47cb005e8c8905b +Content-Type: application/tlsrpt+gzip; + name="google.com!test.xmox.nl!1663200000!1663286399!001.json.gz" +Content-Disposition: attachment; + filename="google.com!test.xmox.nl!1663200000!1663286399!001.json.gz" +Content-Transfer-Encoding: base64 + +H4sIAAAAAAAAAHVRwWrDMAz9leBzHdx0HatPu42d21NHKcZxM3exFSylpCv598npNlhZQWDp6fnp +Wb4ISI2J/tOQhyijCU5o8QLQtK54jbYUM1EbcjKZ2HDrIpBMIpkx8hO5UlUl1UrOlxul9BRbvuVi 
+fYdVLfRyxbEV40xYiGQsSR8PwDQM1ElqUSbXQSIfm+dmMlNaCKx6haWv7wzek0MqhwBDGVvmd9B6 +6x0K/Xa5Fuf8iGsm6dxlb0j4Qz1LpMRTmS9OLiEvRRfrzfo0Z0aA2unCsdNkXa7NsDcNQ0+PD0pl +YNDFBwR/jOUfH7tf9RqC8ZFn3vgMg3wHpDz2XwHeFPYhmDTZJyDTSuytdYiHnlM+8/9Z6COLLGbf +lIPxbZ/cbV+N4278AvzNJHb7AQAA +--000000000000f47cb005e8c8905b-- diff --git a/tlsrpt/doc.go b/tlsrpt/doc.go new file mode 100644 index 0000000..9b8b11a --- /dev/null +++ b/tlsrpt/doc.go @@ -0,0 +1,6 @@ +// Package tlsrpt implements SMTP TLS Reporting, RFC 8460. +// +// TLSRPT allows a domain to publish a policy requesting feedback of TLS +// connectivity to its SMTP servers. Reports can be sent to an address defined +// in the TLSRPT DNS record. These reports can be parsed by tlsrpt. +package tlsrpt diff --git a/tlsrpt/lookup.go b/tlsrpt/lookup.go new file mode 100644 index 0000000..3158299 --- /dev/null +++ b/tlsrpt/lookup.go @@ -0,0 +1,91 @@ +package tlsrpt + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/mlog" +) + +var xlog = mlog.New("tlsrpt") + +var ( + metricLookup = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_tlsrpt_lookup_duration_seconds", + Help: "TLSRPT lookups with result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{"result"}, + ) +) + +var ( + ErrNoRecord = errors.New("tlsrpt: no tlsrpt dns txt record") + ErrMultipleRecords = errors.New("tlsrpt: multiple tlsrpt records") // Must be treated as if domain does not implement TLSRPT. + ErrDNS = errors.New("tlsrpt: temporary error") + ErrRecordSyntax = errors.New("tlsrpt: record syntax error") +) + +// Lookup looks up a TLSRPT DNS TXT record for domain at "_smtp._tls." and +// parses it. 
+func Lookup(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (rrecord *Record, rtxt string, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + result := "ok" + if rerr != nil { + if errors.Is(rerr, ErrNoRecord) { + result = "notfound" + } else if errors.Is(rerr, ErrMultipleRecords) { + result = "multiple" + } else if errors.Is(rerr, ErrDNS) { + result = "temperror" + } else if errors.Is(rerr, ErrRecordSyntax) { + result = "malformed" + } else { + result = "error" + } + } + metricLookup.WithLabelValues(result).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("tlsrpt lookup result", rerr, mlog.Field("domain", domain), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start))) + }() + + name := "_smtp._tls." + domain.ASCII + "." + txts, err := dns.WithPackage(resolver, "tlsrpt").LookupTXT(ctx, name) + if dns.IsNotFound(err) { + return nil, "", ErrNoRecord + } else if err != nil { + return nil, "", fmt.Errorf("%w: %s", ErrDNS, err) + } + + var text string + var record *Record + for _, txt := range txts { + r, istlsrpt, err := ParseRecord(txt) + if !istlsrpt { + // This is a loose but probably reasonable interpretation of ../rfc/8460:375 which + // wants us to discard otherwise valid records that start with e.g. "v=TLSRPTv1 ;" + // (note the space before the ";") when multiple TXT records were returned. 
+ continue + } + if err != nil { + return nil, "", fmt.Errorf("parsing record: %w", err) + } + if record != nil { + return nil, "", ErrMultipleRecords + } + record = r + text = txt + } + if record == nil { + return nil, "", ErrNoRecord + } + return record, text, nil +} diff --git a/tlsrpt/lookup_test.go b/tlsrpt/lookup_test.go new file mode 100644 index 0000000..8d669da --- /dev/null +++ b/tlsrpt/lookup_test.go @@ -0,0 +1,46 @@ +package tlsrpt + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestLookup(t *testing.T) { + resolver := dns.MockResolver{ + TXT: map[string][]string{ + "_smtp._tls.basic.example.": {"v=TLSRPTv1; rua=mailto:tlsrpt@basic.example"}, + "_smtp._tls.one.example.": {"v=TLSRPTv1; rua=mailto:tlsrpt@basic.example", "other"}, + "_smtp._tls.multiple.example.": {"v=TLSRPTv1; rua=mailto:tlsrpt@basic.example", "v=TLSRPTv1; rua=mailto:tlsrpt@basic.example"}, + "_smtp._tls.malformed.example.": {"v=TLSRPTv1; bad"}, + "_smtp._tls.other.example.": {"other"}, + }, + Fail: map[dns.Mockreq]struct{}{ + {Type: "txt", Name: "_smtp._tls.temperror.example."}: {}, + }, + } + + test := func(domain string, expRecord *Record, expErr error) { + t.Helper() + + d := dns.Domain{ASCII: domain} + record, _, err := Lookup(context.Background(), resolver, d) + if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { + t.Fatalf("lookup, got err %#v, expected %#v", err, expErr) + } + if err == nil && !reflect.DeepEqual(record, expRecord) { + t.Fatalf("lookup, got %#v, expected %#v", record, expRecord) + } + } + + test("basic.example", &Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:tlsrpt@basic.example"}}}, nil) + test("one.example", &Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:tlsrpt@basic.example"}}}, nil) + test("multiple.example", nil, ErrMultipleRecords) + test("absent.example", nil, ErrNoRecord) + test("other.example", nil, ErrNoRecord) + test("malformed.example", nil, ErrRecordSyntax) + 
test("temperror.example", nil, ErrDNS) +} diff --git a/tlsrpt/parse.go b/tlsrpt/parse.go new file mode 100644 index 0000000..9216689 --- /dev/null +++ b/tlsrpt/parse.go @@ -0,0 +1,226 @@ +package tlsrpt + +import ( + "fmt" + "net/url" + "strings" +) + +// Extension is an additional key/value pair for a TLSRPT record. +type Extension struct { + Key string + Value string +} + +// Record is a parsed TLSRPT record, to be served under "_smtp._tls.". +// +// Example: +// +// v=TLSRPTv1; rua=mailto:tlsrpt@mox.example; +type Record struct { + Version string // "TLSRPTv1", for "v=". + RUAs [][]string // Aggregate reporting URI, for "rua=". "rua=" can occur multiple times, each can be a list. Must be URL-encoded strings, with ",", "!" and ";" encoded. + Extensions []Extension +} + +// String returns a string or use as a TLSRPT DNS TXT record. +func (r Record) String() string { + b := &strings.Builder{} + fmt.Fprint(b, "v="+r.Version) + for _, rua := range r.RUAs { + fmt.Fprint(b, "; rua="+strings.Join(rua, ",")) + } + for _, p := range r.Extensions { + fmt.Fprint(b, "; "+p.Key+"="+p.Value) + } + return b.String() +} + +type parseErr string + +func (e parseErr) Error() string { + return string(e) +} + +var _ error = parseErr("") + +// ParseRecord parses a TLSRPT record. +func ParseRecord(txt string) (record *Record, istlsrpt bool, err error) { + defer func() { + x := recover() + if x == nil { + return + } + if xerr, ok := x.(parseErr); ok { + record = nil + err = fmt.Errorf("%w: %s", ErrRecordSyntax, xerr) + return + } + panic(x) + }() + + p := newParser(txt) + + record = &Record{ + Version: "TLSRPTv1", + } + + p.xtake("v=TLSRPTv1") + p.xdelim() + istlsrpt = true + for { + k := p.xkey() + p.xtake("=") + // note: duplicates are allowed. 
+ switch k { + case "rua": + record.RUAs = append(record.RUAs, p.xruas()) + default: + v := p.xvalue() + record.Extensions = append(record.Extensions, Extension{k, v}) + } + if !p.delim() || p.empty() { + break + } + } + if !p.empty() { + p.xerrorf("leftover chars") + } + if record.RUAs == nil { + p.xerrorf("missing rua") + } + return +} + +type parser struct { + s string + o int +} + +func newParser(s string) *parser { + return &parser{s: s} +} + +func (p *parser) xerrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + if p.o < len(p.s) { + msg += fmt.Sprintf(" (remain %q)", p.s[p.o:]) + } + panic(parseErr(msg)) +} + +func (p *parser) xtake(s string) string { + if !p.prefix(s) { + p.xerrorf("expected %q", s) + } + p.o += len(s) + return s +} + +func (p *parser) xdelim() { + if !p.delim() { + p.xerrorf("expected semicolon") + } +} + +func (p *parser) xtaken(n int) string { + r := p.s[p.o : p.o+n] + p.o += n + return r +} + +func (p *parser) prefix(s string) bool { + return strings.HasPrefix(p.s[p.o:], s) +} + +func (p *parser) take(s string) bool { + if p.prefix(s) { + p.o += len(s) + return true + } + return false +} + +func (p *parser) xtakefn1(fn func(rune, int) bool) string { + for i, b := range p.s[p.o:] { + if !fn(b, i) { + if i == 0 { + p.xerrorf("expected at least one char") + } + return p.xtaken(i) + } + } + if p.empty() { + p.xerrorf("expected at least 1 char") + } + return p.xtaken(len(p.s) - p.o) +} + +// ../rfc/8460:368 +func (p *parser) xkey() string { + return p.xtakefn1(func(b rune, i int) bool { + return i < 32 && (b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' || b >= '0' && b <= '9' || (i > 0 && b == '_' || b == '-' || b == '.')) + }) +} + +// ../rfc/8460:371 +func (p *parser) xvalue() string { + return p.xtakefn1(func(b rune, i int) bool { + return b > ' ' && b < 0x7f && b != '=' && b != ';' + }) +} + +// ../rfc/8460:399 +func (p *parser) delim() bool { + o := p.o + e := len(p.s) + for o < e && (p.s[o] == ' ' || p.s[o] == 
'\t') { + o++ + } + if o >= e || p.s[o] != ';' { + return false + } + o++ + for o < e && (p.s[o] == ' ' || p.s[o] == '\t') { + o++ + } + p.o = o + return true +} + +func (p *parser) empty() bool { + return p.o >= len(p.s) +} + +func (p *parser) wsp() { + for p.o < len(p.s) && (p.s[p.o] == ' ' || p.s[p.o] == '\t') { + p.o++ + } +} + +// ../rfc/8460:358 +func (p *parser) xruas() []string { + l := []string{p.xuri()} + p.wsp() + for p.take(",") { + p.wsp() + l = append(l, p.xuri()) + p.wsp() + } + return l +} + +// ../rfc/8460:360 +func (p *parser) xuri() string { + v := p.xtakefn1(func(b rune, i int) bool { + return b != ',' && b != '!' && b != ' ' && b != '\t' && b != ';' + }) + u, err := url.Parse(v) + if err != nil { + p.xerrorf("parsing uri %q: %s", v, err) + } + if u.Scheme == "" { + p.xerrorf("missing scheme in uri") + } + return v +} diff --git a/tlsrpt/parse_test.go b/tlsrpt/parse_test.go new file mode 100644 index 0000000..351d7bb --- /dev/null +++ b/tlsrpt/parse_test.go @@ -0,0 +1,83 @@ +package tlsrpt + +import ( + "reflect" + "testing" +) + +func TestRecord(t *testing.T) { + good := func(txt string, want Record) { + t.Helper() + r, _, err := ParseRecord(txt) + if err != nil { + t.Fatalf("parse: %s", err) + } + if !reflect.DeepEqual(r, &want) { + t.Fatalf("want %v, got %v", want, *r) + } + } + + bad := func(txt string) { + t.Helper() + r, _, err := ParseRecord(txt) + if err == nil { + t.Fatalf("parse, expected error, got record %v", r) + } + } + + good("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;", Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:tlsrpt@mox.example"}}}) + good("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example , \t\t https://mox.example/tlsrpt ", Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:tlsrpt@mox.example", "https://mox.example/tlsrpt"}}}) + good("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example; ext=yes", Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:tlsrpt@mox.example"}}, Extensions: []Extension{{"ext", "yes"}}}) + 
good("v=TLSRPTv1 ; rua=mailto:x@x.example; rua=mailto:y@x.example", Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:x@x.example"}, {"mailto:y@x.example"}}}) + + bad("v=TLSRPTv0") + bad("v=TLSRPTv10") + bad("v=TLSRPTv2") + bad("v=TLSRPTv1") // missing rua + bad("v=TLSRPTv1;") // missing rua + bad("v=TLSRPTv1; ext=1") // missing rua + bad("v=TLSRPTv1; rua=") // empty rua + bad("v=TLSRPTv1; rua=noscheme") + bad("v=TLSRPTv1; rua=,, ,") // empty uris + bad("v=TLSRPTv1; rua=mailto:x@x.example; more=") // empty value in extension + bad("v=TLSRPTv1; rua=mailto:x@x.example; a12345678901234567890123456789012=1") // extension name too long + bad("v=TLSRPTv1; rua=mailto:x@x.example; 1%=a") // invalid extension name + bad("v=TLSRPTv1; rua=mailto:x@x.example; test==") // invalid extension name + bad("v=TLSRPTv1; rua=mailto:x@x.example;;") // additional semicolon + bad("v=TLSRPTv1; rua=mailto:x@x.example other") // trailing characters. + bad("v=TLSRPTv1; rua=http://bad/%") // bad URI + + const want = `v=TLSRPTv1; rua=mailto:x@mox.example; more=a; ext=2` + record := Record{Version: "TLSRPTv1", RUAs: [][]string{{"mailto:x@mox.example"}}, Extensions: []Extension{{"more", "a"}, {"ext", "2"}}} + got := record.String() + if got != want { + t.Fatalf("record string, got %q, want %q", got, want) + } +} + +func FuzzParseRecord(f *testing.F) { + f.Add("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;") + f.Add("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example , \t\t https://mox.example/tlsrpt ") + f.Add("v=TLSRPTv1; rua=mailto:tlsrpt@mox.example; ext=yes") + + f.Add("v=TLSRPTv0") + f.Add("v=TLSRPTv10") + f.Add("v=TLSRPTv2") + f.Add("v=TLSRPTv1") // missing rua + f.Add("v=TLSRPTv1;") // missing rua + f.Add("v=TLSRPTv1; ext=1") // missing rua + f.Add("v=TLSRPTv1; rua=") // empty rua + f.Add("v=TLSRPTv1; rua=noscheme") + f.Add("v=TLSRPTv1; rua=,, ,") // empty uris + f.Add("v=TLSRPTv1; rua=mailto:x@x.example; more=") // empty value in extension + f.Add("v=TLSRPTv1; rua=mailto:x@x.example; 
a12345678901234567890123456789012=1") // extension name too long + f.Add("v=TLSRPTv1; rua=mailto:x@x.example; 1%=a") // invalid extension name + f.Add("v=TLSRPTv1; rua=mailto:x@x.example; test==") // invalid extension name + f.Add("v=TLSRPTv1; rua=mailto:x@x.example;;") // additional semicolon + f.Fuzz(func(t *testing.T, s string) { + r, _, err := ParseRecord(s) + if err == nil { + _ = r.String() + } + }) +} diff --git a/tlsrpt/report.go b/tlsrpt/report.go new file mode 100644 index 0000000..ddf9afd --- /dev/null +++ b/tlsrpt/report.go @@ -0,0 +1,153 @@ +package tlsrpt + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/mjl-/mox/message" + "github.com/mjl-/mox/moxio" +) + +var ErrNoReport = errors.New("no tlsrpt report found") + +// ../rfc/8460:628 + +// Report is a TLSRPT report, transmitted in JSON format. +type Report struct { + OrganizationName string `json:"organization-name"` + DateRange TLSRPTDateRange `json:"date-range"` + ContactInfo string `json:"contact-info"` // Email address. + ReportID string `json:"report-id"` + Policies []Result `json:"policies"` +} + +// note: with TLSRPT prefix to prevent clash in sherpadoc types. +type TLSRPTDateRange struct { + Start time.Time `json:"start-datetime"` + End time.Time `json:"end-datetime"` +} + +type Result struct { + Policy ResultPolicy `json:"policy"` + Summary Summary `json:"summary"` + FailureDetails []FailureDetails `json:"failure-details"` +} + +type ResultPolicy struct { + Type string `json:"policy-type"` + String []string `json:"policy-string"` + Domain string `json:"policy-domain"` + MXHost []string `json:"mx-host"` // Example in RFC has errata, it originally was a single string. ../rfc/8460-eid6241 ../rfc/8460:1779 +} + +type Summary struct { + TotalSuccessfulSessionCount int64 `json:"total-successful-session-count"` + TotalFailureSessionCount int64 `json:"total-failure-session-count"` +} + +// ResultType represents a TLS error. 
+type ResultType string + +// ../rfc/8460:1377 +// https://www.iana.org/assignments/starttls-validation-result-types/starttls-validation-result-types.xhtml + +const ( + ResultSTARTTLSNotSupported ResultType = "starttls-not-supported" + ResultCertificateHostMismatch ResultType = "certificate-host-mismatch" + ResultCertificateExpired ResultType = "certificate-expired" + ResultTLSAInvalid ResultType = "tlsa-invalid" + ResultDNSSECInvalid ResultType = "dnssec-invalid" + ResultDANERequired ResultType = "dane-required" + ResultCertificateNotTrusted ResultType = "certificate-not-trusted" + ResultSTSPolicyInvalid ResultType = "sts-policy-invalid" + ResultSTSWebPKIInvalid ResultType = "sts-webpki-invalid" + ResultValidationFailure ResultType = "validation-failure" // Other error. + ResultSTSPolicyFetch ResultType = "sts-policy-fetch-error" +) + +type FailureDetails struct { + ResultType ResultType `json:"result-type"` + SendingMTAIP string `json:"sending-mta-ip"` + ReceivingMXHostname string `json:"receiving-mx-hostname"` + ReceivingMXHelo string `json:"receiving-mx-helo"` + ReceivingIP string `json:"receiving-ip"` + FailedSessionCount int64 `json:"failed-session-count"` + AdditionalInformation string `json:"additional-information"` + FailureReasonCode string `json:"failure-reason-code"` +} + +// Parse parses a Report. +// The maximum size is 20MB. +func Parse(r io.Reader) (*Report, error) { + r = &moxio.LimitReader{R: r, Limit: 20 * 1024 * 1024} + var report Report + if err := json.NewDecoder(r).Decode(&report); err != nil { + return nil, err + } + // note: there may be leftover data, we ignore it. + return &report, nil +} + +// ParseMessage parses a Report from a mail message. +// The maximum size of the message is 15MB, the maximum size of the +// decompressed report is 20MB. 
+func ParseMessage(r io.ReaderAt) (*Report, error) { + // ../rfc/8460:905 + p, err := message.Parse(&moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024}) + if err != nil { + return nil, fmt.Errorf("parsing mail message: %s", err) + } + + // Using multipart appears optional, and similar to DMARC someone may decide to + // send it like that, so accept a report if it's the entire message. + const allow = true + return parseMessageReport(p, allow) +} + +func parseMessageReport(p message.Part, allow bool) (*Report, error) { + if p.MediaType != "MULTIPART" { + if !allow { + return nil, ErrNoReport + } + return parseReport(p) + } + + for { + sp, err := p.ParseNextPart() + if err == io.EOF { + return nil, ErrNoReport + } + if err != nil { + return nil, err + } + if p.MediaSubType == "REPORT" && p.ContentTypeParams["report-type"] != "tlsrpt" { + return nil, fmt.Errorf("unknown report-type parameter %q", p.ContentTypeParams["report-type"]) + } + report, err := parseMessageReport(*sp, p.MediaSubType == "REPORT") + if err == ErrNoReport { + continue + } else if err != nil || report != nil { + return report, err + } + } +} + +func parseReport(p message.Part) (*Report, error) { + mt := strings.ToLower(p.MediaType + "/" + p.MediaSubType) + switch mt { + case "application/tlsrpt+json": + return Parse(p.Reader()) + case "application/tlsrpt+gzip": + gzr, err := gzip.NewReader(p.Reader()) + if err != nil { + return nil, fmt.Errorf("decoding gzip TLSRPT report: %s", err) + } + return Parse(gzr) + } + return nil, ErrNoReport +} diff --git a/tlsrpt/report_test.go b/tlsrpt/report_test.go new file mode 100644 index 0000000..e0102a2 --- /dev/null +++ b/tlsrpt/report_test.go @@ -0,0 +1,149 @@ +package tlsrpt + +import ( + "encoding/json" + "os" + "strings" + "testing" +) + +const reportJSON = `{ + "organization-name": "Company-X", + "date-range": { + "start-datetime": "2016-04-01T00:00:00Z", + "end-datetime": "2016-04-01T23:59:59Z" + }, + "contact-info": "sts-reporting@company-x.example", + 
"report-id": "5065427c-23d3-47ca-b6e0-946ea0e8c4be", + "policies": [{ + "policy": { + "policy-type": "sts", + "policy-string": ["version: STSv1","mode: testing", + "mx: *.mail.company-y.example","max_age: 86400"], + "policy-domain": "company-y.example", + "mx-host": ["*.mail.company-y.example"] + }, + "summary": { + "total-successful-session-count": 5326, + "total-failure-session-count": 303 + }, + "failure-details": [{ + "result-type": "certificate-expired", + "sending-mta-ip": "2001:db8:abcd:0012::1", + "receiving-mx-hostname": "mx1.mail.company-y.example", + "failed-session-count": 100 + }, { + "result-type": "starttls-not-supported", + "sending-mta-ip": "2001:db8:abcd:0013::1", + "receiving-mx-hostname": "mx2.mail.company-y.example", + "receiving-ip": "203.0.113.56", + "failed-session-count": 200, + "additional-information": "https://reports.company-x.example/report_info ? id = 5065427 c - 23 d3# StarttlsNotSupported " + }, { + "result-type": "validation-failure", + "sending-mta-ip": "198.51.100.62", + "receiving-ip": "203.0.113.58", + "receiving-mx-hostname": "mx-backup.mail.company-y.example", + "failed-session-count": 3, + "failure-reason-code": "X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED" + }] + }] + }` + +// ../rfc/8460:1015 +var tlsrptMessage = strings.ReplaceAll(`From: tlsrpt@mail.sender.example.com +Date: Fri, May 09 2017 16:54:30 -0800 +To: mts-sts-tlsrpt@example.net +Subject: Report Domain: example.net +Submitter: mail.sender.example.com +Report-ID: <735ff.e317+bf22029@example.net> +TLS-Report-Domain: example.net +TLS-Report-Submitter: mail.sender.example.com +MIME-Version: 1.0 +Content-Type: multipart/report; report-type="tlsrpt"; + boundary="----=_NextPart_000_024E_01CC9B0A.AFE54C00" +Content-Language: en-us + +This is a multipart message in MIME format. 
+ +------=_NextPart_000_024E_01CC9B0A.AFE54C00 +Content-Type: text/plain; charset="us-ascii" +Content-Transfer-Encoding: 7bit + +This is an aggregate TLS report from mail.sender.example.com + +------=_NextPart_000_024E_01CC9B0A.AFE54C00 +Content-Type: application/tlsrpt+json +Content-Transfer-Encoding: 8bit +Content-Disposition: attachment; + filename="mail.sender.example!example.com!1013662812!1013749130.json.gz" + +`+reportJSON+` + +------=_NextPart_000_024E_01CC9B0A.AFE54C00-- +`, "\n", "\r\n") + +// Message without multipart. +var tlsrptMessage2 = strings.ReplaceAll(`From: tlsrpt@mail.sender.example.com +To: mts-sts-tlsrpt@example.net +Subject: Report Domain: example.net +Report-ID: <735ff.e317+bf22029@example.net> +TLS-Report-Domain: example.net +TLS-Report-Submitter: mail.sender.example.com +MIME-Version: 1.0 +Content-Type: application/tlsrpt+json +Content-Transfer-Encoding: 8bit +Content-Disposition: attachment; + filename="mail.sender.example!example.com!1013662812!1013749130.json.gz" + +`+reportJSON+` +`, "\n", "\r\n") + +func TestReport(t *testing.T) { + // ../rfc/8460:1756 + + var report Report + dec := json.NewDecoder(strings.NewReader(reportJSON)) + dec.DisallowUnknownFields() + if err := dec.Decode(&report); err != nil { + t.Fatalf("parsing report: %s", err) + } + + if _, err := ParseMessage(strings.NewReader(tlsrptMessage)); err != nil { + t.Fatalf("parsing TLSRPT from message: %s", err) + } + + if _, err := ParseMessage(strings.NewReader(tlsrptMessage2)); err != nil { + t.Fatalf("parsing TLSRPT from message: %s", err) + } + + if _, err := ParseMessage(strings.NewReader(strings.ReplaceAll(tlsrptMessage, "multipart/report", "multipart/related"))); err != ErrNoReport { + t.Fatalf("got err %v, expected ErrNoReport", err) + } + + if _, err := ParseMessage(strings.NewReader(strings.ReplaceAll(tlsrptMessage, "application/tlsrpt+json", "application/json"))); err != ErrNoReport { + t.Fatalf("got err %v, expected ErrNoReport", err) + } + + files, err := 
os.ReadDir("../testdata/tlsreports")
+	if err != nil {
+		t.Fatalf("listing reports: %s", err)
+	}
+	for _, file := range files {
+		f, err := os.Open("../testdata/tlsreports/" + file.Name())
+		if err != nil {
+			// Use file.Name(): file is an os.DirEntry, and %q on it would print
+			// the struct rather than the path being opened.
+			t.Fatalf("open %q: %s", file.Name(), err)
+		}
+		if _, err := ParseMessage(f); err != nil {
+			t.Fatalf("parsing TLSRPT from message %q: %s", file.Name(), err)
+		}
+		f.Close()
+	}
+}
+
+func FuzzParseMessage(f *testing.F) {
+	f.Add(tlsrptMessage)
+	f.Fuzz(func(t *testing.T, s string) {
+		ParseMessage(strings.NewReader(s))
+	})
+}
diff --git a/tlsrptdb/db.go b/tlsrptdb/db.go
new file mode 100644
index 0000000..2ff414c
--- /dev/null
+++ b/tlsrptdb/db.go
@@ -0,0 +1,192 @@
+// Package tlsrptdb stores reports from "SMTP TLS Reporting" in its database.
+package tlsrptdb
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/mjl-/bstore"
+
+	"github.com/mjl-/mox/dns"
+	"github.com/mjl-/mox/mlog"
+	"github.com/mjl-/mox/mox-"
+	"github.com/mjl-/mox/tlsrpt"
+)
+
+var (
+	xlog = mlog.New("tlsrptdb")
+
+	tlsrptDB *bstore.DB
+	mutex    sync.Mutex
+
+	metricSession = promauto.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "mox_tlsrpt_session_total",
+			Help: "Number of sessions, both success and known result types.",
+		},
+		[]string{"type"}, // Known result types, and "success"
+	)
+
+	knownResultTypes = map[tlsrpt.ResultType]struct{}{
+		tlsrpt.ResultSTARTTLSNotSupported:    {},
+		tlsrpt.ResultCertificateHostMismatch: {},
+		tlsrpt.ResultCertificateExpired:      {},
+		tlsrpt.ResultTLSAInvalid:             {},
+		tlsrpt.ResultDNSSECInvalid:           {},
+		tlsrpt.ResultDANERequired:            {},
+		tlsrpt.ResultCertificateNotTrusted:   {},
+		tlsrpt.ResultSTSPolicyInvalid:        {},
+		tlsrpt.ResultSTSWebPKIInvalid:        {},
+		tlsrpt.ResultValidationFailure:       {},
+		tlsrpt.ResultSTSPolicyFetch:          {},
+	}
+)
+
+// TLSReportRecord is a TLS report as a database record, including information
+// about 
the sender. +// +// todo: should be named just Record, but it would cause a sherpa type name conflict. +type TLSReportRecord struct { + ID int64 `bstore:"typename Record"` + Domain string `bstore:"index"` // Domain to which the TLS report applies. + FromDomain string + MailFrom string + Report tlsrpt.Report +} + +func database() (rdb *bstore.DB, rerr error) { + mutex.Lock() + defer mutex.Unlock() + if tlsrptDB == nil { + p := mox.DataDirPath("tlsrpt.db") + os.MkdirAll(filepath.Dir(p), 0770) + db, err := bstore.Open(p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, TLSReportRecord{}) + if err != nil { + return nil, err + } + tlsrptDB = db + } + return tlsrptDB, nil +} + +// Init opens and possibly initializes the database. +func Init() error { + _, err := database() + return err +} + +// Close closes the database connection. +func Close() { + mutex.Lock() + defer mutex.Unlock() + if tlsrptDB != nil { + tlsrptDB.Close() + tlsrptDB = nil + } +} + +// AddReport adds a TLS report to the database. +// +// The report should have come in over SMTP, with a DKIM-validated +// verifiedFromDomain. Using HTTPS for reports is not recommended as there is no +// authentication on the reports origin. +// +// The report is currently required to only cover a single domain in its policy +// domain. Only reports for known domains are added to the database. +// +// Prometheus metrics are updated only for configured domains. +func AddReport(ctx context.Context, verifiedFromDomain dns.Domain, mailFrom string, r *tlsrpt.Report) error { + log := xlog.WithContext(ctx) + + db, err := database() + if err != nil { + return err + } + + if len(r.Policies) == 0 { + return fmt.Errorf("no policies in report") + } + + var reportdom, zerodom dns.Domain + record := TLSReportRecord{0, "", verifiedFromDomain.Name(), mailFrom, *r} + + for _, p := range r.Policies { + pp := p.Policy + + // Check domain, they must all be the same for now (in future, with DANE, this may + // no longer apply). 
+ d, err := dns.ParseDomain(pp.Domain) + if err != nil { + log.Errorx("invalid domain in tls report", err, mlog.Field("domain", pp.Domain), mlog.Field("mailfrom", mailFrom)) + continue + } + if _, ok := mox.Conf.Domain(d); !ok { + log.Info("unknown domain in tls report, not storing", mlog.Field("domain", d), mlog.Field("mailfrom", mailFrom)) + return fmt.Errorf("unknown domain") + } + if reportdom != zerodom && d != reportdom { + return fmt.Errorf("multiple domains in report %v and %v", reportdom, d) + } + reportdom = d + + metricSession.WithLabelValues("success").Add(float64(p.Summary.TotalSuccessfulSessionCount)) + for _, f := range p.FailureDetails { + var result string + if _, ok := knownResultTypes[f.ResultType]; ok { + result = string(f.ResultType) + } else { + result = "other" + } + metricSession.WithLabelValues(result).Add(float64(f.FailedSessionCount)) + } + } + record.Domain = reportdom.Name() + return db.Insert(&record) +} + +// Records returns all TLS reports in the database. +func Records(ctx context.Context) ([]TLSReportRecord, error) { + db, err := database() + if err != nil { + return nil, err + } + return bstore.QueryDB[TLSReportRecord](db).List() +} + +// RecordID returns the report for the ID. +func RecordID(ctx context.Context, id int64) (TLSReportRecord, error) { + db, err := database() + if err != nil { + return TLSReportRecord{}, err + } + + e := TLSReportRecord{ID: id} + err = db.Get(&e) + return e, err +} + +// RecordsPeriodDomain returns the reports overlapping start and end, for the given +// domain. If domain is empty, all records match for domain. 
+func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]TLSReportRecord, error) {
+	db, err := database()
+	if err != nil {
+		return nil, err
+	}
+
+	q := bstore.QueryDB[TLSReportRecord](db)
+	if domain != "" {
+		q.FilterNonzero(TLSReportRecord{Domain: domain})
+	}
+	q.FilterFn(func(r TLSReportRecord) bool {
+		dr := r.Report.DateRange
+		// Standard interval-overlap test: the report period overlaps [start, end)
+		// if it starts before the window ends and ends after the window starts.
+		// The previous start-in-window/end-in-window check missed reports whose
+		// period fully contains the requested window.
+		return dr.Start.Before(end) && dr.End.After(start)
+	})
+	return q.List()
+}
diff --git a/tlsrptdb/db_test.go b/tlsrptdb/db_test.go
new file mode 100644
index 0000000..88a1e05
--- /dev/null
+++ b/tlsrptdb/db_test.go
@@ -0,0 +1,126 @@
+package tlsrptdb
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/mjl-/mox/config"
+	"github.com/mjl-/mox/dns"
+	"github.com/mjl-/mox/mox-"
+	"github.com/mjl-/mox/tlsrpt"
+)
+
+const reportJSON = `{
+	"organization-name": "Company-X",
+	"date-range": {
+		"start-datetime": "2016-04-01T00:00:00Z",
+		"end-datetime": "2016-04-01T23:59:59Z"
+	},
+	"contact-info": "sts-reporting@company-x.example",
+	"report-id": "5065427c-23d3-47ca-b6e0-946ea0e8c4be",
+	"policies": [{
+		"policy": {
+			"policy-type": "sts",
+			"policy-string": ["version: STSv1","mode: testing",
+			"mx: *.mail.company-y.example","max_age: 86400"],
+			"policy-domain": "test.xmox.nl",
+			"mx-host": ["*.mail.company-y.example"]
+		},
+		"summary": {
+			"total-successful-session-count": 5326,
+			"total-failure-session-count": 303
+		},
+		"failure-details": [{
+			"result-type": "certificate-expired",
+			"sending-mta-ip": "2001:db8:abcd:0012::1",
+			"receiving-mx-hostname": "mx1.mail.company-y.example",
+			"failed-session-count": 100
+		}, {
+			"result-type": "starttls-not-supported",
+			"sending-mta-ip": "2001:db8:abcd:0013::1",
+			"receiving-mx-hostname": "mx2.mail.company-y.example",
+			"receiving-ip": "203.0.113.56",
+			"failed-session-count": 200,
+			"additional-information": "https://reports.company-x.example/report_info ? 
id = 5065427 c - 23 d3# StarttlsNotSupported "
+		}, {
+			"result-type": "validation-failure",
+			"sending-mta-ip": "198.51.100.62",
+			"receiving-ip": "203.0.113.58",
+			"receiving-mx-hostname": "mx-backup.mail.company-y.example",
+			"failed-session-count": 3,
+			"failure-reason-code": "X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED"
+		}]
+	}]
+}`
+
+func TestReport(t *testing.T) {
+	mox.ConfigStaticPath = "../testdata/tlsrpt/fake.conf"
+	mox.Conf.Static.DataDir = "."
+	// Recognize as configured domain.
+	mox.Conf.Dynamic.Domains = map[string]config.Domain{
+		"test.xmox.nl": {},
+	}
+
+	dbpath := mox.DataDirPath("tlsrpt.db")
+	os.MkdirAll(filepath.Dir(dbpath), 0770)
+	defer os.Remove(dbpath)
+
+	if err := Init(); err != nil {
+		t.Fatalf("init database: %s", err)
+	}
+	defer Close()
+
+	files, err := os.ReadDir("../testdata/tlsreports")
+	if err != nil {
+		t.Fatalf("listing reports: %s", err)
+	}
+	for _, file := range files {
+		f, err := os.Open("../testdata/tlsreports/" + file.Name())
+		if err != nil {
+			// Use file.Name(): file is an os.DirEntry, and %q on it would print
+			// the struct rather than the path being opened.
+			t.Fatalf("open %q: %s", file.Name(), err)
+		}
+		report, err := tlsrpt.ParseMessage(f)
+		f.Close()
+		if err != nil {
+			t.Fatalf("parsing TLSRPT from message %q: %s", file.Name(), err)
+		}
+		if err := AddReport(context.Background(), dns.Domain{ASCII: "mox.example"}, "tlsrpt@mox.example", report); err != nil {
+			t.Fatalf("adding report to database: %s", err)
+		}
+	}
+
+	report, err := tlsrpt.Parse(strings.NewReader(reportJSON))
+	if err != nil {
+		t.Fatalf("parsing report: %v", err)
+	} else if err := AddReport(context.Background(), dns.Domain{ASCII: "company-y.example"}, "tlsrpt@company-y.example", report); err != nil {
+		t.Fatalf("adding report to database: %s", err)
+	}
+
+	records, err := Records(context.Background())
+	if err != nil {
+		t.Fatalf("fetching records: %s", err)
+	}
+	for _, r := range records {
+		if r.FromDomain != "company-y.example" {
+			continue
+		}
+		if !reflect.DeepEqual(&r.Report, report) {
+			t.Fatalf("report, got %#v, expected %#v", r.Report, report)
+		}
+		if _, 
err := RecordID(context.Background(), r.ID); err != nil { + t.Fatalf("get record by id: %v", err) + } + } + + start, _ := time.Parse(time.RFC3339, "2016-04-01T00:00:00Z") + end, _ := time.Parse(time.RFC3339, "2016-04-01T23:59:59Z") + records, err = RecordsPeriodDomain(context.Background(), start, end, "test.xmox.nl") + if err != nil || len(records) != 1 { + t.Fatalf("got err %v, records %#v, expected no error with 1 record", err, records) + } +} diff --git a/tools.go b/tools.go new file mode 100644 index 0000000..9b56023 --- /dev/null +++ b/tools.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package main + +import ( + _ "github.com/mjl-/sherpadoc/cmd/sherpadoc" +) diff --git a/updates.go b/updates.go new file mode 100644 index 0000000..9a89df7 --- /dev/null +++ b/updates.go @@ -0,0 +1,133 @@ +package main + +import ( + "bytes" + "crypto/ed25519" + cryptorand "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + "os" + + "github.com/mjl-/mox/updates" +) + +func cmdUpdatesAddSigned(c *cmd) { + c.unlisted = true + c.params = "privkey-file changes-file < message" + c.help = "Add a signed change to the changes file." 
+ args := c.Parse() + if len(args) != 2 { + c.Usage() + } + + f, err := os.Open(args[0]) + xcheckf(err, "open private key file") + defer f.Close() + seed, err := io.ReadAll(base64.NewDecoder(base64.StdEncoding, f)) + xcheckf(err, "read private key file") + if len(seed) != ed25519.SeedSize { + log.Fatalf("private key is %d bytes, must be %d", len(seed), ed25519.SeedSize) + } + + vf, err := os.Open(args[1]) + xcheckf(err, "open changes file") + var changelog updates.Changelog + err = json.NewDecoder(vf).Decode(&changelog) + xcheckf(err, "parsing changes file") + + privKey := ed25519.NewKeyFromSeed(seed) + + fmt.Fprintln(os.Stderr, "reading changelog text from stdin") + buf, err := io.ReadAll(os.Stdin) + xcheckf(err, "parse message") + + if len(buf) == 0 { + log.Fatalf("empty message") + } + // Message starts with headers similar to email, with "version" and "date". + // todo future: enforce this format? + sig := ed25519.Sign(privKey, buf) + + changelog.Changes = append(changelog.Changes, updates.Change{ + PubKey: privKey.Public().(ed25519.PublicKey), + Sig: sig, + Text: string(buf), + }) + + var b bytes.Buffer + enc := json.NewEncoder(&b) + enc.SetIndent("", "\t") + err = enc.Encode(changelog) + xcheckf(err, "encode changelog as json") + err = os.WriteFile(args[1], b.Bytes(), 0644) + xcheckf(err, "writing versions file") +} + +func cmdUpdatesVerify(c *cmd) { + c.unlisted = true + c.params = "pubkey-base64 < changelog-file" + c.help = "Verify the changelog file against the public key." 
+	args := c.Parse()
+	if len(args) != 1 {
+		c.Usage()
+	}
+
+	pubKey := ed25519.PublicKey(base64Decode(args[0]))
+
+	var changelog updates.Changelog
+	err := json.NewDecoder(os.Stdin).Decode(&changelog)
+	xcheckf(err, "parsing changelog file")
+
+	for i, c := range changelog.Changes {
+		if !bytes.Equal(c.PubKey, pubKey) {
+			log.Fatalf("change has different public key %x, expected %x", c.PubKey, pubKey)
+		} else if !ed25519.Verify(pubKey, []byte(c.Text), c.Sig) {
+			log.Fatalf("verification failed for change with index %d", i)
+		}
+	}
+	fmt.Printf("%d change(s) verified\n", len(changelog.Changes))
+}
+
+func cmdUpdatesGenkey(c *cmd) {
+	c.unlisted = true
+	c.params = ">privkey"
+	c.help = "Generate a key for signing a changelog file with."
+	args := c.Parse()
+	if len(args) != 0 {
+		c.Usage()
+	}
+
+	buf := make([]byte, ed25519.SeedSize)
+	_, err := cryptorand.Read(buf)
+	xcheckf(err, "generating key")
+	enc := base64.NewEncoder(base64.StdEncoding, os.Stdout)
+	_, err = enc.Write(buf)
+	xcheckf(err, "writing private key")
+	err = enc.Close()
+	xcheckf(err, "writing private key")
+}
+
+func cmdUpdatesPubkey(c *cmd) {
+	c.unlisted = true
+	// The command takes no positional arguments: it reads the base64 private key
+	// seed from stdin and writes the base64 public key to stdout.
+	c.params = "<privkey >pubkey"
+	c.help = "Print the public key for a private key."
+	args := c.Parse()
+	if len(args) != 0 {
+		c.Usage()
+	}
+
+	seed := make([]byte, ed25519.SeedSize)
+	_, err := io.ReadFull(base64.NewDecoder(base64.StdEncoding, os.Stdin), seed)
+	xcheckf(err, "reading private key")
+	privKey := ed25519.NewKeyFromSeed(seed)
+	pubKey := []byte(privKey.Public().(ed25519.PublicKey))
+	enc := base64.NewEncoder(base64.StdEncoding, os.Stdout)
+	_, err = enc.Write(pubKey)
+	xcheckf(err, "writing public key")
+	err = enc.Close()
+	xcheckf(err, "writing public key")
+}
diff --git a/updates/updates.go b/updates/updates.go
new file mode 100644
index 0000000..7956a3c
--- /dev/null
+++ b/updates/updates.go
@@ -0,0 +1,282 @@
+// Package updates implements a mechanism for checking if software updates are
+// available, and fetching a changelog.
+//
+// Given a domain, the latest version of the software is queried in DNS from
+// "_updates.<domain>" as a TXT record. If a new version is available, the
+// changelog compared to a last known version can be retrieved. A changelog base
+// URL and public key for signatures has to be specified explicitly.
+//
+// Downloading or upgrading to the latest version is not part of this package.
+package updates + +import ( + "bytes" + "context" + "crypto/ed25519" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/mjl-/mox/dns" + "github.com/mjl-/mox/metrics" + "github.com/mjl-/mox/mlog" + "github.com/mjl-/mox/moxio" +) + +var xlog = mlog.New("updates") + +var ( + metricLookup = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_updates_lookup_duration_seconds", + Help: "Updates lookup with result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{"result"}, + ) + metricFetchChangelog = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "mox_updates_fetchchangelog_duration_seconds", + Help: "Fetch changelog with result.", + Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30}, + }, + []string{"result"}, + ) +) + +var ( + // Lookup errors. + ErrDNS = errors.New("updates: dns error") + ErrRecordSyntax = errors.New("updates: dns record syntax") + ErrNoRecord = errors.New("updates: no dns record") + ErrMultipleRecords = errors.New("updates: multiple dns records") + ErrBadVersion = errors.New("updates: malformed version") + + // Fetch changelog errors. + ErrChangelogFetch = errors.New("updates: fetching changelog") +) + +// Change is a an entry in the changelog, a released version. +type Change struct { + PubKey []byte // Key used for signing. + Sig []byte // Signature over text, with ed25519. + Text string // Signed changelog entry, starts with header similar to email, with at least fields "version" and "date". +} + +// Changelog is returned as JSON. +// +// The changelog itself is not signed, only individual changes. The goal is to +// prevent a potential future different domain owner from notifying users about +// new versions. 
+type Changelog struct { + Changes []Change +} + +// Lookup looks up the updates DNS TXT record at "_updates." and returns +// the parsed form. +func Lookup(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (rversion Version, rrecord *Record, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + var result = "ok" + if rerr != nil { + result = "error" + } + metricLookup.WithLabelValues(result).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("updates lookup result", rerr, mlog.Field("domain", domain), mlog.Field("version", rversion), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start))) + }() + + nctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + name := "_updates." + domain.ASCII + "." + txts, err := dns.WithPackage(resolver, "updates").LookupTXT(nctx, name) + if dns.IsNotFound(err) { + return Version{}, nil, ErrNoRecord + } else if err != nil { + return Version{}, nil, fmt.Errorf("%w: %s", ErrDNS, err) + } + var record *Record + for _, txt := range txts { + r, isupdates, err := ParseRecord(txt) + if !isupdates { + continue + } else if err != nil { + return Version{}, nil, err + } + if record != nil { + return Version{}, nil, ErrMultipleRecords + } + record = r + } + + if record == nil { + return Version{}, nil, ErrNoRecord + } + return record.Latest, record, nil +} + +// FetchChangelog fetches the changelog compared against the base version, which +// can be the Version zero value. +// +// The changelog is requested using HTTP GET from baseURL with optional "from" +// query string parameter. +// +// Individual changes are verified using pubKey. If any signature is invalid, an +// error is returned. +// +// A changelog can be maximum 1 MB. 
+func FetchChangelog(ctx context.Context, baseURL string, base Version, pubKey []byte) (changelog *Changelog, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + var result = "ok" + if rerr != nil { + result = "error" + } + metricFetchChangelog.WithLabelValues(result).Observe(float64(time.Since(start)) / float64(time.Second)) + log.Debugx("updates fetch changelog result", rerr, mlog.Field("baseurl", baseURL), mlog.Field("base", base), mlog.Field("duration", time.Since(start))) + }() + + url := baseURL + "?from=" + base.String() + nctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + req, err := http.NewRequestWithContext(nctx, "GET", url, nil) + if err != nil { + return nil, fmt.Errorf("making request: %v", err) + } + resp, err := http.DefaultClient.Do(req) + if resp == nil { + resp = &http.Response{StatusCode: 0} + } + metrics.HTTPClientObserve(ctx, "updates", req.Method, resp.StatusCode, err, start) + if err != nil { + return nil, fmt.Errorf("%w: making http request: %s", ErrChangelogFetch, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%w: http status: %s", ErrChangelogFetch, resp.Status) + } + var cl Changelog + if err := json.NewDecoder(&moxio.LimitReader{R: resp.Body, Limit: 1024 * 1024}).Decode(&cl); err != nil { + return nil, fmt.Errorf("%w: parsing changelog: %s", ErrChangelogFetch, err) + } + for _, c := range cl.Changes { + if !bytes.Equal(c.PubKey, pubKey) { + return nil, fmt.Errorf("%w: verifying change: signed with unknown public key %x instead of %x", ErrChangelogFetch, c.PubKey, pubKey) + } + if !ed25519.Verify(c.PubKey, []byte(c.Text), c.Sig) { + return nil, fmt.Errorf("%w: verifying change: invalid signature for change", ErrChangelogFetch) + } + } + + return &cl, nil +} + +// Check checks for an updated version through DNS and fetches a +// changelog if so. 
+func Check(ctx context.Context, resolver dns.Resolver, domain dns.Domain, lastKnown Version, changelogBaseURL string, pubKey []byte) (rversion Version, rrecord *Record, changelog *Changelog, rerr error) { + log := xlog.WithContext(ctx) + start := time.Now() + defer func() { + log.Debugx("updates check result", rerr, mlog.Field("domain", domain), mlog.Field("lastKnown", lastKnown), mlog.Field("changelogbaseurl", changelogBaseURL), mlog.Field("version", rversion), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start))) + }() + + latest, record, err := Lookup(ctx, resolver, domain) + if err != nil { + return latest, record, nil, err + } + + if latest.After(lastKnown) { + changelog, err = FetchChangelog(ctx, changelogBaseURL, lastKnown, pubKey) + } + return latest, record, changelog, err +} + +// Version is a specified version in an updates records. +type Version struct { + Major int + Minor int + Patch int +} + +// After returns if v comes after ov. +func (v Version) After(ov Version) bool { + return v.Major > ov.Major || v.Major == ov.Major && v.Minor > ov.Minor || v.Major == ov.Major && v.Minor == ov.Minor && v.Patch > ov.Patch +} + +// String returns a human-reasonable version, also for use in the updates +// record. +func (v Version) String() string { + return fmt.Sprintf("v%d.%d.%d", v.Major, v.Minor, v.Patch) +} + +// ParseVersion parses a version as used in an updates records. +// +// Rules: +// - Optionally start with "v" +// - A dash and anything after it is ignored, e.g. for non-release modifiers. +// - Remaining string must be three dot-separated numbers. 
+func ParseVersion(s string) (Version, error) { + s = strings.TrimPrefix(s, "v") + s = strings.Split(s, "-")[0] + t := strings.Split(s, ".") + if len(t) != 3 { + return Version{}, fmt.Errorf("%w: %v", ErrBadVersion, t) + } + nums := make([]int, 3) + for i, v := range t { + n, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return Version{}, fmt.Errorf("%w: parsing int %q: %s", ErrBadVersion, v, err) + } + nums[i] = int(n) + } + return Version{nums[0], nums[1], nums[2]}, nil +} + +// Record is an updates DNS record. +type Record struct { + Version string // v=UPDATES0, required and must always be first. + Latest Version // l=, required. +} + +// ParseRecord parses an updates DNS TXT record as served at +func ParseRecord(txt string) (record *Record, isupdates bool, err error) { + l := strings.Split(txt, ";") + vkv := strings.SplitN(strings.TrimSpace(l[0]), "=", 2) + if len(vkv) != 2 || vkv[0] != "v" || !strings.EqualFold(vkv[1], "UPDATES0") { + return nil, false, nil + } + + r := &Record{Version: "UPDATES0"} + seen := map[string]bool{} + for _, t := range l[1:] { + kv := strings.SplitN(strings.TrimSpace(t), "=", 2) + if len(kv) != 2 { + return nil, true, ErrRecordSyntax + } + k := strings.ToLower(kv[0]) + if seen[k] { + return nil, true, fmt.Errorf("%w: duplicate key %q", ErrRecordSyntax, k) + } + seen[k] = true + switch k { + case "l": + v, err := ParseVersion(kv[1]) + if err != nil { + return nil, true, fmt.Errorf("%w: %s", ErrRecordSyntax, err) + } + r.Latest = v + default: + continue + } + } + return r, true, nil +} diff --git a/updates/updates_test.go b/updates/updates_test.go new file mode 100644 index 0000000..9b8fa8c --- /dev/null +++ b/updates/updates_test.go @@ -0,0 +1,152 @@ +package updates + +import ( + "context" + "crypto/ed25519" + "encoding/json" + "errors" + "io" + "log" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/mjl-/mox/dns" +) + +func TestUpdates(t *testing.T) { + resolver := dns.MockResolver{ + TXT: 
map[string][]string{
			"_updates.mox.example.":        {"v=UPDATES0; l=v0.0.1"},
			"_updates.one.example.":        {"other", "v=UPDATES0; l=v0.0.1-rc1"},
			"_updates.dup.example.":        {"v=UPDATES0; l=v0.0.1", "v=UPDATES0; l=v0.0.1"},
			"_updates.other.example.":      {"other"},
			"_updates.malformed.example.":  {"v=UPDATES0; l=bogus"},
			"_updates.malformed2.example.": {"v=UPDATES0; bogus"},
			"_updates.malformed3.example.": {"v=UPDATES0; l=v0.0.1; l=v0.0.1"},
			"_updates.temperror.example.":  {"v=UPDATES0; l=v0.0.1"},
			"_updates.unknown.example.":    {"v=UPDATES0; l=v0.0.1; unknown=ok"},
		},
		Fail: map[dns.Mockreq]struct{}{
			{Type: "txt", Name: "_updates.temperror.example."}: {},
		},
	}

	// lookup resolves the updates record for dom and checks version, record
	// and error against expectations.
	lookup := func(dom string, expVersion string, expRecord *Record, expErr error) {
		t.Helper()

		d, _ := dns.ParseDomain(dom)
		expv, _ := ParseVersion(expVersion)

		version, record, err := Lookup(context.Background(), resolver, d)
		if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
			t.Fatalf("lookup: got err %v, expected %v", err, expErr)
		}
		if version != expv || !reflect.DeepEqual(record, expRecord) {
			t.Fatalf("lookup: got version %v, record %#v, expected %v %#v", version, record, expv, expRecord)
		}
	}

	lookup("mox.example", "v0.0.1", &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, nil)
	lookup("one.example", "v0.0.1", &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, nil)
	lookup("absent.example", "", nil, ErrNoRecord)
	lookup("dup.example", "", nil, ErrMultipleRecords)
	lookup("other.example", "", nil, ErrNoRecord)
	lookup("malformed.example", "", nil, ErrRecordSyntax)
	lookup("malformed2.example", "", nil, ErrRecordSyntax)
	lookup("malformed3.example", "", nil, ErrRecordSyntax)
	lookup("temperror.example", "", nil, ErrDNS)
	lookup("unknown.example", "v0.0.1", &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, nil)

	seed := make([]byte, ed25519.SeedSize)
	priv := ed25519.NewKeyFromSeed(seed)
	pub := []byte(priv.Public().(ed25519.PublicKey))
	changelog := Changelog{
		Changes: []Change{
			{
				PubKey: pub,
				Sig:    ed25519.Sign(priv, []byte("test")),
				Text:   "test",
			},
		},
	}

	// newChangelogServer starts an HTTP test server serving the current
	// changelog with the given status code; status 0 makes the handler panic,
	// simulating a broken server. The caller must Close the returned server.
	//
	// NOTE: the handler runs on the test server's goroutine, so it must not
	// call t.Fatalf: FailNow/Fatal may only be called from the goroutine
	// running the test (testing package docs). t.Errorf is goroutine-safe.
	newChangelogServer := func(status int) *httptest.Server {
		mux := &http.ServeMux{}
		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			if status == 0 {
				panic("bad serve")
			}
			w.WriteHeader(status)
			if err := json.NewEncoder(w).Encode(changelog); err != nil {
				t.Errorf("encode changelog: %v", err)
			}
		})
		s := httptest.NewUnstartedServer(mux)
		s.Config.ErrorLog = log.New(io.Discard, "", 0)
		s.Start()
		return s
	}

	// fetch retrieves the changelog from a fresh test server (or baseURL when
	// non-empty) and checks result and error against expectations.
	fetch := func(baseURL string, version Version, status int, pubKey []byte, expChangelog *Changelog, expErr error) {
		t.Helper()

		s := newChangelogServer(status)
		defer s.Close()
		if baseURL == "" {
			baseURL = s.URL
		}

		got, err := FetchChangelog(context.Background(), baseURL, version, pubKey)
		if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
			t.Fatalf("fetch changelog: got err %v, expected %v", err, expErr)
		}
		if !reflect.DeepEqual(got, expChangelog) {
			t.Fatalf("fetch changelog: got changelog %v, expected %v", got, expChangelog)
		}
	}

	fetch("", Version{}, 200, pub, &changelog, nil)
	fetch("", Version{1, 1, 1}, 200, pub, &changelog, nil)
	fetch("", Version{}, 200, make([]byte, ed25519.PublicKeySize), nil, ErrChangelogFetch) // Invalid public key.
	changelog.Changes[0].Text = "bad"
	fetch("", Version{}, 200, pub, nil, ErrChangelogFetch) // Invalid signature.
	changelog.Changes[0].Text = "test"
	fetch("", Version{}, 404, pub, nil, ErrChangelogFetch)
	fetch("", Version{}, 503, pub, nil, ErrChangelogFetch)
	fetch("", Version{}, 0, pub, nil, ErrChangelogFetch)
	fetch("bogusurl", Version{}, 200, pub, nil, ErrChangelogFetch)

	// check exercises the combined DNS lookup + changelog fetch and compares
	// all results against expectations.
	check := func(dom string, base Version, baseURL string, status int, pubKey []byte, expVersion Version, expRecord *Record, expChangelog *Changelog, expErr error) {
		t.Helper()

		s := newChangelogServer(status)
		defer s.Close()
		if baseURL == "" {
			baseURL = s.URL
		}

		version, record, gotChangelog, err := Check(context.Background(), resolver, dns.Domain{ASCII: dom}, base, baseURL, pubKey)
		if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
			t.Fatalf("check: got err %v, expected %v", err, expErr)
		}
		if version != expVersion || !reflect.DeepEqual(record, expRecord) || !reflect.DeepEqual(gotChangelog, expChangelog) {
			t.Fatalf("check: got version %v, record %#v, changelog %v, expected %v %#v %v", version, record, gotChangelog, expVersion, expRecord, expChangelog)
		}
	}

	// Status 0 is fine here: base equals latest, so no changelog is fetched
	// and the panicking handler is never invoked.
	check("mox.example", Version{0, 0, 1}, "", 0, pub, Version{0, 0, 1}, &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, nil, nil)
	check("mox.example", Version{0, 0, 0}, "", 200, pub, Version{0, 0, 1}, &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, &changelog, nil)
	check("mox.example", Version{0, 0, 0}, "", 0, pub, Version{0, 0, 1}, &Record{Version: "UPDATES0", Latest: Version{0, 0, 1}}, nil, ErrChangelogFetch)
	check("absent.example", Version{0, 0, 1}, "", 200, pub, Version{}, nil, nil, ErrNoRecord)
}
diff --git
a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000..339177b --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 0000000..1602287 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 
+5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 
+216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 
+4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff 
--git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000..d7d14f8 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. 
+ targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. 
This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 0000000..24b5306 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 0000000..792b4a6 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,69 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. 
+You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 0000000..15c835d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,235 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. 
+func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 0000000..ad14b80 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit 
xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 0000000..be8db5b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// DI prime4v + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), DI + + // Load slice. + MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. 
+ MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 0000000..4a5a821 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 0000000..fc9bea7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. 
+func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 0000000..376e0ca --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,57 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000..0f64693 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 0000000..e810e6f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. 
+func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. 
+// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. 
+func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. 
+func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) 
+ } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 0000000..d399bf0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. 
+func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 0000000..e8db57e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. 
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: Do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: Do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. 
+func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 0000000..2187e87 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. 
+func DiscardUnknown(m Message) { + if m != nil { + discardUnknown(MessageReflect(m)) + } +} + +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) + } + } + return true + }) + + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 0000000..42fc120 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,356 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo + + // ExtensionRange represents a range of message extensions. 
+ // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 + + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 + + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) + +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false + } + + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has + }) + } + + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] + } + return has +} + +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } + return true + }) + } + clearUnknown(mr, fieldNum(xt.Field)) +} + +// ClearAllExtensions clears all extensions from m. 
+// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) +} + +// GetExtension retrieves a proto2 extended field from m. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil + } + + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. 
+ xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } + } + + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension + } + + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + return v, nil +} + +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } + +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. 
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable + } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v + } + return vs, nil +} + +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } + + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() + } + } + + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil +} + +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + // Verify that the raw field is valid. 
+ for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] + } + + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} + +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] + } + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) + } + return xts, nil +} + +// isValidExtension reports whether xtd is a valid extension descriptor for md. 
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) +} + +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false + } +} + +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..dcdc220 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. 
+// +// Deprecated: Do not use. +type StructProperties struct { + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the protobuf field name. + OneofTypes map[string]*OneofProperties +} + +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. +type Properties struct { + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. + WireType int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. + Required bool + // Optional reports whether this is a optional field. + Optional bool + // Repeated reports whether this is a repeated field. + Repeated bool + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. 
+ Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} + +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != "" { + s += ",json=" + p.JSONName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { + s += ",proto3" + } + if p.Oneof { + s += ",oneof" + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": + p.Optional = true + case s == "req": + p.Required = true + case s == "rep": + p.Repeated = true + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": + p.Packed = true + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + p.HasDefault = true + p.Default, i = tag[len("def="):], len(tag) + } + tag = strings.TrimPrefix(tag[i:], ",") + } +} + +// Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. 
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } +} + +var propertiesCache sync.Map // map[reflect.Type]*StructProperties + +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. +func GetProperties(t reflect.Type) *StructProperties { + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) + } + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) +} + +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + + var hasOneof bool + prop := new(StructProperties) + + // Construct a list of properties for each field in the struct. + for i := 0; i < t.NumField(); i++ { + p := new(Properties) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) + + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof + } + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } + + prop.Prop = append(prop.Prop, p) + } + + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + + prop.OneofTypes = make(map[string]*OneofProperties) + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T + Prop: new(Properties), + } + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. 
+ var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true + } + } + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p + } + } + + return prop +} + +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 0000000..5aee89c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. 
+// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. +// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. 
+type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. 
+// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 0000000..066b432 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,317 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. 
+type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. 
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. +func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. 
+ numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. 
+func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. 
+ if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. 
+// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. + xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 0000000..47eb3e4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. 
+type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... 
> + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. 
+ v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := 
protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. 
+ var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". 
+ v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && 
p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. 
+ + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return 
"", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new file mode 100644 index 0000000..a31134e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. 
+func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) + w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil 
{ + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + 
w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
+ } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 0000000..d7c28da --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. 
+func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 0000000..398e348 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. 
+func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 0000000..a76f807 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. 
+ +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 0000000..5d8cb5b --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore new file mode 100644 index 0000000..e16fb94 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore @@ -0,0 +1 @@ +cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile new file mode 100644 index 0000000..81be214 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... 
+ go tool cover -func cover.dat + +.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 0000000..258c063 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). 
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 0000000..c318385 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 0000000..8fb59ad --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/mjl-/bstore/.gitignore b/vendor/github.com/mjl-/bstore/.gitignore new file mode 100644 index 0000000..f68e7f2 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/.gitignore @@ -0,0 +1,3 @@ +/cover.out +/cover.html +/testdata/*.db diff --git a/vendor/github.com/mjl-/bstore/LICENSE b/vendor/github.com/mjl-/bstore/LICENSE new file mode 100644 index 0000000..eef484d --- /dev/null +++ b/vendor/github.com/mjl-/bstore/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2022 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mjl-/bstore/Makefile b/vendor/github.com/mjl-/bstore/Makefile new file mode 100644 index 0000000..6aecbeb --- /dev/null +++ b/vendor/github.com/mjl-/bstore/Makefile @@ -0,0 +1,20 @@ +build: + go build ./... + go vet ./... + GOARCH=386 go vet ./... + staticcheck ./... + ./gendoc.sh + +fmt: + go fmt ./... + gofmt -w -s *.go cmd/bstore/*.go + +test: + go test -race -shuffle=on -coverprofile cover.out + go tool cover -html=cover.out -o cover.html + +benchmark: + go test -bench . + +fuzz: + go test -fuzz . diff --git a/vendor/github.com/mjl-/bstore/README.md b/vendor/github.com/mjl-/bstore/README.md new file mode 100644 index 0000000..0f6269a --- /dev/null +++ b/vendor/github.com/mjl-/bstore/README.md @@ -0,0 +1,51 @@ +bstore is a database library for storing and quering Go struct data. + +See https://pkg.go.dev/github.com/mjl-/bstore + +MIT-licensed + +# Comparison + +Bstore is designed as a small, pure Go library that still provides most of the +common data consistency requirements for modest database use cases. Bstore aims +to make basic use of cgo-based libraries, such as sqlite, unnecessary. Sqlite +is a great library, but Go applications that require cgo are hard to +cross-compile. With bstore, cross-compiling to most Go-supported platforms +stays trivial. Although bstore is much more limited in so many aspects than +sqlite, bstore also offers some advantages as well. + +- Cross-compilation and reproducibility: Trivial with bstore due to pure Go, + much harder with sqlite because of cgo. +- Code complexity: low with bstore (6k lines including comments/docs), high + with sqlite. +- Query language: mostly-type-checked function calls in bstore, free-form query + strings only checked at runtime with sqlite. +- Functionality: very limited with bstore, much more full-featured with sqlite. +- Schema management: mostly automatic based on Go type definitions in bstore, + manual with ALTER statements in sqlite. 
+- Types and packing/parsing: automatic/transparent in bstore based on Go types + (including maps, slices, structs and custom MarshalBinary encoding), versus + manual scanning and parameter passing with sqlite with limited set of SQL + types. +- Performance: low to good performance with bstore, high performance with + sqlite. +- Database files: single file with bstore, several files with sqlite (due to + WAL or journal files). +- Test coverage: decent coverage but limited real-world for bstore, versus + extremely thoroughly tested and with enormous real-world use. + +# FAQ + +Q: Is bstore an ORM? + +A: No. The API for bstore may look like an ORM. But instead of mapping bstore +"queries" (function calls) to an SQL query string, bstore executes them +directly without converting to a query language. + +Q: How does bstore store its data? + +A bstore database is a single-file BoltDB database. BoltDB provides ACID +properties. Bstore uses a BoltDB "bucket" (key/value store) for each Go type +stored, with multiple subbuckets: one for type definitions, one for the actual +data, and one bucket per index. BoltDB stores data in a B+tree. See format.md +for details. diff --git a/vendor/github.com/mjl-/bstore/default.go b/vendor/github.com/mjl-/bstore/default.go new file mode 100644 index 0000000..21e86c8 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/default.go @@ -0,0 +1,80 @@ +package bstore + +import ( + "fmt" + "reflect" + "time" +) + +var zerotime = time.Time{} + +// applyDefault replaces zero values for fields that have a Default value configured. 
+func (tv *typeVersion) applyDefault(rv reflect.Value) error { + for _, f := range tv.Fields[1:] { + fv := rv.FieldByIndex(f.structField.Index) + if err := f.applyDefault(fv); err != nil { + return err + } + } + return nil +} + +func (f field) applyDefault(rv reflect.Value) error { + switch f.Type.Kind { + case kindBytes, kindBinaryMarshal, kindMap: + return nil + + case kindSlice, kindStruct: + return f.Type.applyDefault(rv) + + case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64, kindFloat32, kindFloat64, kindString, kindTime: + if !f.defaultValue.IsValid() || !rv.IsZero() { + return nil + } + fv := f.defaultValue + // Time is special. "now" is encoded as the zero value of time.Time. + if f.Type.Kind == kindTime && fv.Interface() == zerotime { + now := time.Now().Round(0) + if f.Type.Ptr { + fv = reflect.ValueOf(&now) + } else { + fv = reflect.ValueOf(now) + } + } else if f.Type.Ptr { + fv = reflect.New(f.structField.Type.Elem()) + fv.Elem().Set(f.defaultValue) + } + rv.Set(fv) + return nil + + default: + return fmt.Errorf("internal error: missing case for %v", f.Type.Kind) + } +} + +// only for recursing. we do not support recursing into maps because it would +// involve more work making values settable. and how sensible it it anyway? 
+func (ft fieldType) applyDefault(rv reflect.Value) error { + if ft.Ptr && (rv.IsZero() || rv.IsNil()) { + return nil + } else if ft.Ptr { + rv = rv.Elem() + } + switch ft.Kind { + case kindSlice: + n := rv.Len() + for i := 0; i < n; i++ { + if err := ft.List.applyDefault(rv.Index(i)); err != nil { + return err + } + } + case kindStruct: + for _, nf := range ft.Fields { + nfv := rv.FieldByIndex(nf.structField.Index) + if err := nf.applyDefault(nfv); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/mjl-/bstore/doc.go b/vendor/github.com/mjl-/bstore/doc.go new file mode 100644 index 0000000..433c779 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/doc.go @@ -0,0 +1,142 @@ +/* +Package bstore is a database library for storing and quering Go struct data. + +Bstore is designed as a small, pure Go library that still provides most of +the common data consistency requirements for modest database use cases. Bstore +aims to make basic use of cgo-based libraries, such as sqlite, unnecessary. + +Bstore implements autoincrementing primary keys, indices, default values, +enforcement of nonzero, unique and referential integrity constraints, automatic +schema updates and a query API for combining filters/sorting/limits. Queries +are planned and executed using indices for fast execution where possible. +Bstores is designed with the Go type system in mind: you typically don't have to +write any (un)marshal code for your types. + +# Field types + +Struct field types currently supported for storing, including pointers to these +types, but not pointers to pointers: + + - int (as int32), int8, int16, int32, int64 + - uint (as uint32), uint8, uint16, uint32, uint64 + - bool, float32, float64, string, []byte + - Maps, with keys and values of any supported type, except keys with pointer types. + - Slices, with elements of any supported type. 
+ - time.Time + - Types that implement binary.MarshalBinary and binary.UnmarshalBinary, useful + for struct types with state in private fields. Do not change the + (Un)marshalBinary method in an incompatible way without a data migration. + - Structs, with fields of any supported type. + +Note: int and uint are stored as int32 and uint32, for compatibility of database +files between 32bit and 64bit systems. Where possible, use explicit (u)int32 or +(u)int64 types. + +Embedded structs are handled by storing the individual fields of the embedded +struct. The named embedded type is not part of the type schema, and can +currently only be used with UpdateField and UpdateFields, not for filtering. + +Bstore embraces the use of Go zero values. Use zero values, possibly pointers, +where you would use NULL values in SQL. + +Types that have not yet been implemented: interface values, (fixed length) arrays, +complex numbers. + +# Struct tags + +The typical Go struct can be stored in the database. The first field of a +struct type is its primary key, and must always be unique. Additional behaviour +can be configured through struct tag "bstore". The values are comma-separated. +Typically one word, but some have multiple space-separated words: + + - "-" ignores the field entirely. + - "name ", use "fieldname" instead of the Go type field name. + - "nonzero", enforces that field values are not the zero value. + - "noauto", only valid for integer types, and only for the primary key. By + default, an integer-typed primary key will automatically get a next value + assigned on insert when it is 0. With noauto inserting a 0 value results in an + error. For primary keys of other types inserting the zero value always results + in an error. + - "index" or "index []", adds an index. In the first + form, the index is on the field on which the tag is specified, and the index + name is the same as the field name. In the second form multiple fields can be + specified, and an optional name. 
The first field must be the field on which + the tag is specified. The field names are +-separated. The default name for + the second form is the same +-separated string but can be set explicitly to + the second parameter. An index can only be set for basic integer types, bools, + time and strings. Indices are automatically (re)created when registering a + type. + - "unique" or "unique []", adds an index as with + "index" and also enforces a unique constraint. For time.Time the timezone is + ignored for the uniqueness check. + - "ref ", enforces that the value exists as primary key for "type". + Field types must match exactly, e.g. you cannot reference an int with an int64. + An index is automatically created and maintained for fields with a foreign key, + for efficiently checking that removed records in the referenced type are not in + use. If the field has the zero value, the reference is not checked. If you + require a valid reference, add "nonzero". + - "default ", replaces a zero value with the specified value on record + insert. Special value "now" is recognized for time.Time as the current time. + Times are parsed as time.RFC3339 otherwise. Supported types: bool + ("true"/"false"), integers, floats, strings. Value is not quoted and no escaping + of special characters, like the comma that separates struct tag words, is + possible. Defaults are also replaced on fields in nested structs and + slices, but not in maps. + - "typename ", override name of the type. The name of the Go type is + used by default. Can only be present on the first field (primary key). + Useful for doing schema updates. + +# Schema updates + +Before using a Go type, you must register it for use with the open database by +passing a (zero) value of that type to the Open or Register functions. For each +type, a type definition is stored in the database. 
If a type has an updated +definition since the previous database open, a new type definition is added to +the database automatically and any required modifications are made: Indexes +(re)created, fields added/removed, new nonzero/unique/reference constraints +validated. + +If data/types cannot be updated automatically (e.g. converting an int field into +a string field), custom data migration code is needed. You may have to keep +track of a data/schema version. + +As a special case, you can switch field types between pointer and non-pointer +types. With one exception: changing from pointer to non-pointer where the type +has a field that must be nonzer is not allowed. The on-disk encoding will not be +changed, and nil pointers will turn into zero values, and zero values into nil +pointers. Also see section Limitations about pointer types. + +Because named embed structs are not part of the type definition, you can +wrap/unwrap fields into a embed/anonymous struct field. No new type definition +is created. + +# BoltDB + +BoltDB is used as underlying storage. Bolt provides ACID transactions, storing +its data in a B+tree. Only a single write transaction can be active at a time, +but otherwise multiple read-only transactions can be active. Do not start a +blocking read-only transaction while holding a writable transaction or vice +versa, this will cause deadlock. + +Bolt uses Go types that are memory mapped to the database file. This means bolt +database files cannot be transferred between machines with different endianness. +Bolt uses explicit widths for its types, so files can be transferred between +32bit and 64bit machines of same endianness. + +# Limitations + +Bstore does not implement the equivalent of SQL joins, aggregates, and many +other concepts. + +Filtering/comparing/sorting on pointer fields is not currently allowed. Pointer +fields cannot have a (unique) index due to the current index format. Using zero +values is recommended instead for now. 
+ +Integer field types can be expanded to wider types, but not to a different +signedness or a smaller integer (fewer bits). The primary key of a type cannot +currently be changed. + +The first field of a struct is always the primary key. Types requires an +explicit primary key. Autoincrement is only available for the primary key. +*/ +package bstore diff --git a/vendor/github.com/mjl-/bstore/equal.go b/vendor/github.com/mjl-/bstore/equal.go new file mode 100644 index 0000000..2d6decc --- /dev/null +++ b/vendor/github.com/mjl-/bstore/equal.go @@ -0,0 +1,91 @@ +package bstore + +import ( + "bytes" + "encoding" + "reflect" + "time" +) + +// equal checks if ov and v are the same as far as storage is concerned. i.e. +// this only takes stored fields into account. reflect.DeepEqual cannot be used, +// it would take all fields into account, including unexported. +func (tv *typeVersion) equal(ov, v reflect.Value) (r bool) { + if !ov.IsValid() || !v.IsValid() { + return false + } + for _, f := range tv.Fields { + fov := ov.FieldByIndex(f.structField.Index) + fv := v.FieldByIndex(f.structField.Index) + if !f.Type.equal(fov, fv) { + return false + } + } + return true +} + +func (ft fieldType) equal(ov, v reflect.Value) (r bool) { + if ov == v { + return true + } else if !ov.IsValid() || !v.IsValid() { + return false + } + if ft.Ptr { + ov = ov.Elem() + v = v.Elem() + } + if ov == v { + return true + } else if !ov.IsValid() || !v.IsValid() { + return false + } + switch ft.Kind { + case kindBytes: + return bytes.Equal(ov.Bytes(), v.Bytes()) + case kindMap: + on := ov.Len() + n := v.Len() + if on != n { + return false + } + r := ov.MapRange() + for r.Next() { + vv := v.MapIndex(r.Key()) + if !vv.IsValid() || !ft.MapValue.equal(r.Value(), vv) { + return false + } + } + return true + case kindSlice: + on := ov.Len() + n := v.Len() + if on != n { + return false + } + for i := 0; i < n; i++ { + if !ft.List.equal(ov.Index(i), v.Index(i)) { + return false + } + } + return true + case 
kindTime: + return ov.Interface().(time.Time).Equal(v.Interface().(time.Time)) + case kindBinaryMarshal: + obuf, oerr := ov.Interface().(encoding.BinaryMarshaler).MarshalBinary() + buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary() + if oerr != nil || err != nil { + return false // todo: should propagate error? + } + return bytes.Equal(obuf, buf) + case kindStruct: + for _, f := range ft.Fields { + fov := ov.FieldByIndex(f.structField.Index) + fv := v.FieldByIndex(f.structField.Index) + if !f.Type.equal(fov, fv) { + return false + } + } + return true + } + return ov.Interface() == v.Interface() +} diff --git a/vendor/github.com/mjl-/bstore/exec.go b/vendor/github.com/mjl-/bstore/exec.go new file mode 100644 index 0000000..166e3cb --- /dev/null +++ b/vendor/github.com/mjl-/bstore/exec.go @@ -0,0 +1,568 @@ +package bstore + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "time" + + bolt "go.etcd.io/bbolt" +) + +// exec represents the execution of a query plan. +type exec[T any] struct { + q *Query[T] + plan *plan[T] + + // For queries with explicit PKs filtered on. + // See plan.keys. We remove items from the list when we looked one up, but we keep the slice non-nil. + keys [][]byte + + // If -1, no limit is set. This is different from Query where 0 means + // no limit. We count back and 0 means the end. + limit int + + data []pair[T] // If not nil (even if empty), serve nextKey requests from here. + ib *bolt.Bucket + rb *bolt.Bucket + forward func() (bk, bv []byte) // Once we start scanning, we prepare forward to next/prev to the following value. +} + +// exec creates a new execution for the plan, registering statistics. 
+func (p *plan[T]) exec(q *Query[T]) *exec[T] { + q.stats.Queries++ + if p.idx == nil { + if p.keys != nil { + q.stats.PlanPK++ + } else if p.start != nil || p.stop != nil { + q.stats.PlanPKScan++ + } else { + q.stats.PlanTableScan++ + } + q.stats.LastIndex = "" + } else { + if p.keys != nil { + q.stats.PlanUnique++ + } else { + q.stats.PlanIndexScan++ + } + q.stats.LastIndex = p.idx.Name + } + if len(p.orders) > 0 { + q.stats.Sort++ + } + q.stats.LastOrdered = p.start != nil || p.stop != nil + q.stats.LastAsc = !p.desc + + limit := -1 + if q.xlimit > 0 { + limit = q.xlimit + } + return &exec[T]{q: q, plan: p, keys: p.keys, limit: limit} +} + +// incr treats buf as a bigendian number, increasing it by one. used for reverse +// scans, where we must start beyond the key prefix we are looking for. +func incr(buf []byte) bool { + for i := len(buf) - 1; i >= 0; i-- { + if buf[i] < 255 { + buf[i]++ + return true + } + buf[i] = 0 + } + return false +} + +func cutoff(b []byte, n int) []byte { + if len(b) <= n { + return b + } + return b[:n] +} + +// nextKey returns the key and optionally value for the next selected record. +// +// ErrAbsent is returned if there is no more record. +// +// If an error occurs, an error is set on query, except in the case of +// ErrAbsent. ErrAbsent does not finish the query because a Delete or Update +// could follow. +func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) { + var zero T + + q := e.q + + if q.err != nil { + return nil, zero, q.err + } + + // We collected & sorted data previously. Return from it until done. + // Limit was already applied. + if e.data != nil { + if len(e.data) == 0 { + return nil, zero, ErrAbsent + } + p := e.data[0] + e.data = e.data[1:] + var v T + if value { + var err error + v, err = p.Value(e) + if err != nil { + q.error(err) + return nil, zero, err + } + } + return p.bk, v, nil + } + + if e.limit == 0 { + return nil, zero, ErrAbsent + } + + // First time we are going to need buckets. 
+ if e.rb == nil { + tx, err := q.tx(write) + if err != nil { + q.error(err) + return nil, zero, err + } + e.rb, err = tx.recordsBucket(q.st.Name, q.st.Current.fillPercent) + if err != nil { + return nil, zero, err + } + if e.plan.idx != nil { + e.ib, err = tx.indexBucket(e.plan.idx) + if err != nil { + return nil, zero, err + } + } + } + + // List of IDs (records) or full unique index equality match. + // We can get the records/index value by a simple "get" on the key. + if e.keys != nil { + collect := len(e.plan.orders) > 0 + if collect { + e.data = []pair[T]{} // Must be non-nil to get into e.data branch! + } + for i, xk := range e.keys { + var bk, bv []byte + + // For indices, we need look up the PK through the index. + if e.plan.idx != nil { + c := e.ib.Cursor() + q.stats.Index.Cursor++ + bki, _ := c.Seek(xk) + if !bytes.HasPrefix(bki, xk) { + continue + } + // log.Printf("seek %x, bki %x", xk, bki) + bk = bki[len(xk):] + } else { + bk = xk + } + + // We don't need to fetch the full record now if it isn't needed by + // caller. It may be fetch below for more filters. + if value || e.plan.idx == nil { + q.stats.Records.Get++ + bv = e.rb.Get(bk) + if bv == nil { + if e.plan.idx != nil { + return nil, zero, fmt.Errorf("%w: record with pk %x referenced through index %q not found", ErrStore, bk, e.plan.idx.Name) + } + continue + } + } + p := pair[T]{bk, bv, nil} + if ok, err := e.checkFilter(&p); err != nil { + return nil, zero, err + } else if !ok { + continue + } + + if collect { + e.data = append(e.data, p) + continue + } + + // Again, only fetch value if needed. + var v T + if value { + var err error + v, err = p.Value(e) + if err != nil { + q.error(err) + return nil, zero, err + } + } + + if e.limit > 0 { + e.limit-- + } + + e.keys = e.keys[i+1:] + return bk, v, nil + } + if !collect { + return nil, zero, ErrAbsent + } + // Restart, now with data. 
+ e.keys = [][]byte{} + e.sort() + if e.limit > 0 && len(e.data) > e.limit { + e.data = e.data[:e.limit] + } + return q.nextKey(write, value) + } + + // We are going to do a scan, either over the records or an index. We may have a start and stop key. + collect := len(e.plan.orders) > 0 + if collect { + e.data = []pair[T]{} // Must be non-nil to get into e.data branch on function restart. + } + for { + var xk, xv []byte + if e.forward == nil { + // First time we are in this loop, we set up a cursor and e.forward. + + var c *bolt.Cursor + var statsKV *StatsKV + if e.plan.idx == nil { + c = e.rb.Cursor() + statsKV = &q.stats.Records + } else { + c = e.ib.Cursor() + statsKV = &q.stats.Index + } + if !e.plan.desc { + e.forward = c.Next + if e.plan.start != nil { + statsKV.Cursor++ + // If e.plan.start does not exist, seek will skip to the + // next value after. Fine because this is ascending order. + xk, xv = c.Seek(e.plan.start) + } else { + statsKV.Cursor++ + xk, xv = c.First() + } + } else { + e.forward = c.Prev + if e.plan.start == nil { + statsKV.Cursor++ + xk, xv = c.Last() + } else { + start := make([]byte, len(e.plan.start)) + copy(start, e.plan.start) + ok := incr(start) + if !ok { + statsKV.Cursor++ + // We were at the last representable value. So we simply start at the end. + xk, xv = c.Last() + } else { + statsKV.Cursor++ + xk, xv = c.Seek(start) + if xk == nil { + statsKV.Cursor++ + xk, xv = c.Last() + } + // We started at the value after where we were requested to start, so we have to + // move until we find a matching key. + // todo: we could take e.plan.stop into account (if set). right now we may be + // seeking all the way to the front without ever seeing a match to stop. 
+ for xk != nil && bytes.Compare(cutoff(xk, len(e.plan.start)), e.plan.start) > 0 { + statsKV.Cursor++ + xk, xv = e.forward() + } + } + } + } + } else { + if e.plan.idx == nil { + q.stats.Records.Cursor++ + } else { + q.stats.Index.Cursor++ + } + xk, xv = e.forward() + // log.Printf("forwarded, %x %x", xk, xv) + } + + if xk == nil { + break + } + + if e.plan.start != nil && !e.plan.startInclusive && bytes.HasPrefix(xk, e.plan.start) { + continue + } + if e.plan.stop != nil { + cmp := bytes.Compare(cutoff(xk, len(e.plan.stop)), e.plan.stop) + if !e.plan.desc && (e.plan.stopInclusive && cmp > 0 || !e.plan.stopInclusive && cmp >= 0) { + break + } else if e.plan.desc && (e.plan.stopInclusive && cmp < 0 || !e.plan.stopInclusive && cmp <= 0) { + break + } + } + + var pk, bv []byte + if e.plan.idx == nil { + pk = xk + bv = xv + } else { + var err error + pk, _, err = e.plan.idx.parseKey(xk, false) + if err != nil { + q.error(err) + return nil, zero, err + } + } + + p := pair[T]{pk, bv, nil} + if ok, err := e.checkFilter(&p); err != nil { + return nil, zero, err + } else if !ok { + continue + } + //log.Printf("have kv, %x %x", p.bk, p.bv) + var v T + var err error + if value { + v, err = p.Value(e) + if err != nil { + q.error(err) + return nil, zero, err + } + } + if collect { + e.data = append(e.data, p) + continue + } + if e.limit > 0 { + e.limit-- + } + return p.bk, v, nil + } + if !collect { + return nil, zero, ErrAbsent + } + // Restart, now with data. + e.sort() + if e.limit > 0 && len(e.data) > e.limit { + e.data = e.data[:e.limit] + } + return e.nextKey(write, value) +} + +// checkFilter checks against the filters for the plan. +func (e *exec[T]) checkFilter(p *pair[T]) (rok bool, rerr error) { + q := e.q + + for _, ff := range e.plan.filters { + switch f := ff.(type) { + // note: filterIDs is not here, it is handled earlier to fetch records. 
+ case filterFn[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + if !f.fn(v) { + return + } + case filterEqual[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + rv := reflect.ValueOf(v) + frv := rv.FieldByIndex(f.field.structField.Index) + if !f.field.Type.equal(frv, f.rvalue) { + return + } + case filterNotEqual[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + rv := reflect.ValueOf(v) + frv := rv.FieldByIndex(f.field.structField.Index) + if f.field.Type.equal(frv, f.rvalue) { + return + } + case filterIn[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + rv := reflect.ValueOf(v) + frv := rv.FieldByIndex(f.field.structField.Index) + var have bool + for _, xrv := range f.rvalues { + if f.field.Type.equal(frv, xrv) { + have = true + break + } + } + if !have { + return + } + case filterNotIn[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + rv := reflect.ValueOf(v) + frv := rv.FieldByIndex(f.field.structField.Index) + for _, xrv := range f.rvalues { + if f.field.Type.equal(frv, xrv) { + return + } + } + case filterCompare[T]: + v, err := p.Value(e) + if err != nil { + q.error(err) + return false, err + } + rv := reflect.ValueOf(v) + fv := rv.FieldByIndex(f.field.structField.Index) + cmp := compare(f.field.Type.Kind, fv, f.value) + switch { + case cmp == 0 && (f.op == opGreaterEqual || f.op == opLessEqual): + case cmp < 0 && (f.op == opLess || f.op == opLessEqual): + case cmp > 0 && (f.op == opGreater || f.op == opGreaterEqual): + default: + return + } + default: + q.errorf("internal error: missing case for filter %T", ff) + return false, q.err + } + } + return true, nil +} + +// if type can be compared for filterCompare, eg for greater/less comparison. 
+func comparable(ft fieldType) bool { + if ft.Ptr { + return false + } + switch ft.Kind { + case kindBytes, kindString, kindBool, kindInt8, kindInt16, kindInt32, kindInt64, kindInt, kindUint8, kindUint16, kindUint32, kindUint64, kindUint, kindFloat32, kindFloat64, kindTime: + return true + default: + return false + } +} + +func compare(k kind, a, b reflect.Value) int { + switch k { + case kindBytes: + return bytes.Compare(a.Bytes(), b.Bytes()) + + case kindString: + sa := a.String() + sb := b.String() + if sa < sb { + return -1 + } else if sa > sb { + return 1 + } + return 0 + + case kindBool: + ba := a.Bool() + bb := b.Bool() + if !ba && bb { + return -1 + } else if ba && !bb { + return 1 + } + return 0 + + case kindInt8, kindInt16, kindInt32, kindInt64, kindInt: + ia := a.Int() + ib := b.Int() + if ia < ib { + return -1 + } else if ia > ib { + return 1 + } + return 0 + + case kindUint8, kindUint16, kindUint32, kindUint64, kindUint: + ia := a.Uint() + ib := b.Uint() + if ia < ib { + return -1 + } else if ia > ib { + return 1 + } + return 0 + + case kindFloat32, kindFloat64: + fa := a.Float() + fb := b.Float() + if fa < fb { + return -1 + } else if fa > fb { + return 1 + } + return 0 + + case kindTime: + ta := a.Interface().(time.Time) + tb := b.Interface().(time.Time) + if ta.Before(tb) { + return -1 + } else if ta.After(tb) { + return 1 + } + return 0 + } + // todo: internal error, cannot happen + return 0 +} + +func (e *exec[T]) sort() { + // todo: We should check whether we actually need to load values. We're just + // always it now for the time being because SortStableFunc isn't going to + // give us a *pair (even though it could because of the slice) so we + // couldn't set/cache the value T during sorting. 
+ q := e.q + + for i := range e.data { + p := &e.data[i] + if p.value != nil { + continue + } + _, err := p.Value(e) + if err != nil { + q.error(err) + return + } + } + + sort.SliceStable(e.data, func(i, j int) bool { + a := e.data[i] + b := e.data[j] + for _, o := range e.plan.orders { + ra := reflect.ValueOf(*a.value) + rb := reflect.ValueOf(*b.value) + rva := ra.FieldByIndex(o.field.structField.Index) + rvb := rb.FieldByIndex(o.field.structField.Index) + cmp := compare(o.field.Type.Kind, rva, rvb) + if cmp == 0 { + continue + } + return cmp < 0 && o.asc || cmp > 0 && !o.asc + } + return false + }) +} diff --git a/vendor/github.com/mjl-/bstore/export.go b/vendor/github.com/mjl-/bstore/export.go new file mode 100644 index 0000000..cc0f998 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/export.go @@ -0,0 +1,387 @@ +package bstore + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + bolt "go.etcd.io/bbolt" +) + +// Types returns the types present in the database, regardless of whether they +// are currently registered using Open or Register. Useful for exporting data +// with Keys and Records. +func (db *DB) Types() ([]string, error) { + var types []string + err := db.Read(func(tx *Tx) error { + return tx.btx.ForEach(func(bname []byte, b *bolt.Bucket) error { + // note: we do not track stats for types operations. + + types = append(types, string(bname)) + return nil + }) + }) + if err != nil { + return nil, err + } + return types, nil +} + +// prepareType prepares typeName for export/introspection with DB.Keys, +// DB.Record, DB.Records. It is different in that it does not require a +// reflect.Type to parse into. It parses to a map, e.g. for export to JSON. The +// returned typeVersion has no structFields set in its fields. 
+func (db *DB) prepareType(tx *Tx, typeName string) (map[uint32]*typeVersion, *typeVersion, *bolt.Bucket, []string, error) { + rb, err := tx.recordsBucket(typeName, 0.5) + if err != nil { + return nil, nil, nil, nil, err + } + tb, err := tx.bucket(bucketKey{typeName, "types"}) + if err != nil { + return nil, nil, nil, nil, err + } + versions := map[uint32]*typeVersion{} + var tv *typeVersion + err = tb.ForEach(func(bk, bv []byte) error { + // note: we do not track stats for types operations. + + ntv, err := parseSchema(bk, bv) + if err != nil { + return err + } + versions[ntv.Version] = ntv + if tv == nil || ntv.Version > tv.Version { + tv = ntv + } + + return nil + }) + if err != nil { + return nil, nil, nil, nil, err + } + if tv == nil { + return nil, nil, nil, nil, fmt.Errorf("%w: no type versions", ErrStore) + } + fields := make([]string, len(tv.Fields)) + for i, f := range tv.Fields { + fields[i] = f.Name + } + return versions, tv, rb, fields, nil +} + +// Keys returns the parsed primary keys for the type "typeName". The type does +// not have to be registered with Open or Register. For use with Record(s) to +// export data. +func (db *DB) Keys(typeName string, fn func(pk any) error) error { + return db.Read(func(tx *Tx) error { + _, tv, rb, _, err := db.prepareType(tx, typeName) + if err != nil { + return err + } + + // todo: do not pass nil parser? + v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(nil))).Elem() + return rb.ForEach(func(bk, bv []byte) error { + tx.stats.Records.Cursor++ + + if err := parsePK(v, bk); err != nil { + return err + } + return fn(v.Interface()) + }) + }) +} + +// Record returns the record with primary "key" for "typeName" parsed as map. +// "Fields" is set to the fields of the type. The type does not have to be +// registered with Open or Register. Record parses the data without the Go +// type present. BinaryMarshal fields are returned as bytes. 
+func (db *DB) Record(typeName, key string, fields *[]string) (map[string]any, error) { + var r map[string]any + err := db.Read(func(tx *Tx) error { + versions, tv, rb, xfields, err := db.prepareType(tx, typeName) + if err != nil { + return err + } + *fields = xfields + + var kv any + switch tv.Fields[0].Type.Kind { + case kindBool: + switch key { + case "true": + kv = true + case "false": + kv = false + default: + err = fmt.Errorf("%w: invalid bool %q", ErrParam, key) + } + case kindInt8: + kv, err = strconv.ParseInt(key, 10, 8) + case kindInt16: + kv, err = strconv.ParseInt(key, 10, 16) + case kindInt32: + kv, err = strconv.ParseInt(key, 10, 32) + case kindInt: + kv, err = strconv.ParseInt(key, 10, 32) + case kindInt64: + kv, err = strconv.ParseInt(key, 10, 64) + case kindUint8: + kv, err = strconv.ParseUint(key, 10, 8) + case kindUint16: + kv, err = strconv.ParseUint(key, 10, 16) + case kindUint32: + kv, err = strconv.ParseUint(key, 10, 32) + case kindUint: + kv, err = strconv.ParseUint(key, 10, 32) + case kindUint64: + kv, err = strconv.ParseUint(key, 10, 64) + case kindString: + kv = key + case kindBytes: + kv = []byte(key) // todo: or decode from base64? + default: + return fmt.Errorf("internal error: unknown primary key kind %v", tv.Fields[0].Type.Kind) + } + if err != nil { + return err + } + pkv := reflect.ValueOf(kv) + kind, err := typeKind(pkv.Type()) + if err != nil { + return err + } + if kind != tv.Fields[0].Type.Kind { + // Convert from various int types above to required type. The ParseInt/ParseUint + // calls already validated that the values fit. 
+ pkt := reflect.TypeOf(tv.Fields[0].Type.zero(nil)) + pkv = pkv.Convert(pkt) + } + k, err := packPK(pkv) + if err != nil { + return err + } + + tx.stats.Records.Get++ + bv := rb.Get(k) + if bv == nil { + return ErrAbsent + } + record, err := parseMap(versions, k, bv) + if err != nil { + return err + } + r = record + return nil + }) + return r, err +} + +// Records calls "fn" for each record of "typeName". Records sets "fields" to +// the fields of the type. The type does not have to be registered with Open or +// Register. Record parses the data without the Go type present. BinaryMarshal +// fields are returned as bytes. +func (db *DB) Records(typeName string, fields *[]string, fn func(map[string]any) error) error { + return db.Read(func(tx *Tx) error { + versions, _, rb, xfields, err := db.prepareType(tx, typeName) + if err != nil { + return err + } + *fields = xfields + + return rb.ForEach(func(bk, bv []byte) error { + tx.stats.Records.Cursor++ + + record, err := parseMap(versions, bk, bv) + if err != nil { + return err + } + return fn(record) + }) + }) +} + +// parseMap parses a record into a map with the right typeVersion from versions. 
+func parseMap(versions map[uint32]*typeVersion, bk, bv []byte) (record map[string]any, rerr error) { + p := &parser{buf: bv, orig: bv} + var version uint32 + + defer func() { + x := recover() + if x == nil { + return + } + if err, ok := x.(parseErr); ok { + rerr = fmt.Errorf("%w (version %d, buf %x orig %x)", err.err, version, p.buf, p.orig) + return + } + panic(x) + }() + + version = uint32(p.Uvarint()) + tv := versions[version] + if tv == nil { + return nil, fmt.Errorf("%w: unknown type version %d", ErrStore, version) + } + + r := map[string]any{} + + v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(p))).Elem() + err := parsePK(v, bk) + if err != nil { + return nil, err + } + r[tv.Fields[0].Name] = v.Interface() + + // todo: Should we be looking at the most recent tv, and hiding fields + // that have been removed in a later typeVersion? Like we do for real + // parsing into reflect value? + fm := p.Fieldmap(len(tv.Fields) - 1) + for i, f := range tv.Fields[1:] { + if fm.Nonzero(i) { + r[f.Name] = f.Type.parseValue(p) + } else { + r[f.Name] = f.Type.zero(p) + } + } + + if len(p.buf) != 0 { + return nil, fmt.Errorf("%w: leftover data after parsing", ErrStore) + } + + return r, nil +} + +func (ft fieldType) parseValue(p *parser) any { + switch ft.Kind { + case kindBytes: + return p.TakeBytes(false) + case kindBinaryMarshal: + // We don't have the type available, so we just return the binary data. 
+ return p.TakeBytes(false) + case kindBool: + return true + case kindInt8: + return int8(p.Varint()) + case kindInt16: + return int16(p.Varint()) + case kindInt32: + return int32(p.Varint()) + case kindInt: + i := p.Varint() + if i < math.MinInt32 || i > math.MaxInt32 { + p.Errorf("%w: int %d does not fit in int32", ErrStore, i) + } + return int(i) + case kindInt64: + return p.Varint() + case kindUint8: + return uint8(p.Uvarint()) + case kindUint16: + return uint16(p.Uvarint()) + case kindUint32: + return uint32(p.Uvarint()) + case kindUint: + i := p.Uvarint() + if i > math.MaxUint32 { + p.Errorf("%w: uint %d does not fit in uint32", ErrStore, i) + } + return uint(i) + case kindUint64: + return p.Uvarint() + case kindFloat32: + return math.Float32frombits(uint32(p.Uvarint())) + case kindFloat64: + return math.Float64frombits(p.Uvarint()) + case kindString: + return string(p.TakeBytes(false)) + case kindTime: + var t time.Time + err := t.UnmarshalBinary(p.TakeBytes(false)) + if err != nil { + p.Errorf("%w: parsing time: %v", ErrStore, err) + } + return t + case kindSlice: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + var l []any + for i := 0; i < n; i++ { + if fm.Nonzero(i) { + l = append(l, ft.List.parseValue(p)) + } else { + // Always add non-zero elements, or we would + // change the number of elements in a list. + l = append(l, ft.List.zero(p)) + } + } + return l + case kindMap: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + m := map[string]any{} + for i := 0; i < n; i++ { + // Converting to string can be ugly, but the best we can do. 
+ k := fmt.Sprintf("%v", ft.MapKey.parseValue(p))
+ if _, ok := m[k]; ok {
+ // Abort through the panic-based error path like every other error in
+ // parseValue; returning the error as a plain value would silently store
+ // it as the field's parsed data and leave the parser misaligned.
+ p.Errorf("%w: duplicate key %q in map", ErrStore, k)
+ }
+ var v any
+ if fm.Nonzero(i) {
+ v = ft.MapValue.parseValue(p)
+ } else {
+ v = ft.MapValue.zero(p)
+ }
+ m[k] = v
+ }
+ return m
+ case kindStruct:
+ fm := p.Fieldmap(len(ft.Fields))
+ m := map[string]any{}
+ for i, f := range ft.Fields {
+ if fm.Nonzero(i) {
+ m[f.Name] = f.Type.parseValue(p)
+ } else {
+ m[f.Name] = f.Type.zero(p)
+ }
+ }
+ return m
+ }
+ p.Errorf("internal error: unhandled field type %v", ft.Kind)
+ panic("cannot happen")
+}
+
+var zerovalues = map[kind]any{
+ kindBytes: []byte(nil),
+ kindBinaryMarshal: []byte(nil), // We don't have the actual type available, so we just return binary data.
+ kindBool: false,
+ kindInt8: int8(0),
+ kindInt16: int16(0),
+ kindInt32: int32(0),
+ kindInt: int(0),
+ kindInt64: int64(0),
+ kindUint8: uint8(0),
+ kindUint16: uint16(0),
+ kindUint32: uint32(0),
+ kindUint: uint(0),
+ kindUint64: uint64(0),
+ kindFloat32: float32(0),
+ kindFloat64: float64(0),
+ kindString: "",
+ kindTime: zerotime,
+ kindSlice: []any(nil),
+ kindMap: map[string]any(nil),
+ kindStruct: map[string]any(nil),
+}
+
+func (ft fieldType) zero(p *parser) any {
+ v, ok := zerovalues[ft.Kind]
+ if !ok {
+ p.Errorf("internal error: unhandled zero value for field type %v", ft.Kind)
+ }
+ return v
+}
diff --git a/vendor/github.com/mjl-/bstore/format.md b/vendor/github.com/mjl-/bstore/format.md
new file mode 100644
index 0000000..c17ba78
--- /dev/null
+++ b/vendor/github.com/mjl-/bstore/format.md
@@ -0,0 +1,78 @@
+# Types
+
+Each Go type is stored in its own bucket, after its name. Only subbuckets are
+created directly below a type bucket, no key/values. Two subbuckets are always
+created: "records" for the data, "types" for the type definitions. Each index
+is stored in a subbucket named "index." followed by the name. Unique and
+non-unique indices use the same encoding.
+ +# Type versions + +Type definitions are stored in the "types" subbucket. The key is a 4 byte +uint32, a version as referenced from a data record. The value is a JSON-encoded +representation of the typeVersion struct. + +When a new Go type or changed Go type is registered with a database, a new type +version is added to the "types" subbucket. Data is always inserted/updated with +the most recent type version. But the database may still hold data records +referencing older type versions. Bstore decodes a packed data record with the +referenced type version. For storage efficiency: the type version is reused for +many stored records, a self-describing format (like JSON) would duplicate the +field names in each stored record. + +# Record storage + +Primary keys of types are used as BoltDB keys and can be of bool, integer +types, strings or byte slices. Floats, time, struct, slice, map, binarymarshal +cannot be stored as primary key. Bools are stored as a single byte 0 or 1. +Integers are stored in their fixed width encoding (eg 4 bytes for 32 bit int). +Signed integers are stored so the fixed-width byte value is ordered for all +signed values, i.e. math.MinInt32 is stored as 4 bytes bigendian with value 0. +For strings and byte slices, only their bytes are stored. + +The value stored with a BoltDB key starts with a uvarint "version" of the type. +This refers to a version in the "types" bucket. The primary key is not encoded +again in the data record itself. The remaining fields are space-efficiently +encoded. + +After the uvarint version follow as many bytes to fit a bitmap for the direct +struct fields in the type description. Each bit indicates if the value is +nonzero and present in the value that follows. Only non-zero values take up +more space than the single bit and are stored consecutively after the fieldmap: + + - Pointers are stored as their non-pointer value. If the pointer is nil, it + is zero in the fieldmap. 
+ - If the underlying type is a signed int or float, or unsigned int, then
+ varint/uvarint encoding from encoding/binary is used.
+ - If the underlying type is a string or []byte, uvarint count followed by the
+ bytes.
+ - If the underlying type is a bool, the value is always true and no
+ additional data is present to represent the value. False is represented by
+ the zero value marked in the fieldmap.
+ - Slices use a uvarint for the number of elements, followed by a bitmap for
+ nonzero values, followed by the encoded nonzero elements.
+ - Maps use a uvarint for the number of key/value pairs, followed by a
+ fieldmap for the values (the keys are always present), followed by each
+ pair: key (always present), value (only if nonzero); key, value; etc.
+ - If a type is an encoding.BinaryUnmarshaler and encoding.BinaryMarshaler,
+ then its bytes are stored prefixed with its uvarint length.
+ - If the type is a struct, its fields are encoded with a fieldmap followed
+ by its nonzero field values.
+ - Other types cannot be represented currently.
+
+In a new type version, the type of a field can be changed as long as existing
+records can be decoded into the new Go type. E.g. you can change an int32 into
+an int64. You can only change an int64 into an int32 if all values you attempt
+to read are small enough to fit in an int32. You cannot change between signed
+and unsigned integer, or between string and []byte.
+
+# Index storage
+
+Indexes are stored in subbuckets, named starting with "index." followed by the
+index name. Keys are self-delimiting encodings of the fields that make up the
+key, followed by the primary key for the "records" bucket. Values are always
+empty in index buckets. For bool and integer types, the same fixed width
+encoding as for primary keys in the "records" subbucket is used. Strings are
+encoded by their bytes (no \0 allowed) followed by a delimiting \0. Unlike
+primary keys, an index can cover a field with type time.Time.
Times are encoded +with 8 byte seconds followed by the remaining 4 bytes nanoseconds. diff --git a/vendor/github.com/mjl-/bstore/gendoc.sh b/vendor/github.com/mjl-/bstore/gendoc.sh new file mode 100644 index 0000000..bfa0584 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/gendoc.sh @@ -0,0 +1,13 @@ +#!/bin/sh +( +cat <&1 | sed 's/^/ /' | grep -v 'exit status' +echo '*/' +echo 'package main' +) >cmd/bstore/doc.go diff --git a/vendor/github.com/mjl-/bstore/keys.go b/vendor/github.com/mjl-/bstore/keys.go new file mode 100644 index 0000000..6e47a7d --- /dev/null +++ b/vendor/github.com/mjl-/bstore/keys.go @@ -0,0 +1,282 @@ +package bstore + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "time" +) + +/* +The records buckets map a primary key to the record data. The primary key is of +a form that we can scan/range over. So fixed with for integers. For strings and +bytes they are just their byte representation. We do not store the PK in the +record data. This means we cannot store a time.Time as primary key, because we +cannot have the timezone encoded for comparison reasons. + +Index keys are similar to PK's. Unique and non-unique indices are encoded the +same. The stored values are always empty, the key consists of the field values +the index was created for, followed by the PK. The encoding of a field is nearly +the same as the encoding of that type as a primary key. The differences: strings +end with a \0 to make them self-delimiting; byte slices are not allowed because +they are not self-delimiting; time.Time is allowed because the time is available +in full (with timezone) in the record data. +*/ + +// packPK returns the PK bytes representation for the PK value rv. 
+// packPK returns the fixed-width/raw key bytes for primary key value rv, for
+// use as a BoltDB key. Signed integers are biased by adding math.MinIntN
+// (which wraps, flipping the sign bit) so that the big-endian byte order of
+// the stored key matches the numeric order of the signed values; see
+// format.md ("math.MinInt32 is stored as 4 bytes bigendian with value 0").
+// parsePK applies the inverse transformation.
+func packPK(rv reflect.Value) ([]byte, error) {
+ kv := rv.Interface()
+ var buf []byte
+ switch k := kv.(type) {
+ case string:
+ // Strings and byte slices are stored as their raw bytes, no length prefix.
+ buf = []byte(k)
+ case []byte:
+ buf = k
+ case bool:
+ // Single byte, 0 for false, 1 for true.
+ var b byte
+ if k {
+ b = 1
+ }
+ buf = []byte{b}
+ case int8:
+ buf = []byte{byte(uint8(k + math.MinInt8))}
+ case int16:
+ buf = binary.BigEndian.AppendUint16(nil, uint16(k+math.MinInt16))
+ case int32:
+ buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
+ case int:
+ // int is stored as 4 bytes, so must fit in int32.
+ if k < math.MinInt32 || k > math.MaxInt32 {
+ return nil, fmt.Errorf("%w: int %d does not fit in int32", ErrParam, k)
+ }
+ buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
+ case int64:
+ buf = binary.BigEndian.AppendUint64(nil, uint64(k+math.MinInt64))
+ case uint8:
+ buf = []byte{k}
+ case uint16:
+ buf = binary.BigEndian.AppendUint16(nil, k)
+ case uint32:
+ buf = binary.BigEndian.AppendUint32(nil, k)
+ case uint:
+ // uint is stored as 4 bytes, so must fit in uint32.
+ if k > math.MaxUint32 {
+ return nil, fmt.Errorf("%w: uint %d does not fit in uint32", ErrParam, k)
+ }
+ buf = binary.BigEndian.AppendUint32(nil, uint32(k))
+ case uint64:
+ buf = binary.BigEndian.AppendUint64(nil, k)
+ default:
+ // Floats, time, structs, slices, maps, binarymarshal cannot be primary keys.
+ return nil, fmt.Errorf("%w: unsupported primary key type %T", ErrType, kv)
+ }
+ return buf, nil
+}
+
+// parsePK parses primary key bk into rv. It is the inverse of packPK.
+func parsePK(rv reflect.Value, bk []byte) error {
+ k, err := typeKind(rv.Type())
+ if err != nil {
+ return err
+ }
+ // Variable-width kinds first: they consume the entire key.
+ switch k {
+ case kindBytes:
+ // Copy: BoltDB key bytes are only valid during the transaction.
+ buf := make([]byte, len(bk))
+ copy(buf, bk)
+ rv.SetBytes(buf)
+ return nil
+ case kindString:
+ rv.SetString(string(bk))
+ return nil
+ }
+
+ // Remaining kinds are fixed width; verify the key has exactly that size.
+ var need int
+ switch k {
+ case kindBool, kindInt8, kindUint8:
+ need = 1
+ case kindInt16, kindUint16:
+ need = 2
+ case kindInt32, kindUint32, kindInt, kindUint:
+ need = 4
+ case kindInt64, kindUint64:
+ need = 8
+ }
+ if len(bk) != need {
+ return fmt.Errorf("%w: got %d bytes for PK, need %d", ErrStore, len(bk), need)
+ }
+
+ switch k {
+ case kindBool:
+ rv.SetBool(bk[0] != 0)
+ // Signed integers: subtract math.MinIntN to undo the sign-bias that
+ // packPK applied to make the big-endian bytes order like the values.
+ case kindInt8:
+ rv.SetInt(int64(int8(bk[0]) - math.MinInt8))
+ case kindInt16:
+ rv.SetInt(int64(int16(binary.BigEndian.Uint16(bk)) - math.MinInt16))
+ case kindInt32, kindInt:
+ rv.SetInt(int64(int32(binary.BigEndian.Uint32(bk)) - math.MinInt32))
+ case kindInt64:
+ rv.SetInt(int64(int64(binary.BigEndian.Uint64(bk)) - math.MinInt64))
+ case kindUint8:
+ rv.SetUint(uint64(bk[0]))
+ case kindUint16:
+ rv.SetUint(uint64(binary.BigEndian.Uint16(bk)))
+ case kindUint32, kindUint:
+ rv.SetUint(uint64(binary.BigEndian.Uint32(bk)))
+ case kindUint64:
+ rv.SetUint(uint64(binary.BigEndian.Uint64(bk)))
+ default:
+ // note: we cannot have kindTime as primary key at the moment.
+ return fmt.Errorf("%w: unsupported primary key type %v", ErrType, rv.Type())
+ }
+ return nil
+}
+
+// parseKey parses the PK (last element) of an index key.
+// If all is set, also gathers the values before and returns them in the second
+// parameter.
+func (idx *index) parseKey(buf []byte, all bool) ([]byte, [][]byte, error) { + var err error + var keys [][]byte + take := func(n int) { + if len(buf) < n { + err = fmt.Errorf("%w: not enough bytes in index key", ErrStore) + return + } + if all { + keys = append(keys, buf[:n]) + } + buf = buf[n:] + } +fields: + for _, f := range idx.Fields { + if err != nil { + break + } + switch f.Type.Kind { + case kindString: + for i, b := range buf { + if b == 0 { + if all { + keys = append(keys, buf[:i]) + } + buf = buf[i+1:] + continue fields + } + } + err = fmt.Errorf("%w: bad string without 0 in index key", ErrStore) + case kindBool: + take(1) + case kindInt8, kindUint8: + take(1) + case kindInt16, kindUint16: + take(2) + case kindInt32, kindUint32, kindInt, kindUint: + take(4) + case kindInt64, kindUint64: + take(8) + case kindTime: + take(8 + 4) + } + } + if err != nil { + return nil, nil, err + } + + pk := buf + + switch idx.tv.Fields[0].Type.Kind { + case kindBool: + take(1) + case kindInt8, kindUint8: + take(1) + case kindInt16, kindUint16: + take(2) + case kindInt32, kindInt, kindUint32, kindUint: + take(4) + case kindInt64, kindUint64: + take(8) + } + if len(pk) != len(buf) && len(buf) != 0 { + return nil, nil, fmt.Errorf("%w: leftover bytes in index key (%x)", ErrStore, buf) + } + if all { + return pk, keys[:len(keys)-1], nil + } + return pk, nil, nil +} + +// packKey returns a key to store in an index: first the prefix without pk, then +// the prefix including pk. +func (idx *index) packKey(rv reflect.Value, pk []byte) ([]byte, []byte, error) { + var l []reflect.Value + for _, f := range idx.Fields { + frv := rv.FieldByIndex(f.structField.Index) + l = append(l, frv) + } + return packIndexKeys(l, pk) +} + +// packIndexKeys packs values from l, followed by the pk. +// It returns the key prefix (without pk), and full key with pk. 
+func packIndexKeys(l []reflect.Value, pk []byte) ([]byte, []byte, error) { + var prek, ik []byte + for _, frv := range l { + k, err := typeKind(frv.Type()) + if err != nil { + return nil, nil, err + } + var buf []byte + switch k { + case kindBool: + buf = []byte{0} + if frv.Bool() { + buf[0] = 1 + } + case kindInt8: + buf = []byte{byte(int8(frv.Int()) + math.MinInt8)} + case kindInt16: + buf = binary.BigEndian.AppendUint16(nil, uint16(int16(frv.Int())+math.MinInt16)) + case kindInt32: + buf = binary.BigEndian.AppendUint32(nil, uint32(int32(frv.Int())+math.MinInt32)) + case kindInt: + i := frv.Int() + if i < math.MinInt32 || i > math.MaxInt32 { + return nil, nil, fmt.Errorf("%w: int value %d does not fit in int32", ErrParam, i) + } + buf = binary.BigEndian.AppendUint32(nil, uint32(int32(i)+math.MinInt32)) + case kindInt64: + buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Int()+math.MinInt64)) + case kindUint8: + buf = []byte{byte(frv.Uint())} + case kindUint16: + buf = binary.BigEndian.AppendUint16(nil, uint16(frv.Uint())) + case kindUint32: + buf = binary.BigEndian.AppendUint32(nil, uint32(frv.Uint())) + case kindUint: + i := frv.Uint() + if i > math.MaxUint32 { + return nil, nil, fmt.Errorf("%w: uint value %d does not fit in uint32", ErrParam, i) + } + buf = binary.BigEndian.AppendUint32(nil, uint32(i)) + case kindUint64: + buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Uint())) + case kindString: + buf = []byte(frv.String()) + for _, c := range buf { + if c == 0 { + return nil, nil, fmt.Errorf("%w: string used as index key cannot have \\0", ErrParam) + } + } + buf = append(buf, 0) + case kindTime: + tm := frv.Interface().(time.Time) + buf = binary.BigEndian.AppendUint64(nil, uint64(tm.Unix()+math.MinInt64)) + buf = binary.BigEndian.AppendUint32(buf, uint32(tm.Nanosecond())) + default: + return nil, nil, fmt.Errorf("internal error: bad type %v for index", frv.Type()) // todo: should be caught when making index type + } + ik = append(ik, buf...) 
+ } + n := len(ik) + ik = append(ik, pk...) + prek = ik[:n] + return prek, ik, nil +} diff --git a/vendor/github.com/mjl-/bstore/nonzero.go b/vendor/github.com/mjl-/bstore/nonzero.go new file mode 100644 index 0000000..d8964b1 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/nonzero.go @@ -0,0 +1,218 @@ +package bstore + +import ( + "fmt" + "reflect" +) + +// isZero returns whether v is the zero value for the fields that we store. +// reflect.IsZero cannot be used on structs because it checks private fields as well. +func (ft fieldType) isZero(v reflect.Value) bool { + if !v.IsValid() { + return true + } + if ft.Ptr { + return v.IsNil() + } + switch ft.Kind { + case kindStruct: + for _, f := range ft.Fields { + if !f.Type.isZero(v.FieldByIndex(f.structField.Index)) { + return false + } + } + return true + } + // Use standard IsZero otherwise, also for kindBinaryMarshal. + return v.IsZero() +} + +// checkNonzero compare ofields and nfields (from previous type schema vs newly +// created type schema) for nonzero struct tag. If an existing field got a +// nonzero struct tag added, we verify that there are indeed no nonzero values +// in the database. If there are, we return ErrZero. +func (tx *Tx) checkNonzero(st storeType, tv *typeVersion, ofields, nfields []field) error { + // First we gather paths that we need to check, so we can later simply + // execute those steps on all data we need to read. + paths := &follows{} +next: + for _, f := range nfields { + for _, of := range ofields { + if f.Name == of.Name { + err := f.checkNonzeroGather(&of, paths) + if err != nil { + return err + } + continue next + } + } + if err := f.checkNonzeroGather(nil, paths); err != nil { + return err + } + } + + if len(paths.paths) == 0 { + // Common case, not reading all data. + return nil + } + + // Finally actually do the checks. + // todo: if there are only top-level fields to check, and we have an index, we can use the index check this without reading all data. 
+ return tx.checkNonzeroPaths(st, tv, paths.paths) +} + +type follow struct { + mapKey, mapValue bool + field field +} + +type follows struct { + current []follow + paths [][]follow +} + +func (f *follows) push(ff follow) { + f.current = append(f.current, ff) +} + +func (f *follows) pop() { + f.current = f.current[:len(f.current)-1] +} + +func (f *follows) add() { + f.paths = append(f.paths, append([]follow{}, f.current...)) +} + +func (f field) checkNonzeroGather(of *field, paths *follows) error { + paths.push(follow{field: f}) + defer paths.pop() + if f.Nonzero && (of == nil || !of.Nonzero) { + paths.add() + } + if of != nil { + return f.Type.checkNonzeroGather(of.Type, paths) + } + return nil +} + +func (ft fieldType) checkNonzeroGather(oft fieldType, paths *follows) error { + switch ft.Kind { + case kindMap: + paths.push(follow{mapKey: true}) + if err := ft.MapKey.checkNonzeroGather(*oft.MapKey, paths); err != nil { + return err + } + paths.pop() + + paths.push(follow{mapValue: true}) + if err := ft.MapValue.checkNonzeroGather(*oft.MapValue, paths); err != nil { + return err + } + paths.pop() + + case kindSlice: + err := ft.List.checkNonzeroGather(*oft.List, paths) + if err != nil { + return err + } + case kindStruct: + next: + for _, ff := range ft.Fields { + for _, off := range oft.Fields { + if ff.Name == off.Name { + err := ff.checkNonzeroGather(&off, paths) + if err != nil { + return err + } + continue next + } + } + err := ff.checkNonzeroGather(nil, paths) + if err != nil { + return err + } + } + + } + return nil +} + +// checkNonzero reads through all records of a type, and checks that the fields +// indicated by paths are nonzero. If not, ErrZero is returned. 
+func (tx *Tx) checkNonzeroPaths(st storeType, tv *typeVersion, paths [][]follow) error { + rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent) + if err != nil { + return err + } + return rb.ForEach(func(bk, bv []byte) error { + tx.stats.Records.Cursor++ + + rv, err := st.parseNew(bk, bv) + if err != nil { + return err + } + // todo optimization: instead of parsing the full record, use the fieldmap to see if the value is nonzero. + for _, path := range paths { + frv := rv.FieldByIndex(path[0].field.structField.Index) + if err := path[0].field.checkNonzero(frv, path[1:]); err != nil { + return err + } + } + return nil + }) +} + +func (f field) checkNonzero(rv reflect.Value, path []follow) error { + if len(path) == 0 { + if !f.Nonzero { + return fmt.Errorf("internal error: checkNonzero: expected field to have Nonzero set") + } + if f.Type.isZero(rv) { + return fmt.Errorf("%w: field %q", ErrZero, f.Name) + } + return nil + } + return f.Type.checkNonzero(rv, path) +} + +func (ft fieldType) checkNonzero(rv reflect.Value, path []follow) error { + switch ft.Kind { + case kindMap: + follow := path[0] + path = path[1:] + key := follow.mapKey + if !key && !follow.mapValue { + return fmt.Errorf("internal error: following map, expected mapKey or mapValue, got %#v", follow) + } + + iter := rv.MapRange() + for iter.Next() { + var err error + if key { + err = ft.MapKey.checkNonzero(iter.Key(), path) + } else { + err = ft.MapValue.checkNonzero(iter.Value(), path) + } + if err != nil { + return err + } + } + case kindSlice: + n := rv.Len() + for i := 0; i < n; i++ { + if err := ft.List.checkNonzero(rv.Index(i), path); err != nil { + return err + } + } + case kindStruct: + follow := path[0] + path = path[1:] + frv := rv.FieldByIndex(follow.field.structField.Index) + if err := follow.field.checkNonzero(frv, path); err != nil { + return err + } + default: + return fmt.Errorf("internal error: checkNonzero with non-empty path, but kind %v", ft.Kind) + } + return nil +} 
diff --git a/vendor/github.com/mjl-/bstore/pack.go b/vendor/github.com/mjl-/bstore/pack.go new file mode 100644 index 0000000..6a538e2 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/pack.go @@ -0,0 +1,276 @@ +package bstore + +import ( + "bytes" + "encoding" + "encoding/binary" + "fmt" + "math" + "reflect" + "time" +) + +// fieldmap represents a bitmap indicating which fields are actually stored and +// can be parsed. zero values for fields are not otherwise stored. +type fieldmap struct { + max int // Required number of fields. + buf []byte // Bitmap, we write the next 0/1 at bit n. + n int // Fields seen so far. + offset int // In final output, we write buf back after finish. Only relevant for packing. + Errorf func(format string, args ...any) +} + +// add bit to fieldmap indicating if the field is nonzero. +func (f *fieldmap) Field(nonzero bool) { + o := f.n / 8 + if f.n >= f.max { + f.Errorf("internal error: too many fields, max %d", f.max) + } + if nonzero { + f.buf[o] |= 1 << (7 - f.n%8) + } + f.n++ +} + +// check if field i is nonzero. +func (f *fieldmap) Nonzero(i int) bool { + v := f.buf[i/8]&(1<<(7-i%8)) != 0 + return v +} + +type packer struct { + b *bytes.Buffer + offset int + fieldmaps []*fieldmap // Pending fieldmaps, not excluding fieldmap below. + fieldmap *fieldmap // Currently active. + popped []*fieldmap // Completed fieldmaps, to be written back during finish. +} + +func (p *packer) Errorf(format string, args ...any) { + panic(packErr{fmt.Errorf(format, args...)}) +} + +// Push a new fieldmap on the stack for n fields. +func (p *packer) PushFieldmap(n int) { + p.fieldmaps = append(p.fieldmaps, p.fieldmap) + buf := make([]byte, (n+7)/8) + p.fieldmap = &fieldmap{max: n, buf: buf, offset: p.offset, Errorf: p.Errorf} + p.Write(buf) // Updates offset. Write errors cause panic. +} + +// Pop a fieldmap from the stack. It is remembered in popped for writing the +// bytes during finish. 
+func (p *packer) PopFieldmap() { + if p.fieldmap.n != p.fieldmap.max { + p.Errorf("internal error: fieldmap n %d != max %d", p.fieldmap.n, p.fieldmap.max) + } + p.popped = append(p.popped, p.fieldmap) + p.fieldmap = p.fieldmaps[len(p.fieldmaps)-1] + p.fieldmaps = p.fieldmaps[:len(p.fieldmaps)-1] +} + +// Finish writes back finished (popped) fieldmaps to the correct offset, +// returning the final bytes representation of this record. +func (p *packer) Finish() []byte { + if p.fieldmap != nil { + p.Errorf("internal error: leftover fieldmap during finish") + } + buf := p.b.Bytes() + for _, f := range p.popped { + copy(buf[f.offset:], f.buf) + } + return buf +} + +// Field adds field with nonzeroness to the current fieldmap. +func (p *packer) Field(nonzero bool) { + p.fieldmap.Field(nonzero) +} + +func (p *packer) Write(buf []byte) (int, error) { + n, err := p.b.Write(buf) + if err != nil { + p.Errorf("write: %w", err) + } + if n > 0 { + p.offset += n + } + return n, err +} + +func (p *packer) AddBytes(buf []byte) { + p.Uvarint(uint64(len(buf))) + p.Write(buf) // Write errors cause panic. +} + +func (p *packer) Uvarint(v uint64) { + buf := make([]byte, binary.MaxVarintLen64) + o := binary.PutUvarint(buf, v) + p.Write(buf[:o]) // Write errors cause panic. +} + +func (p *packer) Varint(v int64) { + buf := make([]byte, binary.MaxVarintLen64) + o := binary.PutVarint(buf, v) + p.Write(buf[:o]) // Write errors cause panic. +} + +type packErr struct { + err error +} + +// pack rv (reflect.Struct), excluding the primary key field. 
+func (st storeType) pack(rv reflect.Value) (rbuf []byte, rerr error) { + p := &packer{b: &bytes.Buffer{}} + defer func() { + x := recover() + if x == nil { + return + } + perr, ok := x.(packErr) + if ok { + rerr = perr.err + return + } + panic(x) + }() + st.Current.pack(p, rv) + return p.Finish(), nil +} + +func (tv typeVersion) pack(p *packer, rv reflect.Value) { + // When parsing, the same typeVersion (type schema) is used to + // interpret the bytes correctly. + p.Uvarint(uint64(tv.Version)) + + p.PushFieldmap(len(tv.Fields) - 1) + + for _, f := range tv.Fields[1:] { + nrv := rv.FieldByIndex(f.structField.Index) + if f.Type.isZero(nrv) { + if f.Nonzero { + p.Errorf("%w: %q", ErrZero, f.Name) + } + p.Field(false) + // Pretend to pack to get the nonzero checks. + if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) { + f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv) + } + } else { + p.Field(true) + f.Type.pack(p, nrv) + } + } + p.PopFieldmap() +} + +// pack the nonzero value rv. +func (ft fieldType) pack(p *packer, rv reflect.Value) { + if ft.Ptr { + rv = rv.Elem() + } + switch ft.Kind { + case kindBytes: + p.AddBytes(rv.Bytes()) + case kindBinaryMarshal: + v := rv + buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary() + if err != nil { + p.Errorf("marshalbinary: %w", err) + } + p.AddBytes(buf) + case kindBool: + // No value needed. If false, it would be zero, handled above, + // with a 0 in the fieldmap. 
+ case kindInt: + v := rv.Int() + if v < math.MinInt32 || v > math.MaxInt32 { + p.Errorf("%w: int %d does not fit in int32", ErrParam, v) + } + p.Varint(v) + case kindInt8, kindInt16, kindInt32, kindInt64: + p.Varint(rv.Int()) + case kindUint8, kindUint16, kindUint32, kindUint64: + p.Uvarint(rv.Uint()) + case kindUint: + v := rv.Uint() + if v > math.MaxUint32 { + p.Errorf("%w: uint %d does not fit in uint32", ErrParam, v) + } + p.Uvarint(v) + case kindFloat32: + p.Uvarint(uint64(math.Float32bits(rv.Interface().(float32)))) + case kindFloat64: + p.Uvarint(uint64(math.Float64bits(rv.Interface().(float64)))) + case kindString: + p.AddBytes([]byte(rv.String())) + case kindTime: + buf, err := rv.Interface().(time.Time).MarshalBinary() + if err != nil { + p.Errorf("%w: pack time: %s", ErrParam, err) + } + p.AddBytes(buf) + case kindSlice: + n := rv.Len() + p.Uvarint(uint64(n)) + p.PushFieldmap(n) + for i := 0; i < n; i++ { + nrv := rv.Index(i) + if ft.List.isZero(nrv) { + p.Field(false) + // Pretend to pack to get the nonzero checks of the element. + if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) { + ft.List.pack(&packer{b: &bytes.Buffer{}}, nrv) + } + } else { + p.Field(true) + ft.List.pack(p, nrv) + } + } + p.PopFieldmap() + case kindMap: + // We write a fieldmap for zeroness of the values. The keys are unique, so there + // can only be max 1 zero key. But there can be many zero values. struct{} is + // common in Go, good to support that efficiently. + n := rv.Len() + p.Uvarint(uint64(n)) + p.PushFieldmap(n) + iter := rv.MapRange() + for iter.Next() { + ft.MapKey.pack(p, iter.Key()) + v := iter.Value() + if ft.MapValue.isZero(v) { + p.Field(false) + // Pretend to pack to get the nonzero checks of the key type. 
+ if v.IsValid() && (v.Kind() != reflect.Ptr || !v.IsNil()) { + ft.MapValue.pack(&packer{b: &bytes.Buffer{}}, v) + } + } else { + p.Field(true) + ft.MapValue.pack(p, v) + } + } + p.PopFieldmap() + case kindStruct: + p.PushFieldmap(len(ft.Fields)) + for _, f := range ft.Fields { + nrv := rv.FieldByIndex(f.structField.Index) + if f.Type.isZero(nrv) { + if f.Nonzero { + p.Errorf("%w: %q", ErrZero, f.Name) + } + p.Field(false) + // Pretend to pack to get the nonzero checks. + if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) { + f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv) + } + } else { + p.Field(true) + f.Type.pack(p, nrv) + } + } + p.PopFieldmap() + default: + p.Errorf("internal error: unhandled field type") // should be prevented when registering type + } +} diff --git a/vendor/github.com/mjl-/bstore/parse.go b/vendor/github.com/mjl-/bstore/parse.go new file mode 100644 index 0000000..e19ae73 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/parse.go @@ -0,0 +1,321 @@ +package bstore + +import ( + "encoding" + "encoding/binary" + "fmt" + "math" + "reflect" + "time" +) + +type parser struct { + buf []byte + orig []byte +} + +func (p *parser) Errorf(format string, args ...any) { + panic(parseErr{fmt.Errorf(format, args...)}) +} + +func (p *parser) checkInt(un uint64) int { + if un > math.MaxInt32 { + p.Errorf("%w: uvarint %d does not fit in int32", ErrStore, un) + } + return int(un) +} + +// Fieldmap starts a new fieldmap for n fields. +func (p *parser) Fieldmap(n int) *fieldmap { + // log.Printf("parse fieldmap %d bits", n) + nb := (n + 7) / 8 + buf := p.Take(nb) + return &fieldmap{n, buf, 0, 0, p.Errorf} +} + +// Take reads nb bytes. +func (p *parser) Take(nb int) []byte { + // log.Printf("take %d", nb) + if len(p.buf) < nb { + p.Errorf("%w: not enough bytes", ErrStore) + } + buf := p.buf[:nb] + p.buf = p.buf[nb:] + return buf +} + +// TakeBytes reads a uvarint representing the size of the bytes, followed by +// that number of bytes. 
+// dup is needed if you need to hold on to the bytes. Values from BoltDB are +// only valid in the transaction, and not meant to be modified and are +// memory-mapped read-only. +func (p *parser) TakeBytes(dup bool) []byte { + un := p.Uvarint() + n := p.checkInt(un) + buf := p.Take(n) + if dup { + // todo: check for a max size, beyond which we refuse to allocate? + nbuf := make([]byte, len(buf)) + copy(nbuf, buf) + buf = nbuf + } + return buf +} + +func (p *parser) Uvarint() uint64 { + v, n := binary.Uvarint(p.buf) + if n == 0 { + p.Errorf("%w: uvarint: not enough bytes", ErrStore) + } + if n < 0 { + p.Errorf("%w: uvarint overflow", ErrStore) + } + // log.Printf("take uvarint, %d bytes", n) + p.buf = p.buf[n:] + return v +} + +func (p *parser) Varint() int64 { + v, n := binary.Varint(p.buf) + if n == 0 { + p.Errorf("%w: varint: not enough bytes", ErrStore) + } + if n < 0 { + p.Errorf("%w: varint overflow", ErrStore) + } + // log.Printf("take varint, %d bytes", n) + p.buf = p.buf[n:] + return v +} + +type parseErr struct { + err error +} + +// parse rv (reflect.Struct) from buf. +// does not part primary key field. +func (st storeType) parse(rv reflect.Value, buf []byte) (rerr error) { + p := &parser{buf: buf, orig: buf} + var version uint32 + defer func() { + x := recover() + if x == nil { + return + } + perr, ok := x.(parseErr) + if ok { + rerr = fmt.Errorf("%w (version %d, buf %x, orig %x)", perr.err, version, p.buf, p.orig) + return + } + panic(x) + }() + + version = uint32(p.Uvarint()) + tv, ok := st.Versions[version] + if !ok { + return fmt.Errorf("%w: unknown type version %d", ErrStore, version) + } + + tv.parse(p, rv) + + if len(p.buf) != 0 { + return fmt.Errorf("%w: leftover data after parsing", ErrStore) + } + + return nil +} + +// parseNew parses bk and bv into a newly created value of type st.Type. 
+func (st storeType) parseNew(bk, bv []byte) (reflect.Value, error) { + rv := reflect.New(st.Type).Elem() + if err := st.parseFull(rv, bk, bv); err != nil { + return reflect.Value{}, err + } + return rv, nil +} + +// parseFull parses a full record from bk and bv into value rv, which must be +// of type st.Type. +func (st storeType) parseFull(rv reflect.Value, bk, bv []byte) error { + if err := parsePK(rv.Field(0), bk); err != nil { + return err + } + err := st.parse(rv, bv) + if err != nil { + return err + } + return nil +} + +func (tv typeVersion) parse(p *parser, rv reflect.Value) { + // First field is the primary key, stored as boltdb key only, not in + // the value. + fm := p.Fieldmap(len(tv.Fields) - 1) + for i, f := range tv.Fields[1:] { + if f.structField.Type == nil { + // Do not parse this field in the current Go type, but + // we must still skip over the bytes. + if fm.Nonzero(i) { + f.Type.skip(p) + } + continue + } + if fm.Nonzero(i) { + f.Type.parse(p, rv.FieldByIndex(f.structField.Index)) + } else if f.Nonzero { + // Consistency check. Should not happen, we enforce nonzeroness. + p.Errorf("%w: unexpected nonzero value for %q", ErrStore, f.Name) + } else { + rv.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type)) + } + } +} + +// parse a nonzero fieldType. +func (ft fieldType) parse(p *parser, rv reflect.Value) { + // Because we allow schema changes from ptr to nonptr, rv can be a pointer or direct value regardless of ft.Ptr. 
+ if rv.Kind() == reflect.Ptr { + nrv := reflect.New(rv.Type().Elem()) + rv.Set(nrv) + rv = nrv.Elem() + } + switch ft.Kind { + case kindBytes: + rv.SetBytes(p.TakeBytes(true)) + case kindBinaryMarshal: + buf := p.TakeBytes(false) + t := rv.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + v := reflect.New(t) + err := v.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(buf) + if err != nil { + panic(parseErr{err}) + } + if rv.Type().Kind() == reflect.Ptr { + rv.Set(v) + } else { + rv.Set(v.Elem()) + } + case kindBool: + rv.SetBool(true) + case kindInt: + v := p.Varint() + if v < math.MinInt32 || v > math.MaxInt32 { + p.Errorf("%w: int %d does not fit in int32", ErrStore, v) + } + rv.SetInt(v) + case kindInt8, kindInt16, kindInt32, kindInt64: + rv.SetInt(p.Varint()) + case kindUint: + v := p.Uvarint() + if v > math.MaxUint32 { + p.Errorf("%w: uint %d does not fit in uint32", ErrStore, v) + } + rv.SetUint(v) + case kindUint8, kindUint16, kindUint32, kindUint64: + rv.SetUint(p.Uvarint()) + case kindFloat32: + rv.SetFloat(float64(math.Float32frombits(uint32(p.Uvarint())))) + case kindFloat64: + rv.SetFloat(math.Float64frombits(p.Uvarint())) + case kindString: + rv.SetString(string(p.TakeBytes(false))) + case kindTime: + err := rv.Addr().Interface().(*time.Time).UnmarshalBinary(p.TakeBytes(false)) + if err != nil { + p.Errorf("%w: parsing time: %s", ErrStore, err) + } + case kindSlice: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + slc := reflect.MakeSlice(rv.Type(), n, n) + for i := 0; i < int(n); i++ { + if fm.Nonzero(i) { + ft.List.parse(p, slc.Index(i)) + } + } + rv.Set(slc) + case kindMap: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + mp := reflect.MakeMapWithSize(rv.Type(), n) + for i := 0; i < n; i++ { + mk := reflect.New(rv.Type().Key()).Elem() + ft.MapKey.parse(p, mk) + mv := reflect.New(rv.Type().Elem()).Elem() + if fm.Nonzero(i) { + ft.MapValue.parse(p, mv) + } + mp.SetMapIndex(mk, mv) + } + rv.Set(mp) + 
case kindStruct: + fm := p.Fieldmap(len(ft.Fields)) + strct := reflect.New(rv.Type()).Elem() + for i, f := range ft.Fields { + if f.structField.Type == nil { + f.Type.skip(p) + continue + } + if fm.Nonzero(i) { + f.Type.parse(p, strct.FieldByIndex(f.structField.Index)) + } else if f.Nonzero { + // Consistency check, we enforce that nonzero is not stored if not allowed. + p.Errorf("%w: %q", ErrZero, f.Name) + } else { + strct.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type)) + } + } + rv.Set(strct) + default: + p.Errorf("internal error: unhandled field type") // should be prevented when registering type + } +} + +// skip over the bytes for this fieldType. Needed when an older typeVersion has +// a field that the current reflect.Type does not (can) have. +func (ft fieldType) skip(p *parser) { + switch ft.Kind { + case kindBytes, kindBinaryMarshal, kindString: + p.TakeBytes(false) + case kindBool: + case kindInt8, kindInt16, kindInt32, kindInt, kindInt64: + p.Varint() + case kindUint8, kindUint16, kindUint32, kindUint, kindUint64, kindFloat32, kindFloat64: + p.Uvarint() + case kindTime: + p.TakeBytes(false) + case kindSlice: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + for i := 0; i < n; i++ { + if fm.Nonzero(i) { + ft.List.skip(p) + } + } + case kindMap: + un := p.Uvarint() + n := p.checkInt(un) + fm := p.Fieldmap(n) + for i := 0; i < n; i++ { + ft.MapKey.skip(p) + if fm.Nonzero(i) { + ft.MapValue.skip(p) + } + } + case kindStruct: + fm := p.Fieldmap(len(ft.Fields)) + for i, f := range ft.Fields { + if fm.Nonzero(i) { + f.Type.skip(p) + } + } + default: + p.Errorf("internal error: unhandled field type") // should be prevented when registering type + } +} diff --git a/vendor/github.com/mjl-/bstore/plan.go b/vendor/github.com/mjl-/bstore/plan.go new file mode 100644 index 0000000..7a0f025 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/plan.go @@ -0,0 +1,341 @@ +package bstore + +import ( + "bytes" + "fmt" + "reflect" + 
"sort" +) + +// Plan represents a plan to execute a query, possibly using a simple/quick +// bucket "get" or cursor scan (forward/backward) on either the records or an +// index. +type plan[T any] struct { + // The index for this plan. If nil, we are using pk's, in which case + // "keys" below can be nil for a range scan with start/stop (possibly empty + // for full scan), or non-nil for looking up specific keys. + idx *index + + // Use full unique index to get specific values from keys. idx above can be + // a unique index that we only use partially. In that case, this field is + // false. + unique bool + + // If not nil, used to fetch explicit keys when using pk or unique + // index. Required non-nil for unique. + keys [][]byte + + desc bool // Direction of the range scan. + start []byte // First key to scan. Filters below may still apply. If desc, this value is > than stop (if it is set). If nil, we begin ranging at the first or last (for desc) key. + stop []byte // Last key to scan. Can be nil independently of start. + startInclusive bool // If the start and stop values are inclusive or exclusive. + stopInclusive bool + + // Filter we need to apply on after retrieving the record. If all + // original filters from a query were handled by "keys" above, or by a + // range scan, this field is empty. + filters []filter[T] + + // Orders we need to apply after first retrieving all records. As with + // filters, if a range scan takes care of an ordering from the query, + // this field is empty. + orders []order +} + +// selectPlan selects the best plan for this query. +func (q *Query[T]) selectPlan() (*plan[T], error) { + // Simple case first: List of known IDs. We can just fetch them from + // the records bucket by their primary keys. This is common for a + // "Get" query. + if q.xfilterIDs != nil { + orders := q.xorders + keys := q.xfilterIDs.pks + // If there is an ordering on the PK field, we do the ordering here. 
+ if len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name {
+ asc := orders[0].asc
+ sort.Slice(keys, func(i, j int) bool {
+ cmp := bytes.Compare(keys[i], keys[j])
+ return asc && cmp < 0 || !asc && cmp > 0
+ })
+ orders = orders[1:]
+ }
+ p := &plan[T]{
+ keys: keys,
+ filters: q.xfilters,
+ orders: orders,
+ }
+ return p, nil
+ }
+
+ // Try using a fully matched unique index. We build a map with all
+ // fields that have an equal or in filter. So we can easily look
+ // through our unique indices and get a match. We only look at a single
+ // filter per field. If there are multiple, we would use the last one.
+ // That's okay, we'll filter records out when we execute the leftover
+ // filters. Probably not common.
+ // This is common for filterEqual and filterIn on
+ // fields that have a unique index.
+ equalsIn := map[string]*filter[T]{}
+ for i := range q.xfilters {
+ ff := &q.xfilters[i]
+ switch f := (*ff).(type) {
+ case filterEqual[T]:
+ equalsIn[f.field.Name] = ff
+ case filterIn[T]:
+ equalsIn[f.field.Name] = ff
+ }
+ }
+indices:
+ for _, idx := range q.st.Current.Indices {
+ // Direct fetches only for unique indices.
+ if !idx.Unique {
+ continue
+ }
+ for _, f := range idx.Fields {
+ if _, ok := equalsIn[f.Name]; !ok {
+ // At least one index field does not have a filter.
+ continue indices
+ }
+ }
+ // Calculate all keys that we need to retrieve from the index.
+ // todo optimization: if there is a sort involving these fields, we could do the sorting before fetching data.
+ // todo optimization: we can generate the keys on demand, will help when limit is in use: we are not generating all keys.
+ var keys [][]byte
+ var skipFilters []*filter[T] // Filters to remove from the full list because they are handled by querying the index.
+ for i, f := range idx.Fields { + var rvalues []reflect.Value + ff := equalsIn[f.Name] + skipFilters = append(skipFilters, ff) + switch fi := (*ff).(type) { + case filterEqual[T]: + rvalues = []reflect.Value{fi.rvalue} + case filterIn[T]: + rvalues = fi.rvalues + default: + return nil, fmt.Errorf("internal error: bad filter %T", equalsIn[f.Name]) + } + fekeys := make([][]byte, len(rvalues)) + for j, fv := range rvalues { + key, _, err := packIndexKeys([]reflect.Value{fv}, nil) + if err != nil { + q.error(err) + return nil, err + } + fekeys[j] = key + } + if i == 0 { + keys = fekeys + continue + } + // Multiply current keys with the new values. + nkeys := make([][]byte, 0, len(keys)*len(fekeys)) + for _, k := range keys { + for _, fk := range fekeys { + nk := append(append([]byte{}, k...), fk...) + nkeys = append(nkeys, nk) + } + } + keys = nkeys + } + p := &plan[T]{ + idx: idx, + unique: true, + keys: keys, + filters: dropFilters(q.xfilters, skipFilters), + orders: q.xorders, + } + return p, nil + } + + // Try all other indices. We treat them all as non-unique indices now. + // We want to use the one with as many "equal" prefix fields as + // possible. Then we hope to use a scan on the remaining, either + // because of a filterCompare, or for an ordering. If there is a limit, + // orderings are preferred over compares. + equals := map[string]*filter[T]{} + for i := range q.xfilters { + ff := &q.xfilters[i] + switch f := (*ff).(type) { + case filterEqual[T]: + equals[f.field.Name] = ff + } + } + + // We are going to generate new plans, and keep the new one if it is better than what we have. + var p *plan[T] + var nequals int + var nrange int + var ordered bool + + evaluatePKOrIndex := func(idx *index) error { + var isPK bool + var packKeys func([]reflect.Value) ([]byte, error) + if idx == nil { + // Make pretend index. 
+ isPK = true + idx = &index{ + Fields: []field{q.st.Current.Fields[0]}, + } + packKeys = func(l []reflect.Value) ([]byte, error) { + return packPK(l[0]) + } + } else { + packKeys = func(l []reflect.Value) ([]byte, error) { + key, _, err := packIndexKeys(l, nil) + return key, err + } + } + + var neq = 0 + // log.Printf("idx %v", idx) + var skipFilters []*filter[T] + for _, f := range idx.Fields { + if ff, ok := equals[f.Name]; ok { + skipFilters = append(skipFilters, ff) + neq++ + } else { + break + } + } + + // See if the next field can be used for compare. + var gx, lx *filterCompare[T] + var nrng int + var order *order + orders := q.xorders + if neq < len(idx.Fields) { + nf := idx.Fields[neq] + for i := range q.xfilters { + ff := &q.xfilters[i] + switch f := (*ff).(type) { + case filterCompare[T]: + if f.field.Name != nf.Name { + continue + } + switch f.op { + case opGreater, opGreaterEqual: + if gx == nil { + gx = &f + skipFilters = append(skipFilters, ff) + nrng++ + } + case opLess, opLessEqual: + if lx == nil { + lx = &f + skipFilters = append(skipFilters, ff) + nrng++ + } + } + } + } + + // See if it can be used for ordering. + // todo optimization: we could use multiple orders + if len(orders) > 0 && orders[0].field.Name == nf.Name { + order = &orders[0] + orders = orders[1:] + } + } + + // See if this is better than what we had. + if !(neq > nequals || (neq == nequals && (nrng > nrange || order != nil && !ordered && (q.xlimit > 0 || nrng == nrange)))) { + // log.Printf("plan not better, neq %d, nrng %d, limit %d, order %v ordered %v", neq, nrng, q.limit, order, ordered) + return nil + } + nequals = neq + nrange = nrng + ordered = order != nil + + // Calculate the prefix key. 
+ var kvalues []reflect.Value + for i := 0; i < neq; i++ { + f := idx.Fields[i] + kvalues = append(kvalues, (*equals[f.Name]).(filterEqual[T]).rvalue) + } + var key []byte + var err error + if neq > 0 { + key, err = packKeys(kvalues) + if err != nil { + return err + } + } + + start := key + stop := key + if gx != nil { + k, err := packKeys([]reflect.Value{gx.value}) + if err != nil { + return err + } + start = append(append([]byte{}, start...), k...) + } + if lx != nil { + k, err := packKeys([]reflect.Value{lx.value}) + if err != nil { + return err + } + stop = append(append([]byte{}, stop...), k...) + } + + startInclusive := gx == nil || gx.op != opGreater + stopInclusive := lx == nil || lx.op != opLess + if order != nil && !order.asc { + start, stop = stop, start + startInclusive, stopInclusive = stopInclusive, startInclusive + } + + if isPK { + idx = nil // Clear our fake index for PK. + } + + p = &plan[T]{ + idx: idx, + desc: order != nil && !order.asc, + start: start, + stop: stop, + startInclusive: startInclusive, + stopInclusive: stopInclusive, + filters: dropFilters(q.xfilters, skipFilters), + orders: orders, + } + return nil + } + + if err := evaluatePKOrIndex(nil); err != nil { + q.error(err) + return nil, q.err + } + for _, idx := range q.st.Current.Indices { + if err := evaluatePKOrIndex(idx); err != nil { + q.error(err) + return nil, q.err + } + + } + if p != nil { + return p, nil + } + + // We'll just do a scan over all data. 
+ p = &plan[T]{
+ filters: q.xfilters,
+ orders: q.xorders,
+ }
+ return p, nil
+}
+
+func dropFilters[T any](filters []T, skip []*T) []T { // Returns filters minus those whose address appears in skip; identity is by pointer.
+ n := make([]T, 0, len(filters)-len(skip))
+next:
+ for i := range filters {
+ f := &filters[i]
+ for _, s := range skip {
+ if f == s {
+ continue next
+ }
+ }
+ n = append(n, *f)
+ }
+ return n
+}
diff --git a/vendor/github.com/mjl-/bstore/query.go b/vendor/github.com/mjl-/bstore/query.go
new file mode 100644
index 0000000..82b2edb
--- /dev/null
+++ b/vendor/github.com/mjl-/bstore/query.go
@@ -0,0 +1,1130 @@
+package bstore
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// The convention for handling of errors on a Query: methods that return a bool
+// will have set q.err using q.error(), which does cleanup. If a method returns
+// an error, q.error was not yet called, but usually will be called immediately
+// after with the returned err, possibly first adding context.
+
+// Query selects data for Go struct T based on filters, sorting, limits. The
+// query is completed by calling an operation, such as Count, Get, List,
+// Update, Delete, etc.
+//
+// Record selection functions like FilterEqual and Limit return the (modified)
+// query itself, allowing chaining of calls.
+//
+// Queries are automatically closed after their operation, with two exceptions:
+// After using Next and NextID on a query that did not yet return a non-nil
+// error, you must call Close.
+//
+// A Query is not safe for concurrent use.
+type Query[T any] struct {
+ st storeType // Of T.
+ pkType reflect.Type // Shortcut for st.Current.Fields[0].
+ xtx *Tx // If nil, a new transaction is automatically created from db. Using a tx goes through tx() so one exists.
+ xdb *DB // If not nil, xtx was created to execute the operation and is closed when the operation finishes (also on error).
+ err error // If set, returned by operations. For indicating failed filters, or that an operation has finished.
+ xfilterIDs *filterIDs[T] // Kept separately from filters because these filters make us use the PK without further index planning.
+ xfilters []filter[T]
+ xorders []order
+
+ // If 0, no limit has been set. Otherwise the number of IDs or records to
+ // read. Set by limit calls, and set to 1 for an Exists.
+ xlimit int
+
+ // Set when Next/NextID is called. We prevent non-Next calls after that moment.
+ // Should prevent unexpected results for users.
+ nextOnly bool
+
+ gatherIDs reflect.Value // Pointer to slice to pktype, for PKs of updated/deleted records.
+ gathers *[]T // For full records of updated/deleted records.
+
+ exec *exec[T]
+
+ stats Stats
+}
+
+// sentinel interface for sanity checking.
+type filter[T any] interface {
+ filter()
+}
+
+// filter by one or more IDs.
+type filterIDs[T any] struct {
+ ids map[any]struct{} // Regular values.
+ pks [][]byte // Packed values.
+}
+
+func (filterIDs[T]) filter() {}
+
+type filterFn[T any] struct {
+ fn func(value T) bool // Function to call for each record, returning true if the record is selected.
+}
+
+func (filterFn[T]) filter() {}
+
+// Filter on field equality.
+type filterEqual[T any] struct {
+ field field
+ rvalue reflect.Value
+}
+
+func (filterEqual[T]) filter() {}
+
+// Filter on field non-equality.
+type filterNotEqual[T any] struct {
+ field field
+ rvalue reflect.Value
+}
+
+func (filterNotEqual[T]) filter() {}
+
+// Like filterEqual, but for one or more values.
+type filterIn[T any] struct {
+ field field
+ rvalues []reflect.Value
+}
+
+func (filterIn[T]) filter() {}
+
+// Like filterNotEqual, but for one or more values.
+type filterNotIn[T any] struct {
+ field field
+ rvalues []reflect.Value
+}
+
+func (filterNotIn[T]) filter() {}
+
+type compareOp byte // Comparison operator used by filterCompare.
+
+const (
+ opGreater compareOp = iota
+ opGreaterEqual
+ opLess
+ opLessEqual
+)
+
+// filter by comparison.
+type filterCompare[T any] struct { + field field + op compareOp + value reflect.Value +} + +func (filterCompare[T]) filter() {} + +// ordering of result. +type order struct { + field field + asc bool +} + +// Pair represents a primary key with lazily loaded record data. When user only +// cares about IDs we don't have to parse the full record. And if we go through +// in index we don't have to fetch the full record either. +type pair[T any] struct { + bk []byte + bv []byte // If nil, data must be loaded. + value *T // If not nil, the parsed form of bv. +} + +// Value returns a fully parsed record. It first fetches the record data if not +// yet present. +func (p *pair[T]) Value(e *exec[T]) (T, error) { + var zero T + if p.value != nil { + return *p.value, nil + } + if p.bv == nil { + e.q.stats.Records.Get++ + p.bv = e.rb.Get(p.bk) + if p.bv == nil { + return zero, fmt.Errorf("%w: no data for key", ErrStore) + } + } + var v T + err := e.q.st.parseFull(reflect.ValueOf(&v).Elem(), p.bk, p.bv) + if err != nil { + return zero, err + } + p.value = &v + return v, nil +} + +// QueryDB returns a new Query for type T. When an operation on the query is +// executed, a read-only/writable transaction is created as appropriate for the +// operation. +func QueryDB[T any](db *DB) *Query[T] { + // We lock db for storeTypes. We keep it locked until Query is done. + db.typesMutex.RLock() + q := &Query[T]{xdb: db} + q.init(db) + return q +} + +// Query returns a new Query that operates on type T using transaction tx. +func QueryTx[T any](tx *Tx) *Query[T] { + // note: Since we are in a transaction, we already hold an rlock on the + // db types. + q := &Query[T]{xtx: tx} + q.init(tx.db) + return q +} + +// Stats returns the current statistics for this query. When a query finishes, +// its stats are added to those of its transaction. When a transaction +// finishes, its stats are added to those of its database. 
+func (q *Query[T]) Stats() Stats { + return q.stats +} + +func (q *Query[T]) init(db *DB) { + var v T + t := reflect.TypeOf(v) + if t.Kind() != reflect.Struct { + q.errorf("%w: type must be struct, not pointer or other type", ErrType) + return + } + q.st, q.err = db.storeType(t) + if q.err == nil { + q.stats.LastType = q.st.Name + q.pkType = q.st.Current.Fields[0].structField.Type + } +} + +func (q *Query[T]) tx(write bool) (*Tx, error) { + if q.xtx == nil { + if q.xdb == nil { + q.errorf("%w: missing db and tx: use QueryDB or QueryTx to make a new Query", ErrParam) + return nil, q.err + } + tx, err := q.xdb.bdb.Begin(write) + if err != nil { + q.error(err) + return nil, q.err + } + q.xtx = &Tx{db: q.xdb, btx: tx} + if write { + q.stats.Writes++ + } else { + q.stats.Reads++ + } + } + return q.xtx, nil +} + +// error sets an error for the query, to be returned when next operations are executed. +// All Query instances go through this function for proper rollback and/or runlock +// as needed. If a query finished successfully, ErrFinished is set. +func (q *Query[T]) error(err error) { + if q.xtx != nil && q.xdb != nil { + txerr := q.xtx.btx.Rollback() + if sanityChecks && txerr != nil { + panic(fmt.Sprintf("xtx rollback: %v", txerr)) + } + q.dbAddStats() + q.xtx = nil + } + if q.xdb != nil { + q.xdb.typesMutex.RUnlock() + q.xdb = nil + } + if q.xtx != nil { + q.txAddStats() + } + // This is the only place besides init that sets an error on query. + q.err = err +} + +// errorf calls error with a formatted error. +func (q *Query[T]) errorf(format string, args ...any) { + q.error(fmt.Errorf(format, args...)) +} + +// Close closes a Query. Must always be called for Queries on which Next or +// NextID was called. Other operations call Close themselves. 
+func (q *Query[T]) Close() error { + var err error + if q.xtx != nil && q.xdb != nil { + err = q.xtx.btx.Rollback() + q.dbAddStats() + q.xtx = nil + } + q.error(ErrFinished) + return err +} + +// txAddStats adds stats to a transaction that Query did not create. +func (q *Query[T]) txAddStats() { + q.xtx.stats.add(q.stats) + q.stats = Stats{} +} + +// dbAddStats adds stats to the database directly, because Query created the +// transaction and the tx is never exposed, so no need to go through it. +func (q *Query[T]) dbAddStats() { + q.xdb.statsMutex.Lock() + q.xdb.stats.add(q.stats) + q.xdb.statsMutex.Unlock() + q.stats = Stats{} +} + +// Operations that will do database operations get a defer call to this finish +// function, to ensure we also close transactions that we made. +func (q *Query[T]) finish(rerr *error) { + if q.xtx != nil && q.xdb != nil { + if *rerr == nil && q.xtx.btx.Writable() { + if err := q.xtx.btx.Commit(); err != nil { + *rerr = err + } + } else if err := q.xtx.btx.Rollback(); err != nil && sanityChecks { + panic(fmt.Errorf("rolling back: %v", err)) + } + q.dbAddStats() + q.xtx = nil + } + x := recover() + if x != nil { + q.errorf("%v", x) + panic(x) + } + q.error(ErrFinished) +} + +// checkNotNext is called by all operations except Next and NextID to ensure +// that the user does not mix Next/NextID with regular operations. +func (q *Query[T]) checkNotNext() { + if q.err == nil && q.nextOnly { + q.errorf("%w: can only use further Next calls", ErrParam) + } +} + +func (q *Query[T]) checkErr() bool { + if q.err == nil && q.xtx == nil && q.xdb == nil { + // Probably the result of using a Query zero value. + q.errorf("%w: invalid query, use QueryDB or QueryTx to make a query", ErrParam) + } + return q.err == nil +} + +func (q *Query[T]) addFilter(f filter[T]) { + q.xfilters = append(q.xfilters, f) +} + +// nextKey returns the key and optionally value for the next matching record. 
+// If there is no more matching record, ErrAbsent is returned and the query +// finished. ErrAbsent should be set on the query by the calling operation if +// appropriate (but not for Update/Delete, because it would prevent further +// operations on the query and its transaction). +// +// The actual work is handled by executing a query plan. One is created on the +// first call, and the nextKey is forwarded to the plan execution thereafter. +// +// write indicates if a writable tx needs to be created (if any) for the +// operation that is initiating this data selection. +// +// value indicates if a full record should be parsed and returned, as opposed +// to only the PK. Some callers only care about the IDs of records, which can +// be handled more efficiently when going through an index. +func (q *Query[T]) nextKey(write, value bool) ([]byte, T, error) { + if q.exec == nil { + p, err := q.selectPlan() + if err != nil { + q.error(err) + var zero T + return nil, zero, err + } + // log.Printf("plan %#v", p) + q.exec = p.exec(q) + } + return q.exec.nextKey(write, value) +} + +// fetch the PK of the next selected record, and parse into pkv. +func (q *Query[T]) nextID(write bool, pkv reflect.Value) error { + bk, _, err := q.nextKey(write, false) + if err != nil { + return err + } + return parsePK(pkv, bk) +} + +// foreachKey calls fn on each selected record. If value is set, fn's v is set, +// otherwise the zero value. +func (q *Query[T]) foreachKey(write, value bool, fn func(bk []byte, v T) error) error { + if q.err != nil { + return q.err + } + for { + bk, v, err := q.nextKey(write, value) + if err == ErrAbsent { + return nil + } else if err != nil { + return err + } else if err := fn(bk, v); err != nil { + q.error(err) + return err + } + } +} + +// foreachID calls fn with the primary key value for each selected record. 
+func (q *Query[T]) foreachID(write bool, fn func(pkv any) error) error { + if q.err != nil { + return q.err + } + v := reflect.New(q.pkType).Elem() + for { + err := q.nextID(write, v) + if err == ErrAbsent { + return nil + } else if err != nil { + return err + } else if err := fn(v.Interface()); err != nil { + q.error(err) + return err + } + } +} + +// lookup field name in the current typeVersion. +func (q *Query[T]) lookupField(name string) (field, bool) { + for _, ff := range q.st.Current.Fields { + if ff.Name == name { + return ff, true + } + } + q.errorf("%w: unknown field %q", ErrParam, name) + return field{}, false +} + +// Kinds that can be converted without loss of precision, identity is not in here. +type convertKinds struct{ from, to kind } + +var convertFieldKinds = map[convertKinds]struct{}{ + {kindInt8, kindInt16}: {}, + {kindInt8, kindInt32}: {}, + {kindInt8, kindInt64}: {}, + {kindInt8, kindInt}: {}, + {kindInt16, kindInt32}: {}, + {kindInt16, kindInt64}: {}, + {kindInt16, kindInt}: {}, + {kindInt32, kindInt}: {}, + {kindInt32, kindInt64}: {}, + {kindInt, kindInt32}: {}, + {kindInt, kindInt64}: {}, + + {kindUint8, kindUint16}: {}, + {kindUint8, kindUint32}: {}, + {kindUint8, kindUint64}: {}, + {kindUint8, kindUint}: {}, + {kindUint16, kindUint32}: {}, + {kindUint16, kindUint64}: {}, + {kindUint16, kindUint}: {}, + {kindUint32, kindUint}: {}, + {kindUint32, kindUint64}: {}, + {kindUint, kindUint32}: {}, + {kindUint, kindUint64}: {}, + + {kindFloat32, kindFloat64}: {}, +} + +// Check type of value for field and return a reflect value that can directly be set on the field. +// If the field is a pointer, we allow non-pointers and convert them. +// We require value to be of a type that can be converted without loss of precision to the type of field. 
+func (q *Query[T]) prepareValue(fname string, ft fieldType, sf reflect.StructField, rv reflect.Value) (reflect.Value, bool) { + if !rv.IsValid() { + q.errorf("%w: invalid value", ErrParam) + return rv, false + } + // Quick check first. + t := rv.Type() + if t == sf.Type { + return rv, true + } + if !ft.Ptr && rv.Kind() == reflect.Ptr { + q.errorf("%w: cannot set ptr value to nonptr field", ErrParam) + return rv, false + } + + k, err := typeKind(t) + if err != nil { + q.errorf("%w: type of field: %s", ErrParam, err) + return reflect.Value{}, false + } + if _, ok := convertFieldKinds[convertKinds{k, ft.Kind}]; !ok && k != ft.Kind { + q.errorf("%w: got %v for field %q, need %v", ErrParam, rv.Type(), fname, ft.Kind) + return reflect.Value{}, false + } + if k != ft.Kind { + dt := sf.Type + if ft.Ptr { + dt = dt.Elem() + } + rv = rv.Convert(dt) + } + if ft.Ptr && rv.Kind() != reflect.Ptr { + nv := reflect.New(sf.Type.Elem()) + nv.Elem().Set(rv) + rv = nv + } + return rv, true +} + +// checkPK checks if t is the type of the current typeVersion's PK, and returns +// a userfriendly error message otherwise. +func (q *Query[T]) checkPK(t reflect.Type) bool { + if t != q.pkType { + q.errorf("%w: id type was %s, must be %s", ErrParam, t, q.pkType) + return false + } + return true +} + +// FilterID selects the records with primary key id, which must be of the type +// of T's primary key. +func (q *Query[T]) FilterID(id any) *Query[T] { + if !q.checkErr() { + return q + } + kv := reflect.ValueOf(id) + if !q.checkPK(kv.Type()) { + return q + } + pk, err := packPK(kv) + if err != nil { + q.error(err) + return q + } + + if q.xfilterIDs != nil { + // Intersection of this ID with the previous IDs. Either it is this single ID or the list becomes empty. 
+ if _, ok := q.xfilterIDs.ids[id]; !ok { + q.xfilterIDs = &filterIDs[T]{map[any]struct{}{}, [][]byte{}} + return q + } + } + q.xfilterIDs = &filterIDs[T]{map[any]struct{}{id: {}}, [][]byte{pk}} + return q +} + +// FilterIDs selects the records with a primary key that is in ids. Ids must be +// a slice of T's primary key type. +func (q *Query[T]) FilterIDs(ids any) *Query[T] { + if !q.checkErr() { + return q + } + kv := reflect.ValueOf(ids) + if kv.Kind() != reflect.Slice { + q.errorf("%w: ids must be slice of %v, not %T", ErrParam, q.pkType, ids) + return q + } + if !q.checkPK(kv.Type().Elem()) { + return q + } + + n := kv.Len() + pks := make([][]byte, 0, n) + var prevIDs map[any]struct{} + if q.xfilterIDs != nil { + prevIDs = q.xfilterIDs.ids // We use this to check intersection. + } + // todo: should we fail for a zero PK? + nids := map[any]struct{}{} + for i := 0; i < n; i++ { + rev := kv.Index(i) + ev := rev.Interface() + if _, ok := prevIDs[ev]; !ok && prevIDs != nil { + continue + } + nids[ev] = struct{}{} + pk, err := packPK(rev) + if err != nil { + q.error(err) + return q + } + pks = append(pks, pk) + } + q.xfilterIDs = &filterIDs[T]{nids, pks} + return q +} + +// FilterFn calls fn for each record selected so far. If fn returns true, the +// record is kept for further filters and finally the operation. +func (q *Query[T]) FilterFn(fn func(value T) bool) *Query[T] { + if !q.checkErr() { + return q + } + if fn == nil { + q.errorf("%w: nil fn", ErrParam) + return q + } + q.addFilter(filterFn[T]{fn}) + return q +} + +// gatherNonzeroFields returns fields and values that are non-zero. Used for +// Update and FilterNonzero. +// +// allowID indicates if the primary key is allowed to be nonzero (not for +// Updates). +// +// At least one field must be nonzero. 
+func gatherNonzeroFields(tv *typeVersion, rv reflect.Value, allowID bool) ([]field, []reflect.Value, error) { + var fields []field + var values []reflect.Value + + for i, f := range tv.Fields { + fv := rv.FieldByIndex(f.structField.Index) + if f.Type.isZero(fv) { + continue + } + if i == 0 && !allowID { + return nil, nil, fmt.Errorf("%w: primary key must be zero", ErrParam) + } + fields = append(fields, f) + values = append(values, fv) + } + if len(fields) == 0 { + return nil, nil, fmt.Errorf("%w: must have at least one nonzero field", ErrParam) + } + return fields, values, nil +} + +// FilterNonzero gathers the nonzero fields from value, and selects records that +// have equal values for those fields. At least one value must be nonzero. If a +// value comes from an external source, e.g. user input, make sure it is not +// the zero value. +// +// Keep in mind that filtering on an embed/anonymous field looks at individual +// fields in the embedded field for non-zeroness, not at the embed field as a whole. +func (q *Query[T]) FilterNonzero(value T) *Query[T] { + if !q.checkErr() { + return q + } + fields, values, err := gatherNonzeroFields(q.st.Current, reflect.ValueOf(value), true) + if err != nil { + q.error(err) + return q + } + for i, f := range fields { + if f.Name == q.st.Current.Fields[0].Name { + q.FilterID(values[i].Interface()) + } else { + q.addFilter(filterEqual[T]{f, values[i]}) + } + } + return q +} + +// FilterEqual selects records that have one of values for fieldName. +// +// Note: Value must be a compatible type for comparison with fieldName. Go +// constant numbers become ints, which are not compatible with uint or float +// types. +func (q *Query[T]) FilterEqual(fieldName string, values ...any) *Query[T] { + q.filterEqual(fieldName, values, false) + return q +} + +// FilterNotEqual selects records that do not have any of values for fieldName. 
+func (q *Query[T]) FilterNotEqual(fieldName string, values ...any) *Query[T] { + q.filterEqual(fieldName, values, true) + return q +} + +func (q *Query[T]) filterEqual(fieldName string, values []any, not bool) { + if !q.checkErr() { + return + } + ff, ok := q.lookupField(fieldName) + if !ok { + return + } + if len(values) == 0 { + q.errorf("%w: need at least one value for (not) equal", ErrParam) + return + } + if ff.Type.Ptr { + q.errorf("%w: cannot compare pointer values", ErrParam) + return + } + if len(values) == 1 { + rv, ok := q.prepareValue(ff.Name, ff.Type, ff.structField, reflect.ValueOf(values[0])) + if !ok { + return + } + if not { + q.addFilter(filterNotEqual[T]{ff, rv}) + } else { + q.addFilter(filterEqual[T]{ff, rv}) + } + return + } + rvs := make([]reflect.Value, len(values)) + for i, value := range values { + rv, ok := q.prepareValue(ff.Name, ff.Type, ff.structField, reflect.ValueOf(value)) + if !ok { + return + } + rvs[i] = rv + } + if not { + q.addFilter(filterNotIn[T]{ff, rvs}) + } else { + q.addFilter(filterIn[T]{ff, rvs}) + } +} + +// FilterGreater selects records that have fieldName > value. +// +// Note: Value must be a compatible type for comparison with fieldName. Go +// constant numbers become ints, which are not compatible with uint or float +// types. +func (q *Query[T]) FilterGreater(fieldName string, value any) *Query[T] { + return q.filterCompare(fieldName, opGreater, reflect.ValueOf(value)) +} + +// FilterGreaterEqual selects records that have fieldName >= value. +func (q *Query[T]) FilterGreaterEqual(fieldName string, value any) *Query[T] { + return q.filterCompare(fieldName, opGreaterEqual, reflect.ValueOf(value)) +} + +// FilterLess selects records that have fieldName < value. +func (q *Query[T]) FilterLess(fieldName string, value any) *Query[T] { + return q.filterCompare(fieldName, opLess, reflect.ValueOf(value)) +} + +// FilterLessEqual selects records that have fieldName <= value. 
+func (q *Query[T]) FilterLessEqual(fieldName string, value any) *Query[T] {
+	return q.filterCompare(fieldName, opLessEqual, reflect.ValueOf(value))
+}
+
+func (q *Query[T]) filterCompare(fieldName string, op compareOp, value reflect.Value) *Query[T] {
+	if !q.checkErr() {
+		return q
+	}
+	ff, ok := q.lookupField(fieldName)
+	if !ok {
+		return q
+	}
+	if !comparable(ff.Type) {
+		q.errorf("%w: cannot compare %s", ErrParam, ff.Type.Kind)
+		return q
+	}
+	rv, ok := q.prepareValue(ff.Name, ff.Type, ff.structField, value)
+	if !ok {
+		return q
+	}
+	q.addFilter(filterCompare[T]{ff, op, rv})
+	return q
+}
+
+// Limit stops selecting records after the first n records.
+// Can only be called once. n must be >= 1.
+func (q *Query[T]) Limit(n int) *Query[T] {
+	if !q.checkErr() {
+		return q
+	}
+	if n <= 0 {
+		q.errorf("%w: limit must be >= 1", ErrParam)
+		return q
+	}
+	if q.xlimit > 0 {
+		q.errorf("%w: already have a limit", ErrParam)
+		return q
+	}
+	q.xlimit = n
+	return q
+}
+
+// SortAsc sorts the selected records by fieldNames in ascending order.
+// Additional orderings can be added by more calls to SortAsc or SortDesc.
+func (q *Query[T]) SortAsc(fieldNames ...string) *Query[T] {
+	return q.order(fieldNames, true)
+}
+
+// SortDesc sorts the selected records by fieldNames in descending order.
+// Additional orderings can be added by more calls to SortAsc or SortDesc.
+func (q *Query[T]) SortDesc(fieldNames ...string) *Query[T] {
+	return q.order(fieldNames, false)
+}
+
+func (q *Query[T]) order(fieldNames []string, asc bool) *Query[T] {
+	if !q.checkErr() {
+		return q
+	}
+	if len(fieldNames) == 0 {
+		q.errorf("%w: sort fieldNames must be non-empty", ErrParam)
+		return q
+	}
+	for _, name := range fieldNames {
+		ff, ok := q.lookupField(name)
+		if !ok {
+			return q
+		}
+		if !comparable(ff.Type) {
+			q.errorf("%w: cannot sort by unorderable %q", ErrParam, name)
+			return q
+		}
+		q.xorders = append(q.xorders, order{ff, asc})
+	}
+	return q
+}
+
+// Gather causes an Update or Delete operation to return the values of the
+// affected records into l. For Update, the updated records are returned.
+func (q *Query[T]) Gather(l *[]T) *Query[T] {
+	if !q.checkErr() {
+		return q
+	}
+	if l == nil {
+		q.errorf("%w: l must be non-nil", ErrParam)
+		return q
+	}
+	if q.gathers != nil {
+		q.errorf("%w: can only have one Gather", ErrParam)
+		return q
+	}
+	q.gathers = l
+	return q
+}
+
+// GatherIDs causes an Update or Delete operation to return the primary keys of
+// affected records into ids, which must be a pointer to a slice of T's
+// primary key.
+func (q *Query[T]) GatherIDs(ids any) *Query[T] { + if !q.checkErr() { + return q + } + if ids == nil { + q.errorf("%w: ids must be non-nil", ErrParam) + return q + } + rv := reflect.ValueOf(ids) + t := rv.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice || t.Elem().Elem() != q.pkType { + q.errorf("%w: ids must be pointer to slice of %v, not %T", ErrParam, q.pkType, ids) + return q + } + if q.gatherIDs.IsValid() { + q.errorf("%w: can only have one GatherIDs", ErrParam) + return q + } + q.gatherIDs = rv + return q +} + +func (q *Query[T]) gather(v T, rv reflect.Value) { + if q.gathers != nil { + *q.gathers = append(*q.gathers, v) + } + if q.gatherIDs.IsValid() { + ridv := rv.FieldByIndex(q.st.Current.Fields[0].structField.Index) + l := q.gatherIDs.Elem() + nl := reflect.Append(l, ridv) + l.Set(nl) + } +} + +// Err returns if an error is set on the query. Can happen for invalid filters. +// Finished queries return ErrFinished. +func (q *Query[T]) Err() error { + q.checkErr() + return q.err +} + +// Delete removes the selected records, returning how many were deleted. +// +// See Gather and GatherIDs for collecting the deleted records or IDs. +func (q *Query[T]) Delete() (deleted int, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return 0, q.err + } + + n := 0 + err := q.foreachKey(true, true, func(bk []byte, ov T) error { + n++ + rov := reflect.ValueOf(ov) + q.gather(ov, rov) + q.stats.Delete++ + return q.xtx.delete(q.exec.rb, q.st, bk, rov) + }) + return n, err +} + +// Get returns the single selected record. +// +// ErrMultiple is returned if multiple records were selected. +// ErrAbsent is returned if no record was selected. 
+func (q *Query[T]) Get() (value T, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + var zero T + return zero, q.err + } + + if _, v, err := q.nextKey(false, true); err != nil { + return v, err + } else if _, _, err := q.nextKey(false, false); err == nil { + return v, ErrMultiple + } else { + return v, nil + } +} + +// Count returns the number of selected records. +func (q *Query[T]) Count() (n int, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return 0, q.err + } + + err := q.foreachKey(false, false, func(kb []byte, unused T) error { + n++ + return nil + }) + return n, err +} + +// List returns all selected records. +// On success with zero selected records, List returns the empty list. +func (q *Query[T]) List() (list []T, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return nil, q.err + } + + l := []T{} + err := q.foreachKey(false, true, func(unused []byte, v T) error { + l = append(l, v) + return nil + }) + if err != nil { + return nil, err + } + return l, nil +} + +// UpdateNonzero updates all selected records with the non-zero fields from +// value, returning the number of records updated. +// +// Recall that false, 0, "" are all zero values. Use UpdateField or +// UpdateFields to set fields to zero their value. This is especially relevant +// if the field value comes from an external source, e.g. user input. +// +// See Gather and GatherIDs for collecting the updated records or IDs. +// +// Keep in mind that updating on an embed/anonymous field looks at individual +// fields in the embedded field for non-zeroness, not at the embed field as a whole. 
+func (q *Query[T]) UpdateNonzero(value T) (updated int, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return 0, q.err + } + + fields, values, err := gatherNonzeroFields(q.st.Current, reflect.ValueOf(value), false) + if err != nil { + return 0, err + } + sfl := make([]reflect.StructField, len(fields)) + for i, f := range fields { + sfl[i] = f.structField + } + return q.update(sfl, values) +} + +// UpdateField calls UpdateFields for fieldName and value. +func (q *Query[T]) UpdateField(fieldName string, value any) (updated int, rerr error) { + return q.UpdateFields(map[string]any{fieldName: value}) +} + +// UpdateFields updates all selected records, setting fields named by the map +// keys of fieldValues to the corresponding map value and returning the number +// of records updated. +// +// See Gather and GatherIDs for collecting the updated records or IDs. +// +// Entire embed fields can be updated, as well as their individual embedded +// fields. +func (q *Query[T]) UpdateFields(fieldValues map[string]any) (updated int, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return 0, q.err + } + + if len(fieldValues) == 0 { + return 0, fmt.Errorf("%w: must update at least one field", ErrParam) + } + + fields := make([]reflect.StructField, 0, len(fieldValues)) + values := make([]reflect.Value, 0, len(fieldValues)) +next: + for name, value := range fieldValues { + for i, f := range q.st.Current.Fields { + if f.Name != name { + continue + } + if i == 0 { + return 0, fmt.Errorf("%w: cannot update primary key", ErrParam) + } + rv, ok := q.prepareValue(f.Name, f.Type, f.structField, reflect.ValueOf(value)) + if !ok { + return 0, q.err + } + fields = append(fields, f.structField) + values = append(values, rv) + continue next + } + for _, ef := range q.st.Current.embedFields { + if ef.Name != name { + continue + } + rv, ok := q.prepareValue(ef.Name, ef.Type, ef.structField, reflect.ValueOf(value)) + if !ok { + 
return 0, q.err + } + fields = append(fields, ef.structField) + values = append(values, rv) + continue next + } + return 0, fmt.Errorf("%w: unknown field %q", ErrParam, name) + } + return q.update(fields, values) +} + +func (q *Query[T]) update(fields []reflect.StructField, values []reflect.Value) (int, error) { + n := 0 + ov := reflect.New(q.st.Type).Elem() + err := q.foreachKey(true, true, func(bk []byte, v T) error { + n++ + rv := reflect.ValueOf(&v).Elem() + ov.Set(rv) + for i, sf := range fields { + frv := rv.FieldByIndex(sf.Index) + frv.Set(values[i]) + } + q.gather(v, rv) + q.stats.Update++ + return q.xtx.update(q.exec.rb, q.st, rv, ov, bk) + }) + return n, err +} + +// IDs sets idsptr to the primary keys of selected records. Idptrs must be a +// slice of T's primary key type. +func (q *Query[T]) IDs(idsptr any) (rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return q.err + } + + if idsptr == nil { + return fmt.Errorf("%w: idsptr must not be nil", ErrParam) + } + rv := reflect.ValueOf(idsptr) + if rv.Type().Kind() != reflect.Ptr || rv.Type().Elem().Kind() != reflect.Slice || rv.Type().Elem().Elem() != q.pkType { + return fmt.Errorf("%w: idsptr must be a ptr to slice of %v, not %T", ErrParam, q.pkType, idsptr) + } + + s := reflect.MakeSlice(rv.Type().Elem(), 0, 0) + err := q.foreachID(false, func(pkv any) error { + s = reflect.Append(s, reflect.ValueOf(pkv)) + return nil + }) + if err != nil { + return err + } + rv.Elem().Set(s) + return nil +} + +// Next fetches the next record, moving the cursor forward. +// +// ErrAbsent is returned if no more records match. +// +// Automatically created transactions are read-only. +// +// Close must be called on a Query on which Next or NextID was called and that +// is not yet finished, i.e. has not yet returned an error (including +// ErrAbsent). +func (q *Query[T]) Next() (value T, rerr error) { + // note: no q.finish preamble because caller iterates over result themselves. 
+ if !q.checkErr() { + var zero T + return zero, q.err + } + + q.nextOnly = true + _, v, err := q.nextKey(false, true) + if err == ErrAbsent { + q.error(err) + } + return v, err +} + +// NextID is like Next, but only fetches the primary key of the next matching +// record, storing it in idptr. +func (q *Query[T]) NextID(idptr any) (rerr error) { + // note: no q.finish preamble because caller iterates over result themselves. + if !q.checkErr() { + return q.err + } + + q.nextOnly = true + rpkv := reflect.ValueOf(idptr) + if idptr == nil { + q.errorf("%w: idptr must be non-nil", ErrParam) + return q.err + } + t := rpkv.Type() + if t.Kind() != reflect.Ptr || t.Elem() != q.pkType { + return fmt.Errorf("%w: value must be ptr to %v, not %v", ErrParam, q.pkType, t) + } + err := q.nextID(false, rpkv.Elem()) + if err == ErrAbsent { + q.error(err) + } + return err +} + +// Exists returns whether any record was selected. +func (q *Query[T]) Exists() (exists bool, rerr error) { + defer q.finish(&rerr) + q.checkNotNext() + if !q.checkErr() { + return false, q.err + } + + q.xlimit = 1 + _, _, err := q.nextKey(false, false) + if err == ErrAbsent { + return false, nil + } + return err == nil, err +} + +// ForEach calls fn on each selected record. 
+func (q *Query[T]) ForEach(fn func(value T) error) (rerr error) {
+	defer q.finish(&rerr)
+	q.checkNotNext()
+	if !q.checkErr() {
+		return q.err
+	}
+
+	return q.foreachKey(false, true, func(bk []byte, v T) error {
+		return fn(v)
+	})
+}
diff --git a/vendor/github.com/mjl-/bstore/register.go b/vendor/github.com/mjl-/bstore/register.go
new file mode 100644
index 0000000..55c81ae
--- /dev/null
+++ b/vendor/github.com/mjl-/bstore/register.go
@@ -0,0 +1,1215 @@
+package bstore
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	bolt "go.etcd.io/bbolt"
+)
+
+const (
+	ondiskVersion1 = 1
+)
+
+// Register registers the Go types of each value in typeValues for use with the
+// database. Each value must be a struct, not a pointer.
+//
+// Type definition versions (schema versions) are added to the database if they
+// don't already exist or have changed. Existing type definitions are checked
+// for compatibility. Unique indexes are created if they don't already exist.
+// Creating a new unique index fails with ErrUnique on duplicate values. If a
+// nonzero constraint is added, all records are verified to be nonzero. If a zero
+// value is found, ErrZero is returned.
+//
+// Register can be called multiple times, with different types. But types that
+// reference each other must be registered in the same call to Register.
+func (db *DB) Register(typeValues ...any) error {
+	// We will drop/create new indices as needed. For changed indices, we drop
+	// and recreate. E.g. if an index becomes a unique index, or if a field in
+	// an index changes. These values map type and index name to their index.
+	oindices := map[string]map[string]*index{} // Existing in previous typeVersion.
+	nindices := map[string]map[string]*index{} // Existing in new typeVersion.
+
+	otypeversions := map[string]*typeVersion{} // Replaced typeVersions.
+ ntypeversions := map[string]*typeVersion{} // New typeversions, through new types or updated versions of existing types. + registered := map[string]*storeType{} // Registered in this call. + + return db.Write(func(tx *Tx) error { + for _, t := range typeValues { + rt := reflect.TypeOf(t) + if rt.Kind() != reflect.Struct { + return fmt.Errorf("%w: type value %T is not a struct", ErrParam, t) + } + + tv, err := gatherTypeVersion(rt) + if err != nil { + return fmt.Errorf("%w: generating schema for type %q", err, rt.Name()) + } + + // Ensure buckets exist. + tx.stats.Bucket.Get++ + b := tx.btx.Bucket([]byte(tv.name)) + var rb, tb *bolt.Bucket + if b == nil { + var err error + tx.stats.Bucket.Put++ + b, err = tx.btx.CreateBucket([]byte(tv.name)) + if err != nil { + return fmt.Errorf("creating bucket for type %q: %w", tv.name, err) + } + tx.stats.Bucket.Put++ + rb, err = b.CreateBucket([]byte("records")) + if err != nil { + return fmt.Errorf("creating records bucket for type %q: %w", tv.name, err) + } + tx.stats.Bucket.Put++ + tb, err = b.CreateBucket([]byte("types")) + if err != nil { + return fmt.Errorf("creating types bucket for type %q: %w", tv.name, err) + } + } else { + rb, err = tx.recordsBucket(tv.name, tv.fillPercent) + if err != nil { + return err + } + tb, err = tx.bucket(bucketKey{tv.name, "types"}) + if err != nil { + return err + } + } + + st, ok := db.typeNames[tv.name] + if ok { + return fmt.Errorf("%w: type %q already registered", ErrParam, tv.name) + } + st = storeType{ + Name: tv.name, + Type: rt, + Versions: map[uint32]*typeVersion{}, + } + + // We read all type definitions. + err = tb.ForEach(func(bk, bv []byte) error { + // note: we don't track stats for types operations. 
+ + otv, err := parseSchema(bk, bv) + if err != nil { + return err + } + if _, ok := st.Versions[otv.Version]; ok { + return fmt.Errorf("%w: duplicate schema version %d", ErrStore, otv.Version) + } + st.Versions[otv.Version] = otv + if st.Current == nil || otv.Version > st.Current.Version { + st.Current = otv + } + return nil + }) + if err != nil { + return err + } + + // Decide if we need to add a new typeVersion to the database. I.e. a new type schema. + if st.Current == nil || !st.Current.typeEqual(*tv) { + tv.Version = 1 + if st.Current != nil { + tv.Version = st.Current.Version + 1 + } + k, v, err := packSchema(tv) + if err != nil { + return fmt.Errorf("internal error: packing schema for type %q", tv.name) + } + + // note: we don't track types bucket operations in stats. + if err := tb.Put(k, v); err != nil { + return fmt.Errorf("storing new schema: %w", err) + } + + if st.Current != nil { + // Copy current ReferencedBy, updated later and check for consistency. + tv.ReferencedBy = map[string]struct{}{} + for name := range st.Current.ReferencedBy { + tv.ReferencedBy[name] = struct{}{} + } + + // Indices can change: between index and unique, or fields. + // We recreate them for such changes. 
+ recreateIndices := map[string]struct{}{} + if err := tx.checkTypes(st.Current, tv, recreateIndices); err != nil { + return fmt.Errorf("checking compatibility of types: %w", err) + } + for iname := range recreateIndices { + ibname := fmt.Sprintf("index.%s", iname) + tx.stats.Bucket.Delete++ + if err := b.DeleteBucket([]byte(ibname)); err != nil { + return fmt.Errorf("%w: deleting bucket %q for incompatible index that would be recreated: %v", ErrStore, ibname, err) + } + delete(st.Current.Indices, iname) + } + + oindices[st.Name] = st.Current.Indices + otypeversions[st.Name] = st.Current + + // If the current latest (old) primary key has "noauto", but + // the new version does not, we will ensure the records + // bucket sequence (that we use for autoincrement) is set to + // the highest value stored so far. + if st.Current.Noauto && !tv.Noauto { + tx.stats.Records.Cursor++ + bk, _ := rb.Cursor().Last() + if bk != nil { + rv := reflect.New(tv.Fields[0].structField.Type).Elem() + if err := parsePK(rv, bk); err != nil { + return fmt.Errorf("parsing pk of last record to update autoincrement sequence: %w", err) + } + var seq uint64 + switch tv.Fields[0].Type.Kind { + case kindInt8, kindInt16, kindInt32, kindInt64, kindInt: + seq = uint64(rv.Int()) + case kindUint8, kindUint16, kindUint32, kindUint64, kindUint: + seq = rv.Uint() + default: + return fmt.Errorf("internal error: noauto on non-int primary key: %v", err) + } + if err := rb.SetSequence(seq); err != nil { + return fmt.Errorf("%w: updating autoincrement sequence after schema change: %s", ErrStore, err) + } + } + } + } + nindices[st.Name] = tv.Indices + ntypeversions[st.Name] = tv + } else { + tv.Version = st.Current.Version + // Start out with the previous ReferencedBy. May be updated later. + tv.ReferencedBy = st.Current.ReferencedBy + } + + // Prepare types for parsing into the registered reflect.Type. 
+			st.prepare(tv)
+
+			st.Current = tv
+			st.Versions[tv.Version] = tv
+			db.typeNames[st.Name] = st
+			db.types[st.Type] = st
+			registered[st.Name] = &st
+		}
+
+		// Check that referenced types exist, and make links in the referenced types.
+		for _, st := range registered {
+			tv := st.Current
+			for name := range tv.references {
+				_, ok := registered[name]
+				if !ok {
+					return fmt.Errorf("%w: type %q referenced by type %q not registered; you must register them in the same call to Open/Register", ErrType, name, tv.name)
+				}
+			}
+
+			// Link fields that are referenced.
+			for _, f := range tv.Fields {
+				for _, ref := range f.References {
+					rtv := db.typeNames[ref].Current
+					k := f.Type.Kind
+					refk := rtv.Fields[0].Type.Kind
+					if k != refk {
+						return fmt.Errorf("%w: %s.%s references %s.%s but fields have different types %s and %s", ErrType, tv.name, f.Name, rtv.name, rtv.Fields[0].Name, k, refk)
+					}
+					// todo: should check if an index on this field exists, regardless of name. saves us an index.
+					idx, ok := tv.Indices[f.Name+":"+ref]
+					if !ok {
+						return fmt.Errorf("internal error: missing index for ref")
+					}
+					rtv.referencedBy = append(rtv.referencedBy, idx)
+				}
+			}
+		}
+
+		// Ensure that for all registered storeTypes, their Current.ReferencedBy are up to
+		// date by adding/removing. We mark those that need updating. We only have to check
+		// ntypeversions: If a reference by a type changed, a new typeversion is created.
+		// We cannot just recalculate the ReferencedBy, because the whole point is to
+		// detect types that are missing in this Register.
+		updateReferencedBy := map[string]struct{}{}
+		for _, ntv := range ntypeversions {
+			otv := otypeversions[ntv.name] // Can be nil, on first register.
+
+			// Look for references that were added.
+			for name := range ntv.references {
+				if otv != nil {
+					if _, ok := otv.references[name]; ok {
+						// Reference was present in previous typeVersion, nothing to do.
+ continue + } + } + if _, ok := registered[name].Current.ReferencedBy[ntv.name]; ok { + return fmt.Errorf("%w: type %q introduces reference to %q but is already marked as ReferencedBy in that type", ErrStore, ntv.name, name) + } + // note: we are updating the previous tv's ReferencedBy, not tidy but it is safe. + registered[name].Current.ReferencedBy[ntv.name] = struct{}{} + updateReferencedBy[name] = struct{}{} + } + if otv == nil { + continue + } + // Look for references that were removed. + // We cannot use summary field otv.references, it isn't set, we go to the source, + // otv.Fields[].References. + orefs := map[string]struct{}{} + for _, f := range otv.Fields { + for _, name := range f.References { + orefs[name] = struct{}{} + } + } + for name := range orefs { + if _, ok := ntv.references[name]; ok { + continue + } + if _, ok := registered[name].Current.ReferencedBy[ntv.name]; !ok { + return fmt.Errorf("%w: previously referenced type %q not present in %q", ErrStore, ntv.name, name) + } + // note: we are updating the previous tv's ReferencedBy, not tidy but it is safe. + delete(registered[name].Current.ReferencedBy, ntv.name) + updateReferencedBy[name] = struct{}{} + } + } + + // Update/create new typeversions based on updated ReferencedBy. + for name := range updateReferencedBy { + // If we already created a new typeVersion in this Register, we can just update it + // again. Otherwise we create a new typeVersion, but none of the other checks + // (eg index) because those weren't changed (or we would have a new typeversion already). + // We don't update ntypeversions/otypeversions, the changed ReferencedBy aren't relevant below this point. 
+ ntvp, ok := ntypeversions[name] + if !ok { + st := registered[name] + ntv := *st.Current + ntv.Version++ + st.Versions[ntv.Version] = &ntv + st.Current = &ntv + db.types[st.Type] = *st + db.typeNames[st.Name] = *st + ntvp = &ntv + } + + k, v, err := packSchema(ntvp) + if err != nil { + return fmt.Errorf("internal error: packing schema for type %q", ntvp.name) + } + tb, err := tx.bucket(bucketKey{ntvp.name, "types"}) + if err != nil { + return err + } + // note: we don't track types bucket operations in stats. + if err := tb.Put(k, v); err != nil { + return fmt.Errorf("storing new schema: %w", err) + } + } + + // Now that all ReferencedBy are up to date, verify that all referenced types were + // registered in this call. + // The whole point of this exercise is to catch a Register of a type that is + // referenced, but whose type isn't registered. If we would allow registering just this + // referenced type, users can delete data that is still referenced by the + // not-registered registering type. + for _, st := range registered { + for name := range st.Current.ReferencedBy { + if _, ok := registered[name]; !ok { + return fmt.Errorf("%w: must register %q that references %q in same Open/Register call", ErrType, name, st.Name) + } + } + } + + // Check that any new nonzero constraints are correct. + for _, tv := range ntypeversions { + otv, ok := otypeversions[tv.name] + if !ok { + continue + } + + st := db.typeNames[tv.name] + if err := tx.checkNonzero(st, tv, otv.Fields, tv.Fields); err != nil { + return err + } + } + + // Drop old/modified indices. 
+ for name, tindices := range oindices { + for iname, idx := range tindices { + var drop bool + if _, ok := nindices[name]; !ok { + drop = true + } else if _, ok := nindices[name][iname]; !ok { + drop = true + } else if !idx.typeEqual(nindices[name][iname]) { + drop = true + } + if !drop { + continue + } + b, err := tx.typeBucket(name) + if err != nil { + return err + } + ibname := fmt.Sprintf("index.%s", iname) + tx.stats.Bucket.Delete++ + if err := b.DeleteBucket([]byte(ibname)); err != nil { + return fmt.Errorf("%w: deleting bucket %q for old/modified index: %v", ErrStore, ibname, err) + } + } + } + + // Create new/modified indices. + for name, tindices := range nindices { + // First prepare, checking if we should create this index and preparing the index bucket if so. + var idxs []*index + var ibs []*bolt.Bucket + for iname, idx := range tindices { + var create bool + if _, ok := oindices[name]; !ok { + create = true + } else if _, ok := oindices[name][iname]; !ok { + create = true + } else if !idx.typeEqual(oindices[name][iname]) { + create = true + } + if !create { + continue + } + b, err := tx.typeBucket(name) + if err != nil { + return err + } + ibname := []byte(fmt.Sprintf("index.%s", iname)) + tx.stats.Bucket.Put++ + ib, err := b.CreateBucket(ibname) + if err != nil { + return fmt.Errorf("%w: creating bucket %q for old/modified index: %v", ErrStore, ibname, err) + } + idxs = append(idxs, idx) + ibs = append(ibs, ib) + } + + if len(idxs) == 0 { + continue + } + + st := db.typeNames[name] + rb, err := tx.recordsBucket(name, st.Current.fillPercent) + if err != nil { + return err + } + + // We first generate all keys. Then sort them and insert them. + // Random inserts can be slow in boltdb. We can efficiently verify + // that the values are indeed unique by keeping track of the non-PK + // prefix length and checking the key inserted previously. 
+ type key struct { + buf []byte + pre uint16 + } + ibkeys := make([][]key, len(idxs)) + + err = rb.ForEach(func(bk, bv []byte) error { + tx.stats.Records.Cursor++ + + rv := reflect.New(st.Type).Elem() + if err := st.parse(rv, bv); err != nil { + return fmt.Errorf("parsing record for index for %s: %w", name, err) + } + + for i, idx := range idxs { + prek, ik, err := idx.packKey(rv, bk) + if err != nil { + return fmt.Errorf("creating key for %s.%s: %w", name, idx.Name, err) + } + ibkeys[i] = append(ibkeys[i], key{ik, uint16(len(prek))}) + } + return nil + }) + if err != nil { + return fmt.Errorf("preparing index keys for type %q: %w", name, err) + } + + insertKeys := func(idx *index, ib *bolt.Bucket, keys []key) error { + ib.FillPercent = 1 + defer func() { + ib.FillPercent = 0.5 + }() + for i, k := range keys { + if idx.Unique && i > 0 { + prev := keys[i-1] + if bytes.Equal(prev.buf[:prev.pre], k.buf[:k.pre]) { + // Do quite a bit of work to make a helpful error message. + a := reflect.New(reflect.TypeOf(idx.tv.Fields[0].Type.zero(nil))).Elem() + b := reflect.New(reflect.TypeOf(idx.tv.Fields[0].Type.zero(nil))).Elem() + parsePK(a, prev.buf[prev.pre:]) // Ignore error, nothing to do. + parsePK(b, k.buf[k.pre:]) // Ignore error, nothing to do. + var dup []any + _, values, _ := idx.parseKey(k.buf, true) + for i := range values { + x := reflect.New(reflect.TypeOf(idx.Fields[i].Type.zero(nil))).Elem() + parsePK(x, values[i]) // Ignore error, nothing to do. + dup = append(dup, x.Interface()) + } + return fmt.Errorf("%w: duplicate value %v on index %s.%s for ids %v and %v", ErrUnique, dup, name, idx.Name, a.Interface(), b.Interface()) + } + } + tx.stats.Index.Put++ + if err := ib.Put(k.buf, []byte{}); err != nil { + return fmt.Errorf("inserting index key into %s.%s: %w", name, idxs[i].Name, err) + } + } + return nil + } + + // Now do all sorts + inserts. 
+ for i, ib := range ibs { + idx := idxs[i] + keys := ibkeys[i] + sort.Slice(keys, func(i, j int) bool { + return bytes.Compare(keys[i].buf, keys[j].buf) < 0 + }) + if err := insertKeys(idx, ib, keys); err != nil { + return err + } + ibkeys[i] = nil + } + } + return nil + }) +} + +// parseSchema parses a schema from the type bucket into a typeversion. +func parseSchema(bk, bv []byte) (*typeVersion, error) { + if len(bk) != 4 { + return nil, fmt.Errorf("%w: version: got %d bytes, need 4", ErrStore, len(bk)) + } + version := binary.BigEndian.Uint32(bk) + + // We store these in self-describing json, to prevent complications if we want to adjust our formats in the future. + + var tv typeVersion + if err := json.Unmarshal(bv, &tv); err != nil { + return nil, fmt.Errorf("%w: unmarshal schema: %v", ErrStore, err) + } + if tv.Version != version { + return nil, fmt.Errorf("%w: version in schema %d does not match key %d", ErrStore, tv.Version, version) + } + if tv.OndiskVersion != ondiskVersion1 { + return nil, fmt.Errorf("internal error: OndiskVersion %d not supported", tv.OndiskVersion) + } + + // Fill references, used for comparing/checking schema updates. + tv.references = map[string]struct{}{} + for _, f := range tv.Fields { + for _, ref := range f.References { + tv.references[ref] = struct{}{} + } + } + + return &tv, nil +} + +// packSchema returns a key and value to store in the types bucket. 
+func packSchema(tv *typeVersion) ([]byte, []byte, error) { + if tv.OndiskVersion != ondiskVersion1 { + return nil, nil, fmt.Errorf("internal error: invalid OndiskVersion %d", tv.OndiskVersion) + } + v, err := json.Marshal(tv) + if err != nil { + return nil, nil, fmt.Errorf("internal error: marshal schema: %v", err) + } + k := binary.BigEndian.AppendUint32(nil, tv.Version) + return k, v, nil +} + +func gatherTypeVersion(t reflect.Type) (*typeVersion, error) { + if t.NumField() == 0 { + return nil, fmt.Errorf("%w: type must have at least one field", ErrType) + } + tname, err := typeName(t) + if err != nil { + return nil, err + } + tv := &typeVersion{ + Version: 0, // Set by caller. + OndiskVersion: ondiskVersion1, // Current on-disk format. + ReferencedBy: map[string]struct{}{}, + name: tname, + fillPercent: 0.5, + } + tv.Fields, tv.embedFields, err = gatherTypeFields(t, true, true, false) + if err != nil { + return nil, err + } + tags, err := newStoreTags(t.Field(0).Tag.Get("bstore"), true) + if err != nil { + return nil, err + } + tv.Noauto = tags.Has("noauto") + if tv.Noauto { + switch tv.Fields[0].Type.Kind { + case kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64: + default: + return nil, fmt.Errorf("%w: cannot have noauto on non-integer primary key field", ErrType) + } + } + + // Find indices. + tv.Indices = map[string]*index{} + + addIndex := func(unique bool, iname string, fields ...*field) error { + idx := tv.Indices[iname] + if idx != nil { + return fmt.Errorf("%w: duplicate unique/index %q", ErrType, iname) + } + idx = &index{unique, iname, nil, tv} + tv.Indices[iname] = idx + for _, f := range fields { + // todo: can we have a unique index on bytes? seems like this should be possible to have max 1 []byte in an index key, only to be used for unique get plans. 
+ if f.Type.Ptr { + return fmt.Errorf("%w: cannot have index/unique on ptr field %s.%s", ErrType, tname, f.Name) + } + switch f.Type.Kind { + case kindBool, kindInt8, kindInt16, kindInt32, kindInt64, kindInt, kindUint8, kindUint16, kindUint32, kindUint64, kindUint, kindString, kindTime: + default: + return fmt.Errorf("%w: cannot use type %v in field %q as index/unique", ErrType, f.Type.Kind, f.Name) + } + + if f.indices == nil { + f.indices = map[string]*index{} + } + f.indices[iname] = idx + idx.Fields = append(idx.Fields, *f) + } + return nil + } + + fields := map[string]*field{} + for i, f := range tv.Fields { + fields[f.Name] = &tv.Fields[i] + } + + addNamedIndex := func(unique bool, tag string, f *field) error { + t := strings.Split(tag, " ") + if len(t) > 2 { + return fmt.Errorf("%w: invalid unique/index, too many tokens in %q", ErrType, tag) + } + iname := t[0] + if len(t) == 2 { + iname = t[1] + } + + names := strings.Split(t[0], "+") + if names[0] != f.Name { + return fmt.Errorf("%w: invalid unique/index %q, first field must be same as struct field %q", ErrType, iname, f.Name) + } + seen := map[string]struct{}{} + var ifields []*field + for _, fname := range names { + if _, ok := seen[fname]; ok { + return fmt.Errorf("%w: duplicate field %q in unique/index %q", ErrType, fname, iname) + } + seen[fname] = struct{}{} + xf := fields[fname] + if xf == nil { + return fmt.Errorf("%w: unknown field %q in unique/index %q", ErrType, fname, iname) + } + ifields = append(ifields, xf) + } + return addIndex(unique, iname, ifields...) 
+ } + + for i := range tv.Fields { + f := &tv.Fields[i] + rft := t.FieldByIndex(f.structField.Index) + tags, err := newStoreTags(rft.Tag.Get("bstore"), i == 0) + if err != nil { + return nil, err + } + if tags.Has("unique") { + if err := addIndex(true, f.Name, f); err != nil { + return nil, err + } + } + if tags.Has("index") { + if err := addIndex(false, f.Name, f); err != nil { + return nil, err + } + } + for _, name := range tags.List("unique") { + if err := addNamedIndex(true, name, f); err != nil { + return nil, err + } + } + for _, name := range tags.List("index") { + if err := addNamedIndex(false, name, f); err != nil { + return nil, err + } + } + } + + // Gather references. Add indices if they don't already exist. + tv.references = map[string]struct{}{} + for i := range tv.Fields { + f := &tv.Fields[i] + refseen := map[string]struct{}{} + tags, err := newStoreTags(f.structField.Tag.Get("bstore"), i == 0) + if err != nil { + return nil, err + } + for _, name := range tags.List("ref") { + if _, ok := refseen[name]; ok { + return nil, fmt.Errorf("%w: duplicate references %q in field %q", ErrType, name, f.Name) + } + refseen[name] = struct{}{} + tv.references[name] = struct{}{} + + iname := f.Name + ":" + name + if idx, ok := tv.Indices[iname]; ok { + if len(idx.Fields) != 1 || idx.Fields[0].Name != f.Name { + return nil, fmt.Errorf("%w: reference requires an index, but another index with name %q for the field already exists", ErrType, iname) + } + } else { + if err := addIndex(false, iname, f); err != nil { + return nil, err + } + } + } + } + + return tv, nil +} + +// gatherTypeFields gathers fields for a struct. If needFirst is true, the first +// field must not be ignored and be a valid primary key field (eg no pointer). +// topLevel must be true only for the top-level struct fields, not for fields of +// deeper levels. Deeper levels cannot have index/unique constraints. 
+func gatherTypeFields(t reflect.Type, needFirst, topLevel, inMap bool) ([]field, []embed, error) { + var fields []field + var embedFields []embed + + names := map[string]struct{}{} + for i, sf := range reflect.VisibleFields(t) { + tags, err := newStoreTags(sf.Tag.Get("bstore"), i == 0 && needFirst && topLevel) + if err != nil { + return nil, nil, err + } + nonzero := tags.Has("nonzero") + if i == 0 && needFirst { + if !sf.IsExported() { + return nil, nil, fmt.Errorf("%w: first field is primary key and must be exported", ErrType) + } + if sf.Anonymous { + // todo: We don't allow this now because the code often reads tv.Fields[0] to get the + // PK field. We could allow it, but it could confuse users, thinking the entire + // struct would become a PK. + return nil, nil, fmt.Errorf("%w: first field cannot be an embed/anonymous field", ErrType) + } + if nonzero { + return nil, nil, fmt.Errorf("%w: superfluous nonzero tag on primary key", ErrType) + } + if err := checkKeyType(sf.Type); err != nil { + return nil, nil, err + } + } + if nonzero && sf.Anonymous { + return nil, nil, fmt.Errorf("%w: cannot have nonzero on embed/anonymous field %q", ErrType, sf.Name) + } + if tags.Has("-") && sf.Anonymous { + return nil, nil, fmt.Errorf(`%w: cannot have "-" on embed/anonymous field %q`, ErrType, sf.Name) + } + if !sf.IsExported() || tags.Has("-") { + continue + } + if !topLevel && (tags.Has("unique") || tags.Has("index")) { + return nil, nil, fmt.Errorf("%w: %q", errNestedIndex, sf.Name) + } + + name, err := tags.Get("name") + if err != nil { + return nil, nil, err + } else if name == "" { + name = sf.Name + } + if _, ok := names[name]; ok { + return nil, nil, fmt.Errorf("%w: duplicate field %q", ErrType, name) + } + names[name] = struct{}{} + + ft, err := gatherFieldType(sf.Type, inMap) + if err != nil { + return nil, nil, fmt.Errorf("field %q: %w", sf.Name, err) + } + + // Parse a default value. 
+ var def reflect.Value + defstr, err := tags.Get("default") + if err != nil { + return nil, nil, fmt.Errorf("field %q: %w", sf.Name, err) + } else if defstr != "" { + if inMap { + return nil, nil, fmt.Errorf("%w: cannot have default value inside a map value", ErrType) + } + var defv any + convert := true + switch ft.Kind { + case kindBool: + convert = false + switch defstr { + case "true": + defv = true + case "false": + defv = false + default: + err = fmt.Errorf("%w: bad bool value %q for %s.%s", ErrType, defstr, t.Name(), sf.Name) + } + case kindInt, kindInt32: + defv, err = strconv.ParseInt(defstr, 0, 32) + case kindInt8: + defv, err = strconv.ParseInt(defstr, 0, 8) + case kindInt16: + defv, err = strconv.ParseInt(defstr, 0, 16) + case kindInt64: + defv, err = strconv.ParseInt(defstr, 0, 64) + case kindUint, kindUint32: + defv, err = strconv.ParseUint(defstr, 0, 32) + case kindUint8: + defv, err = strconv.ParseUint(defstr, 0, 8) + case kindUint16: + defv, err = strconv.ParseUint(defstr, 0, 16) + case kindUint64: + defv, err = strconv.ParseUint(defstr, 0, 64) + case kindFloat32: + defv, err = strconv.ParseFloat(defstr, 32) + case kindFloat64: + defv, err = strconv.ParseFloat(defstr, 64) + case kindString: + convert = false + defv = defstr + case kindTime: + convert = false + if defstr == "now" { + defv = zerotime // Sentinel value recognized during evaluation. 
+ } else { + defv, err = time.Parse(time.RFC3339, defstr) + } + default: + return nil, nil, fmt.Errorf("%w: default not supported for type %v", ErrType, ft.Kind) + } + if err != nil { + return nil, nil, fmt.Errorf("%w: bad default value %q for %s %s.%s", ErrType, defstr, ft.Kind, t.Name(), sf.Name) + } + deft := sf.Type + if ft.Ptr { + deft = sf.Type.Elem() + } + def = reflect.ValueOf(defv) + if convert { + def = def.Convert(deft) + } + } + + if sf.Anonymous { + e := embed{name, ft, sf} + embedFields = append(embedFields, e) + } else { + f := field{name, ft, nonzero, tags.List("ref"), defstr, def, sf, nil} + fields = append(fields, f) + } + } + return fields, embedFields, nil +} + +// checkKeyType returns an error if the type is not valid for use as primary key. +// similar to storeType.keyValue +func checkKeyType(t reflect.Type) error { + k, err := typeKind(t) + if err != nil { + return err + } + switch k { + case kindBytes, kindString, kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64: + return nil + } + return fmt.Errorf("%w: type %v not valid for primary key", ErrType, t) +} + +func gatherFieldType(t reflect.Type, inMap bool) (fieldType, error) { + ft := fieldType{} + if t.Kind() == reflect.Ptr { + t = t.Elem() + ft.Ptr = true + } + k, err := typeKind(t) + if err != nil { + return fieldType{}, err + } + ft.Kind = k + switch ft.Kind { + case kindSlice: + l, err := gatherFieldType(t.Elem(), inMap) + if err != nil { + return ft, fmt.Errorf("list: %w", err) + } + ft.List = &l + case kindMap: + kft, err := gatherFieldType(t.Key(), true) + if err != nil { + return ft, fmt.Errorf("map key: %w", err) + } + if kft.Ptr { + return ft, fmt.Errorf("%w: map key with pointer type not supported", ErrType) + } + vft, err := gatherFieldType(t.Elem(), true) + if err != nil { + return ft, fmt.Errorf("map value: %w", err) + } + ft.MapKey = &kft + ft.MapValue = &vft + case kindStruct: + // note: we have no reason to 
gather embed field beyond top-level + fields, _, err := gatherTypeFields(t, false, false, inMap) + if err != nil { + return fieldType{}, fmt.Errorf("struct: %w", err) + } + ft.Fields = fields + } + return ft, nil +} + +// Prepare all types for parsing into the current type represented by ntv. +// We have to look at later typeVersions that may have removed a field. If so, +// we will not set it on t but leave it at its default value. +func (st storeType) prepare(ntv *typeVersion) { + var l []*typeVersion + for _, tv := range st.Versions { + l = append(l, tv) + } + sort.Slice(l, func(i, j int) bool { + return l[i].Version < l[j].Version + }) + var later [][]field + for _, tv := range l { + later = append(later, tv.Fields) + } + for i, tv := range l { + tv.prepare(ntv, later[i+1:]) + } +} + +// prepare for use with parse. +func (tv typeVersion) prepare(ntv *typeVersion, later [][]field) { + for i, f := range tv.Fields { + nlater, nmvlater, skip := lookupLater(f.Name, later) + if skip { + continue + } + tv.Fields[i].prepare(ntv.Fields, nlater, nmvlater) + } +} + +// Lookup field "name" in "later", which is list of future fields. +// If the named field disappears in a future field list, skip will be true. +// Otherwise, in each future list of fields, the matching field is looked up and +// returned. For map types, the returned first list is for keys and second list for +// map values. For other types, only the first list is set. +func lookupLater(name string, later [][]field) (nlater, nmvlater [][]field, skip bool) { + // If a later typeVersion did not have this field, we will not parse it into the + // latest reflect type. This is old data that was discarded with a typeVersion + // change. 
+tv: + for _, newerFields := range later { + for _, nf := range newerFields { + if nf.Name == name { + n, nmv := nf.Type.laterFields() + nlater = append(nlater, n) + nmvlater = append(nmvlater, nmv) + continue tv + } + } + return nil, nil, true + } + return nlater, nmvlater, false +} + +func (f *field) prepare(nfields []field, later, mvlater [][]field) { + for _, nf := range nfields { + if nf.Name == f.Name { + f.structField = nf.structField + f.Type.prepare(&nf.Type, later, mvlater) + } + } +} + +func (ft fieldType) laterFields() (later, mvlater []field) { + if ft.MapKey != nil { + later, _ = ft.MapKey.laterFields() + mvlater, _ = ft.MapValue.laterFields() + return later, mvlater + } else if ft.List != nil { + return ft.List.laterFields() + } + return ft.Fields, nil +} + +func (ft fieldType) prepare(nft *fieldType, later, mvlater [][]field) { + for i, f := range ft.Fields { + nlater, nmvlater, skip := lookupLater(f.Name, later) + if skip { + continue + } + ft.Fields[i].prepare(nft.Fields, nlater, nmvlater) + } + if ft.MapKey != nil { + ft.MapKey.prepare(nft.MapKey, later, nil) + ft.MapValue.prepare(nft.MapValue, mvlater, nil) + } + if ft.List != nil { + ft.List.prepare(nft.List, later, mvlater) + } +} + +// typeEqual compares two typeVersions, typically the current for a +// storeType and a potential new typeVersion for a type that is being +// registered. +// If a field changes (add/remove/modify, including struct tag), a type is no +// longer equal. +// Does not take fields Version or Name into account. +func (tv typeVersion) typeEqual(ntv typeVersion) bool { + if tv.OndiskVersion != ntv.OndiskVersion { + return false + } + if tv.Noauto != ntv.Noauto { + return false + } + if len(tv.Fields) != len(ntv.Fields) { + return false + } + for i, f := range tv.Fields { + if !f.typeEqual(ntv.Fields[i]) { + return false + } + } + + // note: embedFields are not relevant for equality, they are just a convenient way to set multiple fields. 
+ + if len(tv.Indices) != len(ntv.Indices) { + return false + } + for name, idx := range tv.Indices { + if nidx, ok := ntv.Indices[name]; !ok || !idx.typeEqual(nidx) { + return false + } + } + + return true +} + +func (f field) typeEqual(nf field) bool { + if f.Name != nf.Name || !f.Type.typeEqual(nf.Type) || f.Nonzero != nf.Nonzero || f.Default != nf.Default { + return false + } + if len(f.References) != len(nf.References) { + return false + } + for i, s := range f.References { + if s != nf.References[i] { + return false + } + } + return true +} + +func (ft fieldType) typeEqual(nft fieldType) bool { + if ft.Ptr != nft.Ptr || ft.Kind != nft.Kind { + return false + } + if len(ft.Fields) != len(nft.Fields) { + return false + } + for i, f := range ft.Fields { + if !f.typeEqual(nft.Fields[i]) { + return false + } + } + if ft.MapKey != nil && (!ft.MapKey.typeEqual(*nft.MapKey) || !ft.MapValue.typeEqual(*nft.MapValue)) { + return false + } + if ft.List != nil && !ft.List.typeEqual(*nft.List) { + return false + } + return true +} + +func (idx *index) typeEqual(nidx *index) bool { + if idx.Unique != nidx.Unique || idx.Name != nidx.Name { + return false + } + if len(idx.Fields) != len(nidx.Fields) { + return false + } + for i, f := range idx.Fields { + if !f.typeEqual(nidx.Fields[i]) { + return false + } + } + return true +} + +// checkTypes checks if typeVersions otv and ntv are consistent with +// their field types. E.g. an int32 can be changed into an int64, but an int64 cannot +// into an int32. Indices that need to be recreated (for an int width change) are +// recorded in recreateIndices. 
+func (tx *Tx) checkTypes(otv, ntv *typeVersion, recreateIndices map[string]struct{}) error { + for _, f := range ntv.Fields { + for _, of := range otv.Fields { + if f.Name != of.Name { + continue + } + increase, err := of.Type.compatible(f.Type) + if err != nil { + return fmt.Errorf("%w: field %q: %s", ErrIncompatible, f.Name, err) + } + if increase { + // Indices involving this field need to be recreated. The indices are packed with fixed widths. + for name, idx := range otv.Indices { + for _, ifield := range idx.Fields { + if ifield.Name == f.Name { + recreateIndices[name] = struct{}{} + break + } + } + } + } + break + } + } + return nil +} + +// compatible returns if ft and nft's types are compatible (with recursive checks +// for maps/slices/structs). If not an error is returned. If they are, the first +// return value indicates if this is a field that needs it index recreated +// (currently for ints that are packed with fixed width encoding). +func (ft fieldType) compatible(nft fieldType) (bool, error) { + need := func(incr bool, l ...kind) (bool, error) { + for _, k := range l { + if nft.Kind == k { + return incr, nil + } + } + return false, fmt.Errorf("%w: need %v have %v", ErrIncompatible, l, nft.Kind) + } + + k := ft.Kind + nk := nft.Kind + + // We refuse to change pointers to non-pointers for composite types that have + // fields with Nonzero set: nil values would become zero values. + if ft.Ptr && !nft.Ptr && k == nk && nft.hasNonzeroField(false) { + // todo: we could verify all data is nonzero? 
+ return false, fmt.Errorf("%w: type changing from ptr to non-ptr cannot have nonzero fields", ErrIncompatible) + } + + switch k { + case kindBytes, kindBool, kindBinaryMarshal, kindString, kindFloat32, kindFloat64, kindTime: + return need(false, ft.Kind) + case kindInt8: + if nk == k { + return false, nil + } + return need(true, kindInt16, kindInt32, kindInt, kindInt64) + case kindInt16: + if nk == k { + return false, nil + } + return need(true, kindInt32, kindInt, kindInt64) + case kindInt32, kindInt: + if nk == k { + return false, nil + } + return need(true, kindInt32, kindInt, kindInt64) + case kindInt64: + return need(false, kindInt64) + case kindUint8: + if nk == k { + return false, nil + } + return need(true, kindUint16, kindUint32, kindUint, kindUint64) + case kindUint16: + if nk == k { + return false, nil + } + return need(true, kindUint32, kindUint, kindUint64) + case kindUint32, kindUint: + if nk == k { + return false, nil + } + return need(true, kindUint32, kindUint, kindUint64) + case kindUint64: + return need(false, kindUint64) + case kindMap: + if nk != k { + return false, fmt.Errorf("map to %v: %w", nk, ErrIncompatible) + } + if _, err := ft.MapKey.compatible(*nft.MapKey); err != nil { + return false, fmt.Errorf("map key: %w", err) + } + if _, err := ft.MapValue.compatible(*nft.MapValue); err != nil { + return false, fmt.Errorf("map value: %w", err) + } + return false, nil + case kindSlice: + if nk != k { + return false, fmt.Errorf("slice to %v: %w", nk, ErrIncompatible) + } + if _, err := ft.List.compatible(*nft.List); err != nil { + return false, fmt.Errorf("list: %w", err) + } + return false, nil + case kindStruct: + if nk != k { + return false, fmt.Errorf("struct to %v: %w", nk, ErrIncompatible) + } + for _, nf := range nft.Fields { + for _, f := range ft.Fields { + if nf.Name == f.Name { + _, err := f.Type.compatible(nf.Type) + if err != nil { + return false, fmt.Errorf("field %q: %w", nf.Name, err) + } + break + } + } + } + return false, nil + 
}
+	return false, fmt.Errorf("internal error: missing case for kind %v", k)
+}
+
+func (ft fieldType) hasNonzeroField(stopAtPtr bool) bool {
+	if ft.Ptr && stopAtPtr {
+		return false
+	}
+	switch ft.Kind {
+	case kindMap:
+		return ft.MapValue.hasNonzeroField(true) // map value type; ft.List is nil for maps
+	case kindSlice:
+		return ft.List.hasNonzeroField(true) // slice element type; ft.MapValue is nil for slices
+	case kindStruct:
+		for _, f := range ft.Fields {
+			if f.Nonzero || f.Type.hasNonzeroField(true) {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/mjl-/bstore/stats.go b/vendor/github.com/mjl-/bstore/stats.go
new file mode 100644
index 0000000..9b556e4
--- /dev/null
+++ b/vendor/github.com/mjl-/bstore/stats.go
@@ -0,0 +1,105 @@
+package bstore
+
+// StatsKV represent operations on the underlying BoltDB key/value store.
+type StatsKV struct {
+	Get    uint
+	Put    uint // For Stats.Bucket, this counts calls of CreateBucket.
+	Delete uint
+	Cursor uint // Any cursor operation: Seek/First/Last/Next/Prev.
+}
+
+// Stats tracks DB/Tx/Query statistics, mostly counters.
+type Stats struct {
+	// Number of read-only or writable transactions. Set for DB only.
+	Reads  uint
+	Writes uint
+
+	Bucket  StatsKV // Use of buckets.
+	Records StatsKV // Use of records bucket for types.
+	Index   StatsKV // Use of index buckets for types.
+
+	// Operations that modify the database. Each record is counted, e.g.
+	// for a query that updates/deletes multiple records.
+	Get    uint
+	Insert uint
+	Update uint
+	Delete uint
+
+	Queries       uint   // Total queries executed.
+	PlanTableScan uint   // Full table scans.
+	PlanPK        uint   // Primary key get.
+	PlanUnique    uint   // Full key Unique index get.
+	PlanPKScan    uint   // Scan over primary keys.
+	PlanIndexScan uint   // Scan over index.
+	Sort          uint   // In-memory collect and sort.
+	LastType      string // Last type queried.
+	LastIndex     string // Last index for LastType used for a query, or empty.
+	LastOrdered   bool   // Whether last scan (PK or index) use was ordered, e.g. for sorting or because of a comparison filter.
+ LastAsc bool // If ordered, whether last index scan was ascending. +} + +func (skv *StatsKV) add(n StatsKV) { + skv.Get += n.Get + skv.Put += n.Put + skv.Delete += n.Delete + skv.Cursor += n.Cursor +} + +func (skv *StatsKV) sub(n StatsKV) { + skv.Get -= n.Get + skv.Put -= n.Put + skv.Delete -= n.Delete + skv.Cursor -= n.Cursor +} + +func (st *Stats) add(n Stats) { + st.Reads += n.Reads + st.Writes += n.Writes + + st.Bucket.add(n.Bucket) + st.Records.add(n.Records) + st.Index.add(n.Index) + + st.Get += n.Get + st.Insert += n.Insert + st.Update += n.Update + st.Delete += n.Delete + + st.Queries += n.Queries + st.PlanTableScan += n.PlanTableScan + st.PlanPK += n.PlanPK + st.PlanUnique += n.PlanUnique + st.PlanPKScan += n.PlanPKScan + st.PlanIndexScan += n.PlanIndexScan + st.Sort += n.Sort + + st.LastType = n.LastType + st.LastIndex = n.LastIndex + st.LastOrdered = n.LastOrdered + st.LastAsc = n.LastAsc +} + +// Sub returns st with the counters from o subtracted. +func (st Stats) Sub(o Stats) Stats { + st.Reads -= o.Reads + st.Writes -= o.Writes + + st.Bucket.sub(o.Bucket) + st.Records.sub(o.Records) + st.Index.sub(o.Index) + + st.Get -= o.Get + st.Insert -= o.Insert + st.Update -= o.Update + st.Delete -= o.Delete + + st.Queries -= o.Queries + st.PlanTableScan -= o.PlanTableScan + st.PlanPK -= o.PlanPK + st.PlanUnique -= o.PlanUnique + st.PlanPKScan -= o.PlanPKScan + st.PlanIndexScan -= o.PlanIndexScan + st.Sort -= o.Sort + + return st +} diff --git a/vendor/github.com/mjl-/bstore/store.go b/vendor/github.com/mjl-/bstore/store.go new file mode 100644 index 0000000..c47b616 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/store.go @@ -0,0 +1,566 @@ +package bstore + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/fs" + "os" + "reflect" + "sync" + "time" + + bolt "go.etcd.io/bbolt" +) + +var ( + ErrAbsent = errors.New("absent") // If a function can return an ErrAbsent, it can be compared directly, without errors.Is. 
+ ErrZero = errors.New("must be nonzero") + ErrUnique = errors.New("not unique") + ErrReference = errors.New("referential inconsistency") + ErrMultiple = errors.New("multiple results") + ErrSeq = errors.New("highest autoincrement sequence value reached") + ErrType = errors.New("unknown/bad type") + ErrIncompatible = errors.New("incompatible types") + ErrFinished = errors.New("query finished") + ErrStore = errors.New("internal/storage error") // E.g. when buckets disappear, possibly by external users of the underlying BoltDB database. + ErrParam = errors.New("bad parameters") + + errTxClosed = errors.New("transaction is closed") + errNestedIndex = errors.New("struct tags index/unique only allowed at top-level structs") +) + +var sanityChecks bool // Only enabled during tests. + +// DB is a database storing Go struct values in an underlying bolt database. +// DB is safe for concurrent use, unlike a Tx or a Query. +type DB struct { + bdb *bolt.DB + + // Read transaction take an rlock on types. Register can make changes and + // needs a wlock. + typesMutex sync.RWMutex + types map[reflect.Type]storeType + typeNames map[string]storeType // Go type name to store type, for checking duplicates. + + statsMutex sync.Mutex + stats Stats +} + +// Tx is a transaction on DB. +// +// A Tx is not safe for concurrent use. +type Tx struct { + db *DB // If nil, this transaction is closed. + btx *bolt.Tx + + bucketCache map[bucketKey]*bolt.Bucket + + stats Stats +} + +// bucketKey represents a subbucket for a type. +type bucketKey struct { + typeName string + sub string // Empty for top-level type bucket, otherwise "records", "types" or starting with "index.". +} + +type index struct { + Unique bool + Name string // Normally named after the field. But user can specify alternative name with "index" or "unique" struct tag with parameter. + Fields []field + + tv *typeVersion +} + +type storeType struct { + Name string // Name of type as stored in database. 
Different from the current Go type name if the uses the "typename" struct tag. + Type reflect.Type // Type we parse into for new values. + Current *typeVersion + + // Earlier schema versions. Older type versions can still be stored. We + // prepare them for parsing into the reflect.Type. Some stored fields in + // old versions may be ignored: when a later schema has removed the field, + // that old stored field is considered deleted and will be ignored when + // parsing. + Versions map[uint32]*typeVersion +} + +// note: when changing, possibly update func equal as well. +type typeVersion struct { + Version uint32 // First uvarint of a stored record references this version. + OndiskVersion uint32 // Version of on-disk format. Currently always 1. + Noauto bool // If true, the primary key is an int but opted out of autoincrement. + Fields []field // Fields that we store. Embed/anonymous fields are kept separately in embedFields, and are not stored. + Indices map[string]*index // By name of index. + ReferencedBy map[string]struct{} // Type names that reference this type. We require they are registered at the same time to maintain referential integrity. + + name string + referencedBy []*index // Indexes (from other types) that reference this type. + references map[string]struct{} // Keys are the type names referenced. This is a summary for the references from Fields. + embedFields []embed // Embed/anonymous fields, their values are stored through Fields, we keep them for setting values. + + fillPercent float64 // For "records" bucket. Set to 1 for append-only/mostly use as set with HintAppend, 0.5 otherwise. +} + +// note: when changing, possibly update func equal as well. +// embed/anonymous fields are represented as type embed. The fields inside the embed type are of this type field. +type field struct { + Name string + Type fieldType + Nonzero bool + References []string // Referenced fields. Only for the top-level struct fields, not for nested structs. 
+ Default string // As specified in struct tag. Processed version is defaultValue. + + // If not the zero reflect.Value, set this value instead of a zero value on insert. + // This is always a non-pointer value. Only set for the current typeVersion + // linked to a Go type. + defaultValue reflect.Value + + // Only set if this typeVersion will parse this field. We check + // structField.Type for non-nil before parsing this field. We don't parse it + // if this field is no longer in the type, or if it has been removed and + // added again in later schema versions. + structField reflect.StructField + + indices map[string]*index +} + +// embed is for embed/anonymous fields. the fields inside are represented as a type field. +type embed struct { + Name string + Type fieldType + structField reflect.StructField +} + +type kind int + +const ( + kindInvalid kind = iota + kindBytes + kindBool + kindInt + kindInt8 + kindInt16 + kindInt32 + kindInt64 + kindUint + kindUint8 + kindUint16 + kindUint32 + kindUint64 + kindFloat32 + kindFloat64 + kindMap + kindSlice + kindString + kindTime + kindBinaryMarshal + kindStruct +) + +var kindStrings = []string{ + "(invalid)", + "bytes", + "bool", + "int", + "int8", + "int16", + "int32", + "int64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "float32", + "float64", + "map", + "slice", + "string", + "time", + "binarymarshal", + "struct", +} + +func (k kind) String() string { + return kindStrings[k] +} + +type fieldType struct { + Ptr bool // If type is a pointer. + Kind kind // Type with possible Ptr deferenced. + Fields []field // For kindStruct. + MapKey, MapValue *fieldType // For kindMap. + List *fieldType // For kindSlice. +} + +func (ft fieldType) String() string { + s := ft.Kind.String() + if ft.Ptr { + return s + "ptr" + } + return s +} + +// Options configure how a database should be opened or initialized. +type Options struct { + Timeout time.Duration // Abort if opening DB takes longer than Timeout. 
+ Perm fs.FileMode // Permissions for new file if created. If zero, 0600 is used. + MustExist bool // Before opening, check that file exists. If not, io/fs.ErrNotExist is returned. +} + +// Open opens a bstore database and registers types by calling Register. +// +// If the file does not exist, a new database file is created, unless opts has +// MustExist set. Files are created with permission 0600, or with Perm from +// Options if nonzero. +// +// Only one DB instance can be open for a file at a time. Use opts.Timeout to +// specify a timeout during open to prevent indefinite blocking. +func Open(path string, opts *Options, typeValues ...any) (*DB, error) { + var bopts *bolt.Options + if opts != nil && opts.Timeout > 0 { + bopts = &bolt.Options{Timeout: opts.Timeout} + } + var mode fs.FileMode = 0600 + if opts != nil && opts.Perm != 0 { + mode = opts.Perm + } + if opts != nil && opts.MustExist { + if _, err := os.Stat(path); err != nil { + return nil, err + } + } + bdb, err := bolt.Open(path, mode, bopts) + if err != nil { + return nil, err + } + + typeNames := map[string]storeType{} + types := map[reflect.Type]storeType{} + db := &DB{bdb: bdb, typeNames: typeNames, types: types} + if err := db.Register(typeValues...); err != nil { + bdb.Close() + return nil, err + } + return db, nil +} + +// Close closes the underlying database. +func (db *DB) Close() error { + return db.bdb.Close() +} + +// Stats returns usage statistics for the lifetime of DB. Stats are tracked +// first in a Query or a Tx. Stats from a Query are propagated to its Tx when +// the Query finishes. Stats from a Tx are propagated to its DB when the +// transaction ends. +func (db *DB) Stats() Stats { + db.statsMutex.Lock() + defer db.statsMutex.Unlock() + return db.stats +} + +// Stats returns usage statistics for this transaction. +// When a transaction is rolled back or committed, its statistics are copied +// into its DB. 
+func (tx *Tx) Stats() Stats { + return tx.stats +} + +// WriteTo writes the entire database to w, not including changes made during this transaction. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + return tx.btx.WriteTo(w) +} + +// return a bucket through cache. +func (tx *Tx) bucket(bk bucketKey) (*bolt.Bucket, error) { + if tx.bucketCache == nil { + tx.bucketCache = map[bucketKey]*bolt.Bucket{} + } + b := tx.bucketCache[bk] + if b != nil { + return b, nil + } + top := tx.bucketCache[bucketKey{bk.typeName, ""}] + if top == nil { + tx.stats.Bucket.Get++ + top = tx.btx.Bucket([]byte(bk.typeName)) + if top == nil { + return nil, fmt.Errorf("%w: missing bucket for type %q", ErrStore, bk.typeName) + } + tx.bucketCache[bucketKey{bk.typeName, ""}] = top + } + if bk.sub == "" { + return top, nil + } + + tx.stats.Bucket.Get++ + b = top.Bucket([]byte(bk.sub)) + if b == nil { + return nil, fmt.Errorf("%w: missing bucket %q for type %q", ErrStore, bk.sub, bk.typeName) + } + tx.bucketCache[bk] = b + return b, nil +} + +func (tx *Tx) typeBucket(typeName string) (*bolt.Bucket, error) { + return tx.bucket(bucketKey{typeName, ""}) +} + +func (tx *Tx) recordsBucket(typeName string, fillPercent float64) (*bolt.Bucket, error) { + b, err := tx.bucket(bucketKey{typeName, "records"}) + if err != nil { + return nil, err + } + b.FillPercent = fillPercent + return b, nil +} + +func (tx *Tx) indexBucket(idx *index) (*bolt.Bucket, error) { + return tx.bucket(bucketKey{idx.tv.name, "index." + idx.Name}) +} + +// Drop removes a type and its data from the database. +// If the type is currently registered, it is unregistered and no longer available. +// If a type is still referenced by another type, eg through a "ref" struct tag, +// ErrReference is returned. +// If the type does not exist, ErrAbsent is returned. 
+func (db *DB) Drop(name string) error { + return db.Write(func(tx *Tx) error { + tx.stats.Bucket.Get++ + if tx.btx.Bucket([]byte(name)) == nil { + return ErrAbsent + } + + if st, ok := db.typeNames[name]; ok && len(st.Current.referencedBy) > 0 { + return fmt.Errorf("%w: type is still referenced", ErrReference) + } else if ok { + for ref := range st.Current.references { + var n []*index + for _, idx := range db.typeNames[ref].Current.referencedBy { + if idx.tv != st.Current { + n = append(n, idx) + } + } + db.typeNames[ref].Current.referencedBy = n + } + delete(db.typeNames, name) + delete(db.types, st.Type) + } + + tx.stats.Bucket.Delete++ + return tx.btx.DeleteBucket([]byte(name)) + }) +} + +// Delete calls Delete on a new writable Tx. +func (db *DB) Delete(values ...any) error { + return db.Write(func(tx *Tx) error { + return tx.Delete(values...) + }) +} + +// Get calls Get on a new read-only Tx. +func (db *DB) Get(values ...any) error { + return db.Read(func(tx *Tx) error { + return tx.Get(values...) + }) +} + +// Insert calls Insert on a new writable Tx. +func (db *DB) Insert(values ...any) error { + return db.Write(func(tx *Tx) error { + return tx.Insert(values...) + }) +} + +// Update calls Update on a new writable Tx. +func (db *DB) Update(values ...any) error { + return db.Write(func(tx *Tx) error { + return tx.Update(values...) 
+ }) +} + +var typeKinds = map[reflect.Kind]kind{ + reflect.Bool: kindBool, + reflect.Int: kindInt, + reflect.Int8: kindInt8, + reflect.Int16: kindInt16, + reflect.Int32: kindInt32, + reflect.Int64: kindInt64, + reflect.Uint: kindUint, + reflect.Uint8: kindUint8, + reflect.Uint16: kindUint16, + reflect.Uint32: kindUint32, + reflect.Uint64: kindUint64, + reflect.Float32: kindFloat32, + reflect.Float64: kindFloat64, + reflect.Map: kindMap, + reflect.Slice: kindSlice, + reflect.String: kindString, +} + +func typeKind(t reflect.Type) (kind, error) { + if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + return kindBytes, nil + } + + k, ok := typeKinds[t.Kind()] + if ok { + return k, nil + } + + if t == reflect.TypeOf(zerotime) { + return kindTime, nil + } + + if reflect.PointerTo(t).AssignableTo(reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()) { + return kindBinaryMarshal, nil + } + + if t.Kind() == reflect.Struct { + return kindStruct, nil + } + return kind(0), fmt.Errorf("%w: unsupported type %v", ErrType, t) +} + +func typeName(t reflect.Type) (string, error) { + tags, err := newStoreTags(t.Field(0).Tag.Get("bstore"), true) + if err != nil { + return "", err + } + if name, err := tags.Get("typename"); err != nil { + return "", err + } else if name != "" { + return name, nil + } + return t.Name(), nil +} + +// Get value for a key. For insert a next sequence may be generated for the +// primary key. 
+func (tv typeVersion) keyValue(tx *Tx, rv reflect.Value, insert bool, rb *bolt.Bucket) ([]byte, reflect.Value, bool, error) { + f := tv.Fields[0] + krv := rv.FieldByIndex(f.structField.Index) + var seq bool + if krv.IsZero() { + if !insert { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key can not be zero value", ErrParam) + } + if tv.Noauto { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key cannot be zero value without autoincrement", ErrParam) + } + id, err := rb.NextSequence() + if err != nil { + return nil, reflect.Value{}, seq, fmt.Errorf("next primary key: %w", err) + } + switch f.Type.Kind { + case kindInt, kindInt8, kindInt16, kindInt32, kindInt64: + if krv.OverflowInt(int64(id)) { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq) + } + krv.SetInt(int64(id)) + case kindUint, kindUint8, kindUint16, kindUint32, kindUint64: + if krv.OverflowUint(id) { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq) + } + krv.SetUint(id) + default: + // todo: should check this during register. + return nil, reflect.Value{}, seq, fmt.Errorf("%w: unsupported autoincrement primary key type %v", ErrZero, f.Type.Kind) + } + seq = true + } else if !tv.Noauto && insert { + // We let user insert their own ID for our own autoincrement + // PK. But we update the internal next sequence if the users's + // PK is highest yet, so a future autoincrement insert will succeed. 
+ switch f.Type.Kind { + case kindInt, kindInt8, kindInt16, kindInt32, kindInt64: + v := krv.Int() + if v > 0 && uint64(v) > rb.Sequence() { + if err := rb.SetSequence(uint64(v)); err != nil { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err) + } + } + case kindUint, kindUint8, kindUint16, kindUint32, kindUint64: + v := krv.Uint() + if v > rb.Sequence() { + if err := rb.SetSequence(v); err != nil { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err) + } + } + } + } + + k, err := packPK(krv) + if err != nil { + return nil, reflect.Value{}, seq, err + } + if seq { + tx.stats.Records.Get++ + if rb.Get(k) != nil { + return nil, reflect.Value{}, seq, fmt.Errorf("%w: internal error: next sequence value is already present", ErrUnique) + } + } + return k, krv, seq, err +} + +// Read calls function fn with a new read-only transaction, ensuring transaction rollback. +func (db *DB) Read(fn func(*Tx) error) error { + db.typesMutex.RLock() + defer db.typesMutex.RUnlock() + return db.bdb.View(func(btx *bolt.Tx) error { + tx := &Tx{db: db, btx: btx} + tx.stats.Reads++ + defer tx.addStats() + return fn(tx) + }) +} + +// Write calls function fn with a new read-write transaction. If fn returns +// nil, the transaction is committed. Otherwise the transaction is rolled back. +func (db *DB) Write(fn func(*Tx) error) error { + db.typesMutex.RLock() + defer db.typesMutex.RUnlock() + return db.bdb.Update(func(btx *bolt.Tx) error { + tx := &Tx{db: db, btx: btx} + tx.stats.Writes++ + defer tx.addStats() + return fn(tx) + }) +} + +// lookup storeType based on name of rt. +func (db *DB) storeType(rt reflect.Type) (storeType, error) { + st, ok := db.types[rt] + if !ok { + return storeType{}, fmt.Errorf("%w: %v", ErrType, rt) + } + return st, nil +} + +// HintAppend sets a hint whether changes to the types indicated by each struct +// from values is (mostly) append-only. 
+// +// This currently sets the BoltDB bucket FillPercentage to 1 for efficient use +// of storage space. +func (db *DB) HintAppend(append bool, values ...any) error { + db.typesMutex.Lock() + defer db.typesMutex.Unlock() + for _, v := range values { + t := reflect.TypeOf(v) + st, err := db.storeType(t) + if err != nil { + return err + } + if append { + st.Current.fillPercent = 1.0 + } else { + st.Current.fillPercent = 0.5 + } + } + return nil +} diff --git a/vendor/github.com/mjl-/bstore/tags.go b/vendor/github.com/mjl-/bstore/tags.go new file mode 100644 index 0000000..454e9af --- /dev/null +++ b/vendor/github.com/mjl-/bstore/tags.go @@ -0,0 +1,69 @@ +package bstore + +import ( + "fmt" + "strings" +) + +type storeTags []string + +func newStoreTags(tag string, isPK bool) (storeTags, error) { + if tag == "" { + return nil, nil + } + + l := strings.Split(tag, ",") + for _, s := range l { + w := strings.SplitN(s, " ", 2) + switch w[0] { + case "noauto", "typename": + if !isPK { + return nil, fmt.Errorf("%w: cannot have tag %q for non-primary key", ErrType, w[0]) + } + case "index", "unique", "default", "-": + if isPK { + return nil, fmt.Errorf("%w: cannot have tag %q on primary key", ErrType, w[0]) + } + case "name", "nonzero", "ref": + default: + return nil, fmt.Errorf("%w: unknown store tag %q", ErrType, w[0]) + } + } + return storeTags(l), nil +} + +func (t storeTags) Has(word string) bool { + for _, s := range t { + if s == word { + return true + } + } + return false +} + +func (t storeTags) Get(word string) (string, error) { + wordsp := word + " " + for _, s := range t { + if strings.HasPrefix(s, wordsp) { + r := s[len(wordsp):] + if r == "" { + return "", fmt.Errorf("%w: bstore word %q requires non-empty parameter", ErrType, word) + } + return r, nil + } else if s == word { + return "", fmt.Errorf("%w: bstore word %q requires argument", ErrType, word) + } + } + return "", nil +} + +func (t storeTags) List(word string) []string { + var l []string + wordsp := 
word + " " + for _, s := range t { + if strings.HasPrefix(s, wordsp) { + l = append(l, s[len(wordsp):]) + } + } + return l +} diff --git a/vendor/github.com/mjl-/bstore/tx.go b/vendor/github.com/mjl-/bstore/tx.go new file mode 100644 index 0000000..e9dde86 --- /dev/null +++ b/vendor/github.com/mjl-/bstore/tx.go @@ -0,0 +1,438 @@ +package bstore + +import ( + "bytes" + "fmt" + "reflect" + + bolt "go.etcd.io/bbolt" +) + +func (tx *Tx) structptr(value any) (reflect.Value, error) { + rv := reflect.ValueOf(value) + if !rv.IsValid() || rv.Kind() != reflect.Ptr || !rv.Elem().IsValid() || rv.Type().Elem().Kind() != reflect.Struct { + return reflect.Value{}, fmt.Errorf("%w: value must be non-nil pointer to a struct, is %T", ErrParam, value) + } + rv = rv.Elem() + return rv, nil +} + +func (tx *Tx) structOrStructptr(value any) (reflect.Value, error) { + rv := reflect.ValueOf(value) + if !rv.IsValid() { + return reflect.Value{}, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam) + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + if !rv.IsValid() { + return rv, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam) + } + } + if rv.Kind() != reflect.Struct { + return reflect.Value{}, fmt.Errorf("%w: value must be a struct or pointer to a struct, is %T", ErrParam, value) + } + return rv, nil +} + +// update indices by comparing indexed fields of the ov (old) and v (new). Only if +// the fields changed will the index be updated. Either ov or v may be the +// reflect.Value zero value, indicating there is no old/new value and the index +// should be updated. +func (tx *Tx) updateIndices(tv *typeVersion, pk []byte, ov, v reflect.Value) error { + + changed := func(idx *index) bool { + for _, f := range idx.Fields { + rofv := ov.FieldByIndex(f.structField.Index) + nofv := v.FieldByIndex(f.structField.Index) + // note: checking the interface values is enough, we only allow comparable types as index fields. 
+ if rofv.Interface() != nofv.Interface() { + return true + } + } + return false + } + + for _, idx := range tv.Indices { + var add, remove bool + if !ov.IsValid() { + add = true + } else if !v.IsValid() { + remove = true + } else if !changed(idx) { + continue + } else { + add, remove = true, true + } + + ib, err := tx.indexBucket(idx) + if err != nil { + return err + } + if remove { + _, ik, err := idx.packKey(ov, pk) + if err != nil { + return err + } + tx.stats.Index.Delete++ + if sanityChecks { + tx.stats.Index.Get++ + if ib.Get(ik) == nil { + return fmt.Errorf("internal error: key missing from index") + } + } + if err := ib.Delete(ik); err != nil { + return fmt.Errorf("%w: removing from index: %s", ErrStore, err) + } + } + if add { + prek, ik, err := idx.packKey(v, pk) + if err != nil { + return err + } + if idx.Unique { + tx.stats.Index.Cursor++ + if xk, _ := ib.Cursor().Seek(prek); xk != nil && bytes.HasPrefix(xk, prek) { + return fmt.Errorf("%w: %q", ErrUnique, idx.Name) + } + } + + tx.stats.Index.Put++ + if err := ib.Put(ik, []byte{}); err != nil { + return fmt.Errorf("inserting into index: %w", err) + } + } + } + return nil +} + +func (tx *Tx) checkReferences(tv *typeVersion, pk []byte, ov, rv reflect.Value) error { + for _, f := range tv.Fields { + if len(f.References) == 0 { + continue + } + frv := rv.FieldByIndex(f.structField.Index) + if frv.IsZero() || (ov.IsValid() && ov.FieldByIndex(f.structField.Index).Interface() == frv.Interface()) { + continue + } + k, err := packPK(frv) + if err != nil { + return err + } + for _, name := range f.References { + rb, err := tx.recordsBucket(name, tv.fillPercent) + if err != nil { + return err + } + if rb.Get(k) == nil { + return fmt.Errorf("%w: value %v from field %q to %q", ErrReference, frv.Interface(), f.Name, name) + } + } + } + return nil +} + +func (tx *Tx) addStats() { + tx.db.statsMutex.Lock() + tx.db.stats.add(tx.stats) + tx.db.statsMutex.Unlock() + tx.stats = Stats{} +} + +// Get fetches records by 
their primary key from the database. Each value must +// be a pointer to a struct. +// +// ErrAbsent is returned if the record does not exist. +func (tx *Tx) Get(values ...any) error { + if tx.db == nil { + return errTxClosed + } + + for _, value := range values { + tx.stats.Get++ + rv, err := tx.structptr(value) + if err != nil { + return err + } + st, err := tx.db.storeType(rv.Type()) + if err != nil { + return err + } + rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent) + if err != nil { + return err + } + k, _, _, err := st.Current.keyValue(tx, rv, false, rb) + if err != nil { + return err + } + tx.stats.Records.Get++ + bv := rb.Get(k) + if bv == nil { + return ErrAbsent + } + if err := st.parse(rv, bv); err != nil { + return err + } + } + return nil +} + +// Delete removes values by their primary key from the database. Each value +// must be a struct or pointer to a struct. Indices are automatically updated +// and referential integrity is maintained. +// +// ErrAbsent is returned if the record does not exist. +// ErrReference is returned if another record still references this record. 
+func (tx *Tx) Delete(values ...any) error { + if tx.db == nil { + return errTxClosed + } + + for _, value := range values { + tx.stats.Delete++ + rv, err := tx.structOrStructptr(value) + if err != nil { + return err + } + st, err := tx.db.storeType(rv.Type()) + if err != nil { + return err + } + rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent) + if err != nil { + return err + } + k, _, _, err := st.Current.keyValue(tx, rv, false, rb) + if err != nil { + return err + } + tx.stats.Records.Get++ + bv := rb.Get(k) + if bv == nil { + return ErrAbsent + } + rov, err := st.parseNew(k, bv) + if err != nil { + return fmt.Errorf("parsing current value: %w", err) + } + if err := tx.delete(rb, st, k, rov); err != nil { + return err + } + } + return nil +} + +func (tx *Tx) delete(rb *bolt.Bucket, st storeType, k []byte, rov reflect.Value) error { + // Check that anyone referencing this type does not reference this record. + for _, refBy := range st.Current.referencedBy { + if ib, err := tx.indexBucket(refBy); err != nil { + return err + } else { + tx.stats.Index.Cursor++ + if xk, _ := ib.Cursor().Seek(k); xk != nil && bytes.HasPrefix(xk, k) { + return fmt.Errorf("%w: index %q", ErrReference, refBy.Name) + } + } + } + + // Delete value from indices. + if err := tx.updateIndices(st.Current, k, rov, reflect.Value{}); err != nil { + return fmt.Errorf("removing from indices: %w", err) + } + + tx.stats.Records.Delete++ + return rb.Delete(k) +} + +// Update updates records represented by values by their primary keys into the +// database. Each value must be a pointer to a struct. Indices are +// automatically updated. +// +// ErrAbsent is returned if the record does not exist. 
+func (tx *Tx) Update(values ...any) error { + if tx.db == nil { + return errTxClosed + } + + for _, value := range values { + tx.stats.Update++ + rv, err := tx.structptr(value) + if err != nil { + return err + } + + st, err := tx.db.storeType(rv.Type()) + if err != nil { + return err + } + + if err := tx.put(st, rv, false); err != nil { + return err + } + } + return nil +} + +// Insert inserts values as new records into the database. Each value must be a +// pointer to a struct. If the primary key field is zero and autoincrement is not +// disabled, the next sequence is assigned. Indices are automatically updated. +// +// ErrUnique is returned if the record already exists. +// ErrSeq is returned if no next autoincrement integer is available. +// ErrZero is returned if a nonzero constraint would be violated. +// ErrReference is returned if another record is referenced that does not exist. +func (tx *Tx) Insert(values ...any) error { + if tx.db == nil { + return errTxClosed + } + + for _, value := range values { + tx.stats.Insert++ + rv, err := tx.structptr(value) + if err != nil { + return err + } + + st, err := tx.db.storeType(rv.Type()) + if err != nil { + return err + } + + if err := st.Current.applyDefault(rv); err != nil { + return err + } + + if err := tx.put(st, rv, true); err != nil { + return err + } + } + return nil +} + +func (tx *Tx) put(st storeType, rv reflect.Value, insert bool) error { + rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent) + if err != nil { + return err + } + k, krv, seq, err := st.Current.keyValue(tx, rv, insert, rb) + if err != nil { + return err + } + if insert { + tx.stats.Records.Get++ + bv := rb.Get(k) + if bv != nil { + return fmt.Errorf("%w: record already exists", ErrUnique) + } + err := tx.insert(rb, st, rv, krv, k) + if err != nil && seq { + // Zero out the generated sequence. 
+ krv.Set(reflect.Zero(krv.Type())) + } + return err + } else { + tx.stats.Records.Get++ + bv := rb.Get(k) + if bv == nil { + return ErrAbsent + } + ov, err := st.parseNew(k, bv) + if err != nil { + return fmt.Errorf("parsing current value: %w", err) + } + return tx.update(rb, st, rv, ov, k) + } +} + +func (tx *Tx) insert(rb *bolt.Bucket, st storeType, rv, krv reflect.Value, k []byte) error { + v, err := st.pack(rv) + if err != nil { + return err + } + if err := tx.checkReferences(st.Current, k, reflect.Value{}, rv); err != nil { + return err + } + if err := tx.updateIndices(st.Current, k, reflect.Value{}, rv); err != nil { + return fmt.Errorf("updating indices for inserted value: %w", err) + } + tx.stats.Records.Put++ + if err := rb.Put(k, v); err != nil { + return err + } + rv.Field(0).Set(krv) + return nil +} + +func (tx *Tx) update(rb *bolt.Bucket, st storeType, rv, rov reflect.Value, k []byte) error { + if st.Current.equal(rov, rv) { + return nil + } + + v, err := st.pack(rv) + if err != nil { + return err + } + if err := tx.checkReferences(st.Current, k, rov, rv); err != nil { + return err + } + if err := tx.updateIndices(st.Current, k, rov, rv); err != nil { + return fmt.Errorf("updating indices for updated record: %w", err) + } + tx.stats.Records.Put++ + return rb.Put(k, v) +} + +// Begin starts a transaction. +// +// If writable is true, the transaction allows modifications. Only one writable +// transaction can be active at a time on a DB. No read-only transactions can be +// active at the same time. Attempting to begin a read-only transaction from a +// writable transaction leads to deadlock. +// +// A writable Tx can be committed or rolled back. A read-only transaction must +// always be rolled back. 
+func (db *DB) Begin(writable bool) (*Tx, error) { + btx, err := db.bdb.Begin(writable) + if err != nil { + return nil, err + } + db.typesMutex.RLock() + tx := &Tx{db: db, btx: btx} + if writable { + tx.stats.Writes++ + } else { + tx.stats.Reads++ + } + return tx, nil +} + +// Rollback aborts and cancels any changes made in this transaction. +// Statistics are added to its DB. +func (tx *Tx) Rollback() error { + if tx.db == nil { + return errTxClosed + } + + tx.addStats() + tx.db.typesMutex.RUnlock() + err := tx.btx.Rollback() + tx.db = nil + return err +} + +// Commit commits changes made in the transaction to the database. +// Statistics are added to its DB. +func (tx *Tx) Commit() error { + if tx.db == nil { + return errTxClosed + } + + tx.addStats() + tx.db.typesMutex.RUnlock() + err := tx.btx.Commit() + if err != nil { + tx.btx.Rollback() // Nothing to do for error. + } + tx.db = nil + return err +} diff --git a/vendor/github.com/mjl-/sconf/.gitignore b/vendor/github.com/mjl-/sconf/.gitignore new file mode 100644 index 0000000..8b1959e --- /dev/null +++ b/vendor/github.com/mjl-/sconf/.gitignore @@ -0,0 +1,2 @@ +/cmd/sconfexample/sconfexample +/cover.* diff --git a/vendor/github.com/mjl-/sconf/LICENSE b/vendor/github.com/mjl-/sconf/LICENSE new file mode 100644 index 0000000..b90262c --- /dev/null +++ b/vendor/github.com/mjl-/sconf/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2019 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mjl-/sconf/Makefile b/vendor/github.com/mjl-/sconf/Makefile new file mode 100644 index 0000000..e83b351 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/Makefile @@ -0,0 +1,12 @@ +build: + go build ./... + go vet ./... + GOARCH=386 go vet ./... + staticcheck ./... + +fmt: + gofmt -w -s *.go cmd/*/*.go + +test: + go test -shuffle=on -coverprofile cover.out + go tool cover -html=cover.out -o cover.html diff --git a/vendor/github.com/mjl-/sconf/README.txt b/vendor/github.com/mjl-/sconf/README.txt new file mode 100644 index 0000000..88b8750 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/README.txt @@ -0,0 +1,6 @@ +sconf - simple config files + +See https://godoc.org/github.com/mjl-/sconf for documentation. + +# todo +- deal better with unexpected types. need to use canset? diff --git a/vendor/github.com/mjl-/sconf/describe.go b/vendor/github.com/mjl-/sconf/describe.go new file mode 100644 index 0000000..99cb2e3 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/describe.go @@ -0,0 +1,264 @@ +package sconf + +import ( + "bufio" + "errors" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/mjl-/xfmt" +) + +var errNoElem = errors.New("no elements") + +type writeError struct{ error } + +type writer struct { + out *bufio.Writer + prefix string + keepZero bool // If set, we also write zero values. + docs bool // If set, we write comments. 
+} + +func (w *writer) error(err error) { + panic(writeError{err}) +} + +func (w *writer) check(err error) { + if err != nil { + w.error(err) + } +} + +func (w *writer) write(s string) { + _, err := w.out.WriteString(s) + w.check(err) +} + +func (w *writer) flush() { + err := w.out.Flush() + w.check(err) +} + +func (w *writer) indent() { + w.prefix += "\t" +} + +func (w *writer) unindent() { + w.prefix = w.prefix[:len(w.prefix)-1] +} + +func isOptional(sconfTag string) bool { + return hasTagWord(sconfTag, "optional") +} + +func isIgnore(sconfTag string) bool { + return hasTagWord(sconfTag, "-") || hasTagWord(sconfTag, "ignore") +} + +func hasTagWord(sconfTag, word string) bool { + l := strings.Split(sconfTag, ",") + for _, s := range l { + if s == word { + return true + } + } + return false +} + +func (w *writer) describeMap(v reflect.Value) { + t := v.Type() + if t.Key().Kind() != reflect.String { + w.error(fmt.Errorf("map key must be string")) + } + keys := v.MapKeys() + sort.Slice(keys, func(i, j int) bool { + return keys[i].String() < keys[j].String() + }) + have := false + for _, k := range keys { + have = true + w.write(w.prefix) + w.write(k.String() + ":") + mv := v.MapIndex(k) + if !w.keepZero && mv.Kind() == reflect.Struct && isEmptyStruct(mv) { + w.write(" nil\n") + continue + } + w.describeValue(mv) + } + if have { + return + } + w.write(w.prefix) + w.write("x:") + w.describeValue(reflect.Zero(t.Elem())) +} + +// whether v is a zero value of a struct type with all fields optional or +// ignored, causing it to write nothing when using Write. 
+func isEmptyStruct(v reflect.Value) bool { + if v.Kind() != reflect.Struct { + panic("not a struct") + } + t := v.Type() + n := t.NumField() + for i := 0; i < n; i++ { + ft := t.Field(i) + tag := ft.Tag.Get("sconf") + if isIgnore(tag) { + continue + } + if !isOptional(tag) { + return false + } + if !isZeroIgnored(v.Field(i)) { + return false + } + } + return true +} + +// whether v is zero, taking ignored values into account. +func isZeroIgnored(v reflect.Value) bool { + switch v.Kind() { + case reflect.Slice, reflect.Map: + return v.Len() == 0 + case reflect.Ptr: + return v.IsZero() || isZeroIgnored(v.Elem()) + case reflect.Struct: + t := v.Type() + n := t.NumField() + for i := 0; i < n; i++ { + ft := t.Field(i) + tag := ft.Tag.Get("sconf") + if isIgnore(tag) { + continue + } + if !isZeroIgnored(v.Field(i)) { + return false + } + } + return true + default: + return v.IsZero() + } +} + +func (w *writer) describeStruct(v reflect.Value) { + t := v.Type() + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + fv := v.Field(i) + if isIgnore(f.Tag.Get("sconf")) { + continue + } + if !w.keepZero && isOptional(f.Tag.Get("sconf")) && isZeroIgnored(fv) { + continue + } + if w.docs { + doc := f.Tag.Get("sconf-doc") + optional := isOptional(f.Tag.Get("sconf")) + if doc != "" || optional { + s := "\n" + w.prefix + "# " + doc + if optional { + opt := "(optional)" + if doc != "" { + opt = " " + opt + } + s += opt + } + s += "\n" + b := &strings.Builder{} + err := xfmt.Format(b, strings.NewReader(s), xfmt.Config{MaxWidth: 80}) + w.check(err) + w.write(b.String()) + } + } + w.write(w.prefix) + w.write(f.Name + ":") + w.describeValue(fv) + } +} + +func (w *writer) describeValue(v reflect.Value) { + t := v.Type() + i := v.Interface() + + if t == durationType { + w.write(fmt.Sprintf(" %s\n", i)) + return + } + + switch t.Kind() { + default: + w.error(fmt.Errorf("unsupported value %v", t.Kind())) + return + + case reflect.Bool: + w.write(fmt.Sprintf(" %v\n", i)) + + case 
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + w.write(fmt.Sprintf(" %d\n", i)) + + case reflect.Float32, reflect.Float64: + w.write(fmt.Sprintf(" %f\n", i)) + + case reflect.String: + if strings.Contains(v.String(), "\n") { + w.error(fmt.Errorf("unsupported multiline string")) + } + w.write(fmt.Sprintf(" %s\n", i)) + + case reflect.Slice: + w.write("\n") + w.indent() + w.describeSlice(v) + w.unindent() + + case reflect.Ptr: + var pv reflect.Value + if v.IsNil() { + pv = reflect.New(t.Elem()).Elem() + } else { + pv = v.Elem() + } + w.describeValue(pv) + + case reflect.Struct: + w.write("\n") + w.indent() + w.describeStruct(v) + w.unindent() + + case reflect.Map: + w.write("\n") + w.indent() + w.describeMap(v) + w.unindent() + } +} + +func (w *writer) describeSlice(v reflect.Value) { + describeElem := func(vv reflect.Value) { + w.write(w.prefix) + w.write("-") + w.describeValue(vv) + } + + n := v.Len() + if n == 0 { + if w.keepZero { + describeElem(reflect.New(v.Type().Elem())) + } else { + w.error(errNoElem) + } + } + + for i := 0; i < n; i++ { + describeElem(v.Index(i)) + } +} diff --git a/vendor/github.com/mjl-/sconf/doc.go b/vendor/github.com/mjl-/sconf/doc.go new file mode 100644 index 0000000..a0063b1 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/doc.go @@ -0,0 +1,106 @@ +/* +Package sconf parses simple configuration files and generates commented example config files. + +Sconf is the name of this package and of the config file format. The file format +is inspired by JSON and yaml, but easier to write and use correctly. + +Sconf goals: + + - Make the application self-documenting about its configuration requirements. + - Require full configuration of an application via a config file, finding + mistakes by the operator. + - Make it easy to write a correct config file, no surprises. 
+ +Workflow for using this package: + + - Write a Go struct with the config for your application. + - Simply parse a config into that struct with Parse() or ParseFile(). + - Write out an example config file with all fields that need to be set with + Describe(), and associated comments that you configured in struct tags. + +Features of sconf as file format: + + - Types similar to JSON, mapping naturally to types in programming languages. + - Requires far fewer type-describing tokens. no "" for map keys, strings don't + require "", no [] for arrays or {} for maps (like in JSON). Sconf uses the Go + types to guide parsing the config. + - Can have comments (JSON cannot). + - Is simple, does not allow all kinds of syntaxes you would not ever want to use. + - Uses indenting for nested structures (with the indent character). + +An example config file: + + # comment for stringKey (optional) + StringKey: value1 + IntKey: 123 + BoolKey: true + Struct: + # this is the A-field + A: 321 + B: true + # (optional) + C: this is text + StringArray: + - blah + - blah + # nested structs work just as well + Nested: + - + A: 1 + B: false + C: hoi + - + A: -1 + B: true + C: hallo + +The top-level is always a map, typically parsed into a Go struct. Maps start +with a key, followed by a colon, followed by a value. Basic values like +strings, ints, bools run to the end of the line. The leading space after a +colon or dash is removed. Other values like maps and lists start on a new line, +with an additional level of indenting. List values start with a dash. Empty +lines are allowed. Multiline strings are not possible. Strings do not have +escaped characters. 

And the struct that generated this:

	var config struct {
		StringKey string `sconf-doc:"comment for stringKey" sconf:"optional"`
		IntKey    int64
		BoolKey   bool
		Struct    struct {
			A int `sconf-doc:"this is the A-field"`
			B bool
			C string `sconf:"optional"`
		}
		StringArray []string
		Nested      []struct {
			A int
			B bool
			C string
		} `sconf-doc:"nested structs work just as well"`
	}

See cmd/sconfexample/main.go for more details.

In practice, you will mostly have nested maps:

	Database:
		Host: localhost
		DBName: myapp
		User: myuser
	Mail:
		SMTP:
			TLS: true
			Host: mail.example.org

Sconf only parses config files. It does not deal with command-line flags or
environment variables. Flags and environment variables are too limiting in data
types. Especially environment variables are error-prone: Applications typically
have default values they fall back to, so will not notice typos or unrecognized
variables. Config files also have the nice property of being easy to diff, copy
around, store in a VCS. In practice, command-line flags and environment
variables are commonly stored in config files. Sconf goes straight to the config
files.
+*/ +package sconf diff --git a/vendor/github.com/mjl-/sconf/parse.go b/vendor/github.com/mjl-/sconf/parse.go new file mode 100644 index 0000000..4efaf65 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/parse.go @@ -0,0 +1,308 @@ +package sconf + +import ( + "bufio" + "encoding/base64" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +type parser struct { + prefix string // indented string + input *bufio.Reader // for reading lines at a time + line string // last read line + linenumber int +} + +type parseError struct { + err error +} + +func parse(path string, src io.Reader, dst interface{}) (err error) { + p := &parser{ + input: bufio.NewReader(src), + } + defer func() { + x := recover() + if x == nil { + return + } + perr, ok := x.(parseError) + if ok { + err = fmt.Errorf("%s:%d: %v", path, p.linenumber, perr.err) + return + } + panic(x) + }() + v := reflect.ValueOf(dst) + if v.Kind() != reflect.Ptr { + p.stop("destination not a pointer") + } + p.parseStruct0(v.Elem()) + return +} + +func (p *parser) stop(err string) { + panic(parseError{errors.New(err)}) +} + +func (p *parser) check(err error, action string) { + if err != nil { + p.stop(fmt.Sprintf("%s: %s", action, err)) + } +} + +func (p *parser) string() string { + return p.line +} + +func (p *parser) leave(s string) { + p.line = s +} + +func (p *parser) consume() string { + s := p.line + p.line = "" + return s +} + +// Next returns whether the next line is properly indented, reading data as necessary. +func (p *parser) next() bool { + for p.line == "" { + s, err := p.input.ReadString('\n') + if s == "" { + if err == io.EOF { + return false + } + p.stop(err.Error()) + } + p.linenumber++ + if strings.HasPrefix(strings.TrimSpace(s), "#") { + continue + } + p.line = strings.TrimSuffix(s, "\n") + } + + // Less indenting than expected. Let caller stop, returning to its caller for lower-level indent. 
+ r := strings.HasPrefix(p.line, p.prefix) + return r +} + +func (p *parser) indent() { + p.prefix += "\t" + if !p.next() { + p.stop("expected indent") + } +} + +func (p *parser) unindent() { + p.prefix = p.prefix[1:] +} + +var durationType = reflect.TypeOf(time.Duration(0)) + +func (p *parser) parseValue(v reflect.Value) reflect.Value { + t := v.Type() + + if t == durationType { + s := p.consume() + d, err := time.ParseDuration(s) + p.check(err, "parsing duration") + v.Set(reflect.ValueOf(d)) + return v + } + + switch t.Kind() { + default: + p.stop(fmt.Sprintf("cannot parse type %v", t.Kind())) + + case reflect.Bool: + s := p.consume() + switch s { + case "false": + v.SetBool(false) + case "true": + v.SetBool(true) + default: + p.stop(fmt.Sprintf("bad boolean value %q", s)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := p.consume() + x, err := strconv.ParseInt(s, 10, 64) + p.check(err, "parsing integer") + v.SetInt(x) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + s := p.consume() + x, err := strconv.ParseUint(s, 10, 64) + p.check(err, "parsing integer") + v.SetUint(x) + + case reflect.Float32, reflect.Float64: + s := p.consume() + x, err := strconv.ParseFloat(s, 64) + p.check(err, "parsing float") + v.SetFloat(x) + + case reflect.String: + v.SetString(p.consume()) + + case reflect.Slice: + v = p.parseSlice(v) + + case reflect.Ptr: + vv := reflect.New(t.Elem()) + p.parseValue(vv.Elem()) + v.Set(vv) + + case reflect.Struct: + p.parseStruct(v) + + case reflect.Map: + v = reflect.MakeMap(t) + p.parseMap(v) + } + return v +} + +func (p *parser) parseSlice(v reflect.Value) reflect.Value { + if v.Type().Elem().Kind() == reflect.Uint8 { + s := p.consume() + buf, err := base64.StdEncoding.DecodeString(s) + p.check(err, "parsing base64") + v.SetBytes(buf) + return v + } + + p.indent() + defer p.unindent() + return p.parseSlice0(v) +} + +func (p *parser) parseSlice0(v reflect.Value) 
reflect.Value { + for p.next() { + s := p.string() + prefix := p.prefix + "-" + if !strings.HasPrefix(s, prefix) { + p.stop(fmt.Sprintf("expected item, prefix %q, saw %q", prefix, s)) + } + s = s[len(prefix):] + if s != "" { + if !strings.HasPrefix(s, " ") { + p.stop("missing space after -") + } + s = s[1:] + } + p.leave(s) + vv := reflect.New(v.Type().Elem()).Elem() + vv = p.parseValue(vv) + v = reflect.Append(v, vv) + } + return v +} + +func (p *parser) parseStruct(v reflect.Value) { + p.indent() + defer p.unindent() + p.parseStruct0(v) +} + +func (p *parser) parseStruct0(v reflect.Value) { + seen := map[string]struct{}{} + var zeroValue reflect.Value + t := v.Type() + for p.next() { + s := p.string() + s = s[len(p.prefix):] + l := strings.SplitN(s, ":", 2) + if len(l) != 2 { + p.stop("missing key: value") + } + k := l[0] + if k == "" { + p.stop("empty key") + } + if _, ok := seen[k]; ok { + p.stop("duplicate key") + } + seen[k] = struct{}{} + s = l[1] + if s != "" && !strings.HasPrefix(s, " ") { + p.stop("no space after colon") + } + if s != "" { + s = s[1:] + } + p.leave(s) + + vv := v.FieldByName(k) + if vv == zeroValue { + p.stop(fmt.Sprintf("unknown key %q", k)) + } + if ft, _ := t.FieldByName(k); isIgnore(ft.Tag.Get("sconf")) { + p.stop(fmt.Sprintf("unknown key %q (has ignore tag)", k)) + } + vv.Set(p.parseValue(vv)) + } + + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if isIgnore(f.Tag.Get("sconf")) || isOptional(f.Tag.Get("sconf")) { + continue + } + if _, ok := seen[f.Name]; !ok { + p.stop(fmt.Sprintf("missing required key %q", f.Name)) + } + } +} + +func (p *parser) parseMap(v reflect.Value) { + p.indent() + defer p.unindent() + p.parseMap0(v) +} + +func (p *parser) parseMap0(v reflect.Value) { + seen := map[string]struct{}{} + t := v.Type() + for p.next() { + s := p.string() + s = s[len(p.prefix):] + l := strings.SplitN(s, ":", 2) + if len(l) != 2 { + p.stop("missing key: value") + } + k := l[0] + if k == "" { + p.stop("empty key") 
+ } + if _, ok := seen[k]; ok { + p.stop("duplicate key") + } + seen[k] = struct{}{} + s = l[1] + if s != "" && !strings.HasPrefix(s, " ") { + p.stop("no space after colon") + } + if s != "" { + s = s[1:] + } + + vv := reflect.New(t.Elem()).Elem() + if s == "nil" { + // Special value "nil" means the zero value, no further parsing of a value. + p.leave("") + } else { + p.leave(s) + vv = p.parseValue(vv) + } + v.SetMapIndex(reflect.ValueOf(k), vv) + } +} diff --git a/vendor/github.com/mjl-/sconf/sconf.go b/vendor/github.com/mjl-/sconf/sconf.go new file mode 100644 index 0000000..148e267 --- /dev/null +++ b/vendor/github.com/mjl-/sconf/sconf.go @@ -0,0 +1,71 @@ +package sconf + +import ( + "bufio" + "fmt" + "io" + "os" + "reflect" +) + +// ParseFile reads an sconf file from path into dst. +func ParseFile(path string, dst interface{}) error { + src, err := os.Open(path) + if err != nil { + return err + } + defer src.Close() + return parse(path, src, dst) +} + +// Parse reads an sconf file from a reader into dst. +func Parse(src io.Reader, dst interface{}) error { + return parse("", src, dst) +} + +// Describe writes an example sconf file describing v to w. The file includes all +// fields, values and documentation on the fields as configured with the "sconf" +// and "sconf-doc" struct tags. Describe does not detect recursive values and will +// attempt to write them. +func Describe(w io.Writer, v interface{}) error { + return describe(w, v, true, true) +} + +// Write writes a valid sconf file describing v to w, without comments, without +// zero values of optional fields. Write does not detect recursive values and +// will attempt to write them. +func Write(w io.Writer, v interface{}) error { + return describe(w, v, false, false) +} + +// WriteDocs is like Write, but does write comments. 
+func WriteDocs(w io.Writer, v interface{}) error { + return describe(w, v, false, true) +} + +func describe(w io.Writer, v interface{}, keepZero bool, docs bool) (err error) { + value := reflect.ValueOf(v) + t := value.Type() + if t.Kind() == reflect.Ptr { + value = value.Elem() + t = value.Type() + } + if t.Kind() != reflect.Struct { + return fmt.Errorf("top level object must be a struct, is a %T", v) + } + defer func() { + x := recover() + if x == nil { + return + } + if e, ok := x.(writeError); ok { + err = error(e) + } else { + panic(x) + } + }() + wr := &writer{out: bufio.NewWriter(w), keepZero: keepZero, docs: docs} + wr.describeStruct(value) + wr.flush() + return nil +} diff --git a/vendor/github.com/mjl-/sherpa/.gitignore b/vendor/github.com/mjl-/sherpa/.gitignore new file mode 100644 index 0000000..98a3ee5 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/.gitignore @@ -0,0 +1,4 @@ +/cover.out +/cover.html + +*\.swp diff --git a/vendor/github.com/mjl-/sherpa/LICENSE b/vendor/github.com/mjl-/sherpa/LICENSE new file mode 100644 index 0000000..00c0ba5 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016-2018 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mjl-/sherpa/LICENSE-go b/vendor/github.com/mjl-/sherpa/LICENSE-go new file mode 100644 index 0000000..ea5ea89 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/LICENSE-go @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/mjl-/sherpa/Makefile b/vendor/github.com/mjl-/sherpa/Makefile new file mode 100644 index 0000000..561eb04 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/Makefile @@ -0,0 +1,16 @@ +build: + go build ./... + go vet ./... + +test: + go test -coverprofile=cover.out ./... + go tool cover -html=cover.out -o cover.html + golint ./... + +coverage: + +clean: + go clean ./... + +fmt: + go fmt ./... diff --git a/vendor/github.com/mjl-/sherpa/README.md b/vendor/github.com/mjl-/sherpa/README.md new file mode 100644 index 0000000..0ccdef2 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/README.md @@ -0,0 +1,39 @@ +# Sherpa + +Sherpa is a Go library for creating a [sherpa API](https://www.ueber.net/who/mjl/sherpa/). + +This library makes it trivial to export Go functions as a sherpa API with an http.Handler. + +Your API will automatically be documented: github.com/mjl-/sherpadoc reads your Go source, and exports function and type comments as API documentation. + +See the [documentation](https://godoc.org/github.com/mjl-/sherpa). + + +## Examples + +A public sherpa API: https://www.sherpadoc.org/#https://www.sherpadoc.org/example/ + +That web application is [sherpaweb](https://github.com/mjl-/sherpaweb). It shows documentation for any sherpa API but also includes an API called Example for demo purposes. + +[Ding](https://github.com/mjl-/ding/) is a more elaborate web application built with this library. + + +# About + +Written by Mechiel Lukkien, mechiel@ueber.net. +Bug fixes, patches, comments are welcome. +MIT-licensed, see LICENSE. + + +# todo + +- add a toggle for enabling calls by GET request. turn off by default for functions with parameters, people might be making requests with sensitive information in query strings... 
+- include a sherpaweb-like page that displays the documentation +- consider adding input & output validation and timestamp conversion to plain js lib +- consider using interfaces with functions (instead of direct structs) for server implementations. haven't needed it yet, but could be useful for mocking an api that you want to talk to. +- think about way to keep unknown fields. perhaps use a json lib that collects unknown keys in a map (which has to be added to the object for which you want to keep such keys). +- sherpajs: make a versionied, minified variant, with license line +- tool for comparing two jsons for compatibility, listing added sections/functions/types/fields +- be more helpful around errors that functions can generate. perhaps adding a mechanism for listing which errors can occur in the api json. +- handler: write tests +- client: write tests diff --git a/vendor/github.com/mjl-/sherpa/codes.go b/vendor/github.com/mjl-/sherpa/codes.go new file mode 100644 index 0000000..2b3f23e --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/codes.go @@ -0,0 +1,19 @@ +package sherpa + +// Errors generated by both clients and servers +const ( + SherpaBadFunction = "sherpa:badFunction" // Function does not exist at server. +) + +// Errors generated by clients +const ( + SherpaBadResponse = "sherpa:badResponse" // Bad response from server, e.g. JSON response body could not be parsed. + SherpaHTTPError = "sherpa:http" // Unexpected http response status code from server. + SherpaNoAPI = "sherpa:noAPI" // No API was found at this URL. +) + +// Errors generated by servers +const ( + SherpaBadRequest = "sherpa:badRequest" // Error parsing JSON request body. + SherpaBadParams = "sherpa:badParams" // Wrong number of parameters in function call. 
+) diff --git a/vendor/github.com/mjl-/sherpa/collector.go b/vendor/github.com/mjl-/sherpa/collector.go new file mode 100644 index 0000000..3045954 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/collector.go @@ -0,0 +1,21 @@ +package sherpa + +// Collector facilitates collection of metrics. Functions are called by the library as such events or errors occur. +// See https://github.com/irias/sherpa-prometheus-collector for an implementation for prometheus. +type Collector interface { + ProtocolError() // Invalid request at protocol-level, e.g. wrong mimetype or request body. + BadFunction() // Function does not exist. + JavaScript() // Sherpa.js is requested. + JSON() // Sherpa.json is requested. + + // Call of function, how long it took, and in case of failure, the error code. + FunctionCall(name string, durationSec float64, errorCode string) +} + +type ignoreCollector struct{} + +func (ignoreCollector) ProtocolError() {} +func (ignoreCollector) BadFunction() {} +func (ignoreCollector) JavaScript() {} +func (ignoreCollector) JSON() {} +func (ignoreCollector) FunctionCall(name string, durationSec float64, errorCode string) {} diff --git a/vendor/github.com/mjl-/sherpa/doc.go b/vendor/github.com/mjl-/sherpa/doc.go new file mode 100644 index 0000000..a9a9cf8 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/doc.go @@ -0,0 +1,8 @@ +// Package sherpa exports your Go functions as fully documented sherpa web API's. +// +// Sherpa is similar to JSON-RPC, but discoverable and self-documenting. +// Read more at https://www.ueber.net/who/mjl/sherpa/. +// +// Use sherpa.NewHandler to export Go functions using a http.Handler. 
+// An example of how to use NewHandler can be found in https://github.com/mjl-/sherpaweb/ +package sherpa diff --git a/vendor/github.com/mjl-/sherpa/handler.go b/vendor/github.com/mjl-/sherpa/handler.go new file mode 100644 index 0000000..1e3cafd --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/handler.go @@ -0,0 +1,653 @@ +package sherpa + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "html/template" + "io" + "log" + "mime" + "net/http" + "reflect" + "strings" + "time" + "unicode" + + "github.com/mjl-/sherpadoc" +) + +// SherpaVersion is the version of the Sherpa protocol this package implements. Sherpa is at version 1. +const SherpaVersion = 1 + +// JSON holds all fields for a request to sherpa.json. +type JSON struct { + ID string `json:"id"` + Title string `json:"title"` + Functions []string `json:"functions"` + BaseURL string `json:"baseurl"` + Version string `json:"version"` + SherpaVersion int `json:"sherpaVersion"` + SherpadocVersion int `json:"sherpadocVersion"` +} + +// HandlerOpts are options for creating a new handler. +type HandlerOpts struct { + Collector Collector // Holds functions for collecting metrics about function calls and other incoming HTTP requests. May be nil. + LaxParameterParsing bool // If enabled, incoming sherpa function calls will ignore unrecognized fields in struct parameters, instead of failing. + AdjustFunctionNames string // If empty, only the first character of function names are lower cased. For "lowerWord", the first string of capitals is lowercased, for "none", the function name is left as is. +} + +// Raw signals a raw JSON response. +// If a handler panics with this type, the raw bytes are sent (with regular +// response headers). +// Can be used to skip the json encoding from the handler, eg for caching, or +// when you read a properly formatted JSON document from a file or database. 
+// By using panic to signal a raw JSON response, the return types stay intact +// for sherpadoc to generate documentation from. +type Raw []byte + +// handler that responds to all Sherpa-related requests. +type handler struct { + path string + functions map[string]reflect.Value + sherpaJSON *JSON + opts HandlerOpts +} + +// Error returned by a function called through a sherpa API. +// Message is a human-readable error message. +// Code is optional, it can be used to handle errors programmatically. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (e *Error) Error() string { + return e.Message +} + +// InternalServerError is an error that propagates as an HTTP internal server error (HTTP status 500), instead of returning a regular HTTP status 200 OK with the error message in the response body. +// Useful for making Sherpa endpoints that can be monitored by simple HTTP monitoring tools. +type InternalServerError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (e *InternalServerError) Error() string { + return e.Message +} + +func (e *InternalServerError) error() *Error { + return &Error{"internalServerError", e.Message} +} + +// Sherpa API response type +type response struct { + Result interface{} `json:"result"` + Error *Error `json:"error,omitempty"` +} + +var htmlTemplate *template.Template + +func init() { + var err error + htmlTemplate, err = template.New("html").Parse(` + + + + {{.title}} + + + +
+

{{.title}} - version {{.version}}

+

+ This is the base URL for {{.title}}. The API has been loaded on this page, under variable {{.id}}. So open your browser's developer console and start calling functions! +

+

+ You can also the read documentation for this API.

+

+

+ go sherpa code | + sherpa api's | + sherpaweb code +

+
+ + +`) + if err != nil { + panic(err) + } +} + +func getBaseURL(r *http.Request) string { + host := r.Header.Get("X-Forwarded-Host") + if host == "" { + host = r.Host + } + scheme := r.Header.Get("X-Forwarded-Proto") + if scheme == "" { + scheme = "http" + } + return scheme + "://" + host +} + +func respondJSON(w http.ResponseWriter, status int, v interface{}) { + respond(w, status, v, false, "") +} + +func respond(w http.ResponseWriter, status int, v interface{}, jsonp bool, callback string) { + if jsonp { + w.Header().Add("Content-Type", "text/javascript; charset=utf-8") + } else { + w.Header().Add("Content-Type", "application/json; charset=utf-8") + } + w.WriteHeader(status) + var err error + if jsonp { + _, err = fmt.Fprintf(w, "%s(\n\t", callback) + } + if raw, ok := v.(Raw); err == nil && ok { + _, err = w.Write([]byte(`{"result":`)) + if err == nil { + _, err = w.Write(raw) + } + if err == nil { + _, err = w.Write([]byte("}")) + } + } else if err == nil && !ok { + err = json.NewEncoder(w).Encode(v) + } + if err == nil && jsonp { + _, err = fmt.Fprint(w, ");") + } + if err != nil && !isConnectionClosed(err) { + log.Println("writing response:", err) + } +} + +// Call function fn with a json body read from r. +// Ctx is from the http.Request, and is canceled when the http connection goes away. +// +// on success, the returned interface contains: +// - nil, if fn has no return value +// - single value, if fn had a single return value +// - slice of values, if fn had multiple return values +// - Raw, for a preformatted JSON response (caught from panic). +// +// on error, we always return an Error with the Code field set. 
+func (h *handler) call(ctx context.Context, functionName string, fn reflect.Value, r io.Reader) (ret interface{}, ee error) { + defer func() { + e := recover() + if e == nil { + return + } + + se, ok := e.(*Error) + if ok { + ee = se + return + } + ierr, ok := e.(*InternalServerError) + if ok { + ee = ierr + return + } + if raw, ok := e.(Raw); ok { + ret = raw + return + } + panic(e) + }() + + lcheck := func(err error, code, message string) { + if err != nil { + panic(&Error{Code: code, Message: fmt.Sprintf("function %q: %s: %s", functionName, message, err)}) + } + } + + var request struct { + Params json.RawMessage `json:"params"` + } + + dec := json.NewDecoder(r) + dec.DisallowUnknownFields() + err := dec.Decode(&request) + lcheck(err, SherpaBadRequest, "invalid JSON request body") + + fnt := fn.Type() + + var params []interface{} + err = json.Unmarshal(request.Params, ¶ms) + lcheck(err, SherpaBadRequest, "invalid JSON request body") + + needArgs := fnt.NumIn() + needValues := needArgs + ctxType := reflect.TypeOf((*context.Context)(nil)).Elem() + needsContext := needValues > 0 && fnt.In(0).Implements(ctxType) + if needsContext { + needArgs-- + } + if fnt.IsVariadic() { + if len(params) != needArgs-1 && len(params) != needArgs { + err = fmt.Errorf("got %d, want %d or %d", len(params), needArgs-1, needArgs) + } + } else { + if len(params) != needArgs { + err = fmt.Errorf("got %d, want %d", len(params), needArgs) + } + } + lcheck(err, SherpaBadParams, "bad number of parameters") + + values := make([]reflect.Value, needValues) + o := 0 + if needsContext { + values[0] = reflect.ValueOf(ctx) + o = 1 + } + args := make([]interface{}, needArgs) + for i := range args { + n := reflect.New(fnt.In(o + i)) + values[o+i] = n.Elem() + args[i] = n.Interface() + } + + dec = json.NewDecoder(bytes.NewReader(request.Params)) + if !h.opts.LaxParameterParsing { + dec.DisallowUnknownFields() + } + err = dec.Decode(&args) + lcheck(err, SherpaBadParams, "parsing parameters") + + 
errorType := reflect.TypeOf((*error)(nil)).Elem() + checkError := fnt.NumOut() > 0 && fnt.Out(fnt.NumOut()-1).Implements(errorType) + + var results []reflect.Value + if fnt.IsVariadic() { + results = fn.CallSlice(values) + } else { + results = fn.Call(values) + } + if len(results) == 0 { + return nil, nil + } + + rr := make([]interface{}, len(results)) + for i, v := range results { + rr[i] = v.Interface() + } + if !checkError { + if len(rr) == 1 { + return rr[0], nil + } + return rr, nil + } + rr, rerr := rr[:len(rr)-1], rr[len(rr)-1] + var rv interface{} = rr + switch len(rr) { + case 0: + rv = nil + case 1: + rv = rr[0] + } + if rerr == nil { + return rv, nil + } + switch r := rerr.(type) { + case *Error: + return nil, r + case *InternalServerError: + return nil, r + case error: + return nil, &Error{Message: r.Error()} + default: + panic("checkError while type is not error") + } +} + +func adjustFunctionNameCapitals(s string, opts HandlerOpts) string { + switch opts.AdjustFunctionNames { + case "": + return strings.ToLower(s[:1]) + s[1:] + case "none": + return s + case "lowerWord": + r := "" + for i, c := range s { + lc := unicode.ToLower(c) + if lc == c { + r += s[i:] + break + } + r += string(lc) + } + return r + default: + panic(fmt.Sprintf("bad value for AdjustFunctionNames: %q", opts.AdjustFunctionNames)) + } +} + +func gatherFunctions(functions map[string]reflect.Value, t reflect.Type, v reflect.Value, opts HandlerOpts) error { + if t.Kind() != reflect.Struct { + return fmt.Errorf("sherpa sections must be a struct (not a ptr)") + } + for i := 0; i < t.NumMethod(); i++ { + name := adjustFunctionNameCapitals(t.Method(i).Name, opts) + m := v.Method(i) + if _, ok := functions[name]; ok { + return fmt.Errorf("duplicate function %s", name) + } + functions[name] = m + } + for i := 0; i < t.NumField(); i++ { + err := gatherFunctions(functions, t.Field(i).Type, v.Field(i), opts) + if err != nil { + return err + } + } + return nil +} + +// NewHandler returns a new 
http.Handler that serves all Sherpa API-related requests. +// +// Path is the path this API is available at. +// +// Version should be a semantic version. +// +// API should by a struct. It represents the root section. All methods of a +// section are exported as sherpa functions. All fields must be other sections +// (structs) whose methods are also exported. recursively. Method names must +// start with an uppercase character to be exported, but their exported names +// start with a lowercase character by default (but see HandlerOpts.AdjustFunctionNames). +// +// Doc is documentation for the top-level sherpa section, as generated by sherpadoc. +// +// Opts allows further configuration of the handler. +// +// Methods on the exported sections are exported as Sherpa functions. +// If the first parameter of a method is a context.Context, the context from the HTTP request is passed. +// This lets you abort work if the HTTP request underlying the function call disappears. +// +// Parameters and return values for exported functions are automatically converted from/to JSON. +// If the last element of a return value (if any) is an error, +// that error field is taken to indicate whether the call succeeded. +// Exported functions can also panic with an *Error or *InternalServerError to indicate a failed function call. +// Returning an error with a Code starting with "server" indicates an implementation error, which will be logged through the collector. +// +// Variadic functions can be called, but in the call (from the client), the variadic parameters must be passed in as an array. +// +// This handler strips "path" from the request. +func NewHandler(path string, version string, api interface{}, doc *sherpadoc.Section, opts *HandlerOpts) (http.Handler, error) { + var xopts HandlerOpts + if opts != nil { + xopts = *opts + } + if xopts.Collector == nil { + // We always want to have a collector, so we don't have to check for nil all the time when calling. 
+ xopts.Collector = ignoreCollector{} + } + + doc.Version = version + doc.SherpaVersion = SherpaVersion + functions := map[string]reflect.Value{ + "_docs": reflect.ValueOf(func() *sherpadoc.Section { + return doc + }), + } + err := gatherFunctions(functions, reflect.TypeOf(api), reflect.ValueOf(api), xopts) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(functions)) + for name := range functions { + names = append(names, name) + } + + elems := strings.Split(strings.Trim(path, "/"), "/") + id := elems[len(elems)-1] + sherpaJSON := &JSON{ + ID: id, + Title: doc.Name, + Functions: names, + BaseURL: "", // filled in during request + Version: version, + SherpaVersion: SherpaVersion, + SherpadocVersion: doc.SherpadocVersion, + } + h := http.StripPrefix(path, &handler{ + path: path, + functions: functions, + sherpaJSON: sherpaJSON, + opts: xopts, + }) + return h, nil +} + +func badMethod(w http.ResponseWriter) { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) +} + +// return whether callback js snippet is valid. +// this is a coarse test. we disallow some valid js identifiers, like "\u03c0", +// and we allow many invalid ones, such as js keywords, "0intro" and identifiers starting/ending with ".", or having multiple dots. +func validCallback(cb string) bool { + if cb == "" { + return false + } + for _, c := range cb { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '_' || c == '$' || c == '.' { + continue + } + return false + } + return true +} + +// Serve a HTTP request for this Sherpa API. +// ServeHTTP expects the request path is stripped from the path it was mounted at with the http package. +// +// The following endpoints are handled: +// - sherpa.json, describing this API. +// - sherpa.js, a small stand-alone client JavaScript library that makes it trivial to start using this API from a browser. +// - functionName, for function invocations on this API. 
+// +// HTTP response will have CORS-headers set, and support the OPTIONS HTTP method. +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + hdr := w.Header() + hdr.Set("Access-Control-Allow-Origin", "*") + hdr.Set("Access-Control-Allow-Methods", "GET, POST") + hdr.Set("Access-Control-Allow-Headers", "Content-Type") + + collector := h.opts.Collector + + switch { + case r.URL.Path == "": + baseURL := getBaseURL(r) + h.path + docURL := "https://www.sherpadoc.org/#" + baseURL + err := htmlTemplate.Execute(w, map[string]interface{}{ + "id": h.sherpaJSON.ID, + "title": h.sherpaJSON.Title, + "version": h.sherpaJSON.Version, + "docURL": docURL, + "jsURL": baseURL + "sherpa.js", + }) + if err != nil { + log.Println(err) + } + + case r.URL.Path == "sherpa.json": + switch r.Method { + case "OPTIONS": + w.WriteHeader(204) + case "GET": + collector.JSON() + hdr.Set("Content-Type", "application/json; charset=utf-8") + hdr.Set("Cache-Control", "no-cache") + sherpaJSON := &*h.sherpaJSON + sherpaJSON.BaseURL = getBaseURL(r) + h.path + err := json.NewEncoder(w).Encode(sherpaJSON) + if err != nil { + log.Println("writing sherpa.json response:", err) + } + default: + badMethod(w) + } + + case r.URL.Path == "sherpa.js": + if r.Method != "GET" { + badMethod(w) + return + } + collector.JavaScript() + hdr.Set("Content-Type", "text/javascript; charset=utf-8") + hdr.Set("Cache-Control", "no-cache") + sherpaJSON := &*h.sherpaJSON + sherpaJSON.BaseURL = getBaseURL(r) + h.path + buf, err := json.Marshal(sherpaJSON) + js := strings.Replace(sherpaJS, "{{.sherpaJSON}}", string(buf), -1) + _, err = w.Write([]byte(js)) + if err != nil { + log.Println("writing sherpa.js response:", err) + } + + default: + name := r.URL.Path + fn, ok := h.functions[name] + switch r.Method { + case "OPTIONS": + w.WriteHeader(204) + + case "POST": + hdr.Set("Cache-Control", "no-store") + + if !ok { + collector.BadFunction() + respondJSON(w, 404, &response{Error: &Error{Code: 
SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", name)}}) + return + } + + ct := r.Header.Get("Content-Type") + if ct == "" { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("missing content-type")}}) + return + } + mt, mtparams, err := mime.ParseMediaType(ct) + if err != nil { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("invalid content-type %q", ct)}}) + return + } + if mt != "application/json" { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unrecognized content-type %q, expecting "application/json"`, mt)}}) + return + } + charset, ok := mtparams["charset"] + if ok && strings.ToLower(charset) != "utf-8" { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unexpected charset %q, expecting "utf-8"`, charset)}}) + return + } + + t0 := time.Now() + r, xerr := h.call(r.Context(), name, fn, r.Body) + durationSec := float64(time.Now().Sub(t0)) / float64(time.Second) + if xerr != nil { + switch err := xerr.(type) { + case *InternalServerError: + collector.FunctionCall(name, durationSec, err.Code) + respondJSON(w, 500, &response{Error: err.error()}) + case *Error: + collector.FunctionCall(name, durationSec, err.Code) + respondJSON(w, 200, &response{Error: err}) + default: + collector.FunctionCall(name, durationSec, "server:panic") + panic(err) + } + } else { + var v interface{} + if raw, ok := r.(Raw); ok { + v = raw + } else { + v = &response{Result: r} + } + collector.FunctionCall(name, durationSec, "") + respondJSON(w, 200, v) + } + + case "GET": + hdr.Set("Cache-Control", "no-store") + + jsonp := false + if !ok { + collector.BadFunction() + respondJSON(w, 404, &response{Error: &Error{Code: SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", 
name)}}) + return + } + + err := r.ParseForm() + if err != nil { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("could not parse query string")}}) + return + } + + callback := r.Form.Get("callback") + _, ok := r.Form["callback"] + if ok { + if !validCallback(callback) { + collector.ProtocolError() + respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`invalid callback name %q`, callback)}}) + return + } + jsonp = true + } + + // We allow an empty list to be missing to make it cleaner & easier to call health check functions (no ugly urls). + body := r.Form.Get("body") + _, ok = r.Form["body"] + if !ok { + body = `{"params": []}` + } + + t0 := time.Now() + r, xerr := h.call(r.Context(), name, fn, strings.NewReader(body)) + durationSec := float64(time.Now().Sub(t0)) / float64(time.Second) + if xerr != nil { + switch err := xerr.(type) { + case *InternalServerError: + collector.FunctionCall(name, durationSec, err.Code) + respond(w, 500, &response{Error: err.error()}, jsonp, callback) + case *Error: + collector.FunctionCall(name, durationSec, err.Code) + respond(w, 200, &response{Error: err}, jsonp, callback) + default: + collector.FunctionCall(name, durationSec, "server:panic") + panic(err) + } + } else { + var v interface{} + if raw, ok := r.(Raw); ok { + v = raw + } else { + v = &response{Result: r} + } + collector.FunctionCall(name, durationSec, "") + respond(w, 200, v, jsonp, callback) + } + + default: + badMethod(w) + } + } +} diff --git a/vendor/github.com/mjl-/sherpa/intstr.go b/vendor/github.com/mjl-/sherpa/intstr.go new file mode 100644 index 0000000..b08f1c2 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/intstr.go @@ -0,0 +1,87 @@ +package sherpa + +import ( + "encoding/json" + "fmt" + "strconv" +) + +// Int64s is an int64 that can be read as either a JSON string or JSON number, to +// be used in sherpa function parameters for compatibility with 
JavaScript. +// For struct fields, use the "json:,string" struct tag instead. +type Int64s int64 + +// Int returns the int64 value. +func (i Int64s) Int() int64 { + return int64(i) +} + +// MarshalJSON returns a JSON-string-encoding of the int64. +func (i *Int64s) MarshalJSON() ([]byte, error) { + var v int64 + if i != nil { + v = int64(*i) + } + return json.Marshal(fmt.Sprintf("%d", v)) +} + +// UnmarshalJSON parses JSON into the int64. Both a string encoding as a number +// encoding are allowed. JavaScript clients must use the string encoding because +// the number encoding loses precision at 1<<53. +func (i *Int64s) UnmarshalJSON(buf []byte) error { + var s string + if len(buf) > 0 && buf[0] == '"' { + err := json.Unmarshal(buf, &s) + if err != nil { + return err + } + } else { + s = string(buf) + } + vv, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *i = Int64s(vv) + return nil +} + +// Uint64s is an uint64 that can be read as either a JSON string or JSON number, to +// be used in sherpa function parameters for compatibility with JavaScript. +// For struct fields, use the "json:,string" struct tag instead. +type Uint64s uint64 + +// Int returns the uint64 value. +func (i Uint64s) Int() uint64 { + return uint64(i) +} + +// MarshalJSON returns a JSON-string-encoding of the uint64. +func (i *Uint64s) MarshalJSON() ([]byte, error) { + var v uint64 + if i != nil { + v = uint64(*i) + } + return json.Marshal(fmt.Sprintf("%d", v)) +} + +// UnmarshalJSON parses JSON into the uint64. Both a string encoding as a number +// encoding are allowed. JavaScript clients must use the string encoding because +// the number encoding loses precision at 1<<53. 
+func (i *Uint64s) UnmarshalJSON(buf []byte) error { + var s string + if len(buf) > 0 && buf[0] == '"' { + err := json.Unmarshal(buf, &s) + if err != nil { + return err + } + } else { + s = string(buf) + } + vv, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *i = Uint64s(vv) + return nil +} diff --git a/vendor/github.com/mjl-/sherpa/isclosed.go b/vendor/github.com/mjl-/sherpa/isclosed.go new file mode 100644 index 0000000..f935c99 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/isclosed.go @@ -0,0 +1,13 @@ +//go:build !plan9 +// +build !plan9 + +package sherpa + +import ( + "errors" + "syscall" +) + +func isConnectionClosed(err error) bool { + return errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) +} diff --git a/vendor/github.com/mjl-/sherpa/isclosed_plan9.go b/vendor/github.com/mjl-/sherpa/isclosed_plan9.go new file mode 100644 index 0000000..aef23d3 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/isclosed_plan9.go @@ -0,0 +1,6 @@ +package sherpa + +func isConnectionClosed(err error) bool { + // todo: needs a better test + return false +} diff --git a/vendor/github.com/mjl-/sherpa/sherpajs.go b/vendor/github.com/mjl-/sherpa/sherpajs.go new file mode 100644 index 0000000..3254082 --- /dev/null +++ b/vendor/github.com/mjl-/sherpa/sherpajs.go @@ -0,0 +1,136 @@ +package sherpa + +var sherpaJS = ` +'use strict'; + +(function(undefined) { + +var sherpa = {}; + +// prepare basic support for promises. +// we return functions with a "then" method only. our "then" isn't chainable. and you don't get other promise-related methods. +// but this "then" is enough so your browser's promise library (or a polyfill) can turn it into a real promise. 
+function thenable(fn) { + var settled = false; + var fulfilled = false; + var result = null; + + var goods = []; + var bads = []; + + // promise lib will call the returned function, make it the same as our .then function + var nfn = function(goodfn, badfn) { + if(settled) { + if(fulfilled && goodfn) { + goodfn(result); + } + if(!fulfilled && badfn) { + badfn(result); + } + } else { + if(goodfn) { + goods.push(goodfn); + } + if(badfn) { + bads.push(badfn); + } + } + }; + nfn.then = nfn; + + function done() { + while(fulfilled && goods.length > 0) { + goods.shift()(result); + } + while(!fulfilled && bads.length > 0) { + bads.shift()(result); + } + } + + function makeSettle(xfulfilled) { + return function(arg) { + if(settled) { + return; + } + settled = true; + fulfilled = xfulfilled; + result = arg; + done(); + }; + } + var resolve = makeSettle(true); + var reject = makeSettle(false); + try { + fn(resolve, reject); + } catch(e) { + reject(e); + } + return nfn; +} + +function postJSON(url, param, success, error) { + var req = new window.XMLHttpRequest(); + req.open('POST', url, true); + req.onload = function onload() { + if(req.status >= 200 && req.status < 400) { + success(JSON.parse(req.responseText)); + } else { + if(req.status === 404) { + error({code: 'sherpaBadFunction', message: 'function does not exist'}); + } else { + error({code: 'sherpaHttpError', message: 'error calling function, HTTP status: '+req.status}); + } + } + }; + req.onerror = function onerror() { + error({code: 'sherpaClientError', message: 'connection failed'}); + }; + req.setRequestHeader('Content-Type', 'application/json'); + req.send(JSON.stringify(param)); +} + +function makeFunction(api, name) { + return function() { + var params = Array.prototype.slice.call(arguments, 0); + return api._wrapThenable(thenable(function(resolve, reject) { + postJSON(api._sherpa.baseurl+name, {params: params}, function(response) { + if(response && response.error) { + reject(response.error); + } else 
if(response && response.hasOwnProperty('result')) { + resolve(response.result); + } else { + reject({code: 'sherpaBadResponse', message: "invalid sherpa response object, missing 'result'"}); + } + }, reject); + })); + }; +} + +sherpa.init = function init(_sherpa) { + var api = {}; + + function _wrapThenable(thenable) { + return thenable; + } + + function _call(name) { + return makeFunction(api, name).apply(Array.prototype.slice.call(arguments, 1)); + } + + api._sherpa = _sherpa; + api._wrapThenable = _wrapThenable; + api._call = _call; + for(var i = 0; i < _sherpa.functions.length; i++) { + var fn = _sherpa.functions[i]; + api[fn] = makeFunction(api, fn); + } + + return api; +}; + + +var _sherpa = {{.sherpaJSON}}; +window[_sherpa.id] = sherpa.init(_sherpa); + +})(); +` diff --git a/vendor/github.com/mjl-/sherpadoc/LICENSE b/vendor/github.com/mjl-/sherpadoc/LICENSE new file mode 100644 index 0000000..53a8691 --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016-2019 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mjl-/sherpadoc/README.txt b/vendor/github.com/mjl-/sherpadoc/README.txt new file mode 100644 index 0000000..d9caf8d --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/README.txt @@ -0,0 +1,28 @@ +sherpadoc - documentation for sherpa APIs + +Go package containing type definitions for sherpa documentation for encoding to and decoding from json. +Also contains the sherpadoc command that reads Go code and writes sherpadoc JSON. + +Use together with the sherpa library, github.com/mjl-/sherpa. +Read more about sherpa at https://www.ueber.net/who/mjl/sherpa/ + +# About + +Written by Mechiel Lukkien, mechiel@ueber.net. +Bug fixes, patches, comments are welcome. +MIT-licensed, see LICENSE. + +# todo + +- major cleanup required. too much parsing is done that can probably be handled by the go/* packages. +- check that all cases of embedding work +- check that all cross-package referencing (ast.SelectorExpr) works +- better cli syntax for replacements, and always replace based on fully qualified names. currently you need to specify both the fully qualified and unqualified type paths. +- see if order of items in output depends on a map somewhere, i've seen diffs for generated jsons where a type was only moved, not modified. +- better error messages and error handling, stricter parsing +- support type aliases +- support plain iota enums? currently only simple literals are supported for enums. +- support complete expressions for enum consts? +- find out which go constructs people want to use that aren't yet implemented by sherpadoc +- when to make a field nullable. when omitempty is set? (currently yes), when field is a pointer type (currently yes).
should we have a way to prevent nullable without omitempty set, or make field a pointer without it being nullable? +- write tests diff --git a/vendor/github.com/mjl-/sherpadoc/check.go b/vendor/github.com/mjl-/sherpadoc/check.go new file mode 100644 index 0000000..40b1141 --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/check.go @@ -0,0 +1,166 @@ +package sherpadoc + +import ( + "fmt" +) + +type genError struct{ error } + +func parseError(path string, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + err := fmt.Errorf("invalid sherpadoc at %s: %s", path, msg) + panic(genError{err}) +} + +func makePath(path string, field string, index int, name string) string { + return fmt.Sprintf("%s.%s[%d (%q)]", path, field, index, name) +} + +// NOTE: sherpaweb/ts/parse.ts and sherpadoc/check.go contain the same checking. +// The code is very similar. Best keep it in sync and modify the implementations in tandem. +type checker struct { + types map[string]struct{} + functions map[string]struct{} +} + +func (c checker) markIdent(path, ident string) { + if _, ok := c.types[ident]; ok { + parseError(path, "duplicate type %q", ident) + } + c.types[ident] = struct{}{} +} + +func (c checker) walkTypeNames(path string, sec *Section) { + for i, t := range sec.Structs { + c.markIdent(makePath(path, "Structs", i, t.Name), t.Name) + } + for i, t := range sec.Ints { + npath := makePath(path, "Ints", i, t.Name) + c.markIdent(npath, t.Name) + for j, v := range t.Values { + c.markIdent(makePath(npath, "Values", j, v.Name), v.Name) + } + } + for i, t := range sec.Strings { + npath := makePath(path, "Strings", i, t.Name) + c.markIdent(npath, t.Name) + for j, v := range t.Values { + c.markIdent(makePath(npath, "Values", j, v.Name), v.Name) + } + } + for i, subsec := range sec.Sections { + c.walkTypeNames(makePath(path, "Sections", i, subsec.Name), subsec) + } +} + +func (c checker) walkFunctionNames(path string, sec *Section) { + for i, fn := range sec.Functions { 
+ npath := makePath(path, "Functions", i, fn.Name) + if _, ok := c.functions[fn.Name]; ok { + parseError(npath, "duplicate function %q", fn.Name) + } + c.functions[fn.Name] = struct{}{} + + paramNames := map[string]struct{}{} + for i, arg := range fn.Params { + if _, ok := paramNames[arg.Name]; ok { + parseError(makePath(npath, "Params", i, arg.Name), "duplicate parameter name") + } + paramNames[arg.Name] = struct{}{} + } + + returnNames := map[string]struct{}{} + for i, arg := range fn.Returns { + if _, ok := returnNames[arg.Name]; ok { + parseError(makePath(npath, "Returns", i, arg.Name), "duplicate return name") + } + returnNames[arg.Name] = struct{}{} + } + } + for i, subsec := range sec.Sections { + c.walkFunctionNames(makePath(path, "Sections", i, subsec.Name), subsec) + } +} + +func (c checker) checkTypewords(path string, tokens []string, okNullable bool) { + if len(tokens) == 0 { + parseError(path, "unexpected end of typewords") + } + t := tokens[0] + tokens = tokens[1:] + switch t { + case "nullable": + if !okNullable { + parseError(path, "repeated nullable in typewords") + } + if len(tokens) == 0 { + parseError(path, "missing typeword after %#v", t) + } + c.checkTypewords(path, tokens, false) + case "any", "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "int64s", "uint64s", "float32", "float64", "string", "timestamp": + if len(tokens) != 0 { + parseError(path, "leftover typewords %v", tokens) + } + case "[]", "{}": + if len(tokens) == 0 { + parseError(path, "missing typeword after %#v", t) + } + c.checkTypewords(path, tokens, true) + default: + _, ok := c.types[t] + if !ok { + parseError(path, "referenced type %q does not exist", t) + } + if len(tokens) != 0 { + parseError(path, "leftover typewords %v", tokens) + } + } +} + +func (c checker) walkTypewords(path string, sec *Section) { + for i, t := range sec.Structs { + npath := makePath(path, "Structs", i, t.Name) + for j, f := range t.Fields { + 
c.checkTypewords(makePath(npath, "Fields", j, f.Name), f.Typewords, true) + } + } + for i, fn := range sec.Functions { + npath := makePath(path, "Functions", i, fn.Name) + for j, arg := range fn.Params { + c.checkTypewords(makePath(npath, "Params", j, arg.Name), arg.Typewords, true) + } + for j, arg := range fn.Returns { + c.checkTypewords(makePath(npath, "Returns", j, arg.Name), arg.Typewords, true) + } + } + for i, subsec := range sec.Sections { + c.walkTypewords(makePath(path, "Sections", i, subsec.Name), subsec) + } +} + +// Check walks the sherpa section and checks it for correctness. It checks for: +// +// - Duplicate type names. +// - Duplicate parameter or return names. +// - References to types that are not defined. +// - Validity of typewords. +func Check(doc *Section) (retErr error) { + defer func() { + e := recover() + if e != nil { + g, ok := e.(genError) + if !ok { + panic(e) + } + retErr = error(g) + } + }() + + c := checker{map[string]struct{}{}, map[string]struct{}{}} + + c.walkTypeNames("", doc) + c.walkFunctionNames("", doc) + c.walkTypewords("", doc) + + return nil +} diff --git a/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go new file mode 100644 index 0000000..af311e3 --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go @@ -0,0 +1,270 @@ +/* +Sherpadoc parses Go code and outputs sherpa documentation in JSON. + +This documentation is provided to the sherpa HTTP handler to serve +as documentation through the _docs function. + +Example: + + sherpadoc Awesome >awesome.json + +Sherpadoc parses Go code, finds a struct named "Awesome", and gathers +documentation: + +Comments above the struct are used as section documentation. Fields +in section structs are treated as subsections, and can in turn +contain subsections. These subsections and their methods are also +exported and documented in the sherpa API.
Add a struct tag "sherpa" +to override the name of the subsection, for example `sherpa:"Another +Awesome API"`. + +Comments above method names are function documentation. A synopsis +is automatically generated. + +Types used as parameters or return values are added to the section +documentation where they are used. The comments above the type are +used, as well as the comments for each field in a struct. The +documented field names know about the "json" struct field tags. + +More elaborate example: + + sherpadoc \ + -title 'Awesome API by mjl' \ + -replace 'pkg.Type string,example.com/some/pkg.SomeType [] string' \ + path/to/awesome/code Awesome \ + >awesome.json + +Most common Go code patterns for API functions have been implemented +in sherpadoc, but you may run into missing support. +*/ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/mjl-/sherpadoc" + + "golang.org/x/mod/modfile" +) + +var ( + packagePath = flag.String("package-path", ".", "of source code to parse") + replace = flag.String("replace", "", "comma-separated list of type replacements, e.g. \"somepkg.SomeType string\"") + title = flag.String("title", "", "title of the API, default is the name of the type of the main API") + adjustFunctionNames = flag.String("adjust-function-names", "", `by default, the first character of function names is turned into lower case; with "lowerWord" the first string of upper case characters is lower cased, with "none" the name is left as is`) +) + +// If there is a "vendor" directory, we'll load packages from there (instead of +// through (slower) packages.Load), and we need to know the module name to resolve +// imports to paths in vendor.
+var ( + gomodFile *modfile.File + gomodDir string +) + +type field struct { + Name string + Typewords []string + Doc string + Fields []*field +} + +func (f field) TypeString() string { + t := []string{} + for _, e := range f.Typewords { + if e == "nullable" { + e = "*" + } + t = append(t, e) + } + return strings.Join(t, "") +} + +type typeKind int + +const ( + typeStruct typeKind = iota + typeInts + typeStrings + typeBytes +) + +// NamedType represents the type of a parameter or return value. +type namedType struct { + Name string + Text string + Kind typeKind + Fields []*field // For kind is typeStruct. + // For kind is typeInts + IntValues []struct { + Name string + Value int + Docs string + } + // For kind is typeStrings + StringValues []struct { + Name string + Value string + Docs string + } +} + +type function struct { + Name string + Text string + Params []sherpadoc.Arg + Returns []sherpadoc.Arg +} + +// Section is an API section with docs, functions and subsections. +// Types are gathered per section, and moved up the section tree to the first common ancestor, so types are only documented once. +type section struct { + TypeName string // Name of the type for this section. + Name string // Name of the section. Either same as TypeName, or overridden with a "sherpa" struct tag. + Text string + Types []*namedType + Typeset map[string]struct{} + Functions []*function + Sections []*section +} + +func check(err error, action string) { + if err != nil { + log.Fatalf("%s: %s", action, err) + } +} + +func usage() { + log.Println("usage: sherpadoc [flags] section") + flag.PrintDefaults() + os.Exit(2) +} + +func main() { + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) != 1 { + usage() + } + + // If vendor exists, we load packages from it. 
+ for dir, _ := os.Getwd(); dir != "" && dir != "/"; dir = filepath.Dir(dir) { + p := filepath.Join(dir, "go.mod") + if _, err := os.Stat(p); err != nil && os.IsNotExist(err) { + continue + } else if err != nil { + log.Printf("searching for go.mod: %v", err) + break + } + + if _, err := os.Stat(filepath.Join(dir, "vendor")); err != nil { + break + } + + if gomod, err := os.ReadFile(p); err != nil { + log.Fatalf("reading go.mod: %s", err) + } else if mf, err := modfile.ParseLax("go.mod", gomod, nil); err != nil { + log.Fatalf("parsing go.mod: %s", err) + } else { + gomodFile = mf + gomodDir = dir + } + } + + section := parseDoc(args[0], *packagePath) + if *title != "" { + section.Name = *title + } + + moveTypesUp(section) + + doc := sherpaSection(section) + doc.SherpaVersion = 0 + doc.SherpadocVersion = sherpadoc.SherpadocVersion + + err := sherpadoc.Check(doc) + check(err, "checking sherpadoc output before writing") + + writeJSON(doc) +} + +func writeJSON(v interface{}) { + buf, err := json.MarshalIndent(v, "", "\t") + check(err, "marshal to json") + _, err = os.Stdout.Write(buf) + check(err, "writing json to stdout") + _, err = fmt.Println() + check(err, "write to stdout") +} + +type typeCount struct { + t *namedType + count int +} + +// Move types used in multiple sections up to their common ancestor. +func moveTypesUp(sec *section) { + // First, the process for each child. + for _, s := range sec.Sections { + moveTypesUp(s) + } + + // Count how often a type is used from here downwards. + // If more than once, move the type up to here. 
+ counts := map[string]*typeCount{} + countTypes(counts, sec) + for _, tc := range counts { + if tc.count <= 1 { + continue + } + for _, sub := range sec.Sections { + removeType(sub, tc.t) + } + if !hasType(sec, tc.t) { + sec.Types = append(sec.Types, tc.t) + } + } +} + +func countTypes(counts map[string]*typeCount, sec *section) { + for _, t := range sec.Types { + _, ok := counts[t.Name] + if !ok { + counts[t.Name] = &typeCount{t, 0} + } + counts[t.Name].count++ + } + for _, subsec := range sec.Sections { + countTypes(counts, subsec) + } +} + +func removeType(sec *section, t *namedType) { + types := make([]*namedType, 0, len(sec.Types)) + for _, tt := range sec.Types { + if tt.Name != t.Name { + types = append(types, tt) + } + } + sec.Types = types + for _, sub := range sec.Sections { + removeType(sub, t) + } +} + +func hasType(sec *section, t *namedType) bool { + for _, tt := range sec.Types { + if tt.Name == t.Name { + return true + } + } + return false +} diff --git a/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go new file mode 100644 index 0000000..cd75bc9 --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go @@ -0,0 +1,857 @@ +package main + +import ( + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "log" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/tools/go/packages" + + "github.com/mjl-/sherpadoc" +) + +// ParsedPackage possibly includes some of its imports because the package that contains the section references it. +type parsedPackage struct { + Fset *token.FileSet // Used with a token.Pos to get offending locations. + Path string // Of import, used for keeping duplicate type names from different packages unique. + Pkg *ast.Package // Needed for its files: we need a file to find the package path and identifier used to reference other types. 
+ Docpkg *doc.Package + Imports map[string]*parsedPackage // Package/import path to parsed packages. +} + +type typewords []string + +func (pp *parsedPackage) lookupType(name string) *doc.Type { + for _, t := range pp.Docpkg.Types { + if t.Name == name { + return t + } + } + return nil +} + +// Like log.Fatalf, but prefixes error message with offending file position (if known). +// pp is the package where the position tok belongs to. +func logFatalLinef(pp *parsedPackage, tok token.Pos, format string, args ...interface{}) { + if !tok.IsValid() { + log.Fatalf(format, args...) + } + msg := fmt.Sprintf(format, args...) + log.Fatalf("%s: %s", pp.Fset.Position(tok).String(), msg) +} + +// Documentation for a single field, with text above the field, and +// on the right of the field combined. +func fieldDoc(f *ast.Field) string { + s := "" + if f.Doc != nil { + s += strings.Replace(strings.TrimSpace(f.Doc.Text()), "\n", " ", -1) + } + if f.Comment != nil { + if s != "" { + s += "; " + } + s += strings.TrimSpace(f.Comment.Text()) + } + return s +} + +// Parse string literal. Errors are fatal. +func parseStringLiteral(s string) string { + r, err := strconv.Unquote(s) + check(err, "parsing string literal") + return r +} + +func jsonName(tag string, name string) string { + s := reflect.StructTag(tag).Get("json") + if s == "" || strings.HasPrefix(s, ",") { + return name + } else if s == "-" { + return "" + } else { + return strings.Split(s, ",")[0] + } +} + +// Return the names (can be none) for a field. Takes exportedness +// and JSON tag annotation into account. 
+func nameList(names []*ast.Ident, tag *ast.BasicLit) []string { + if names == nil { + return nil + } + l := []string{} + for _, name := range names { + if ast.IsExported(name.Name) { + l = append(l, name.Name) + } + } + if len(l) == 1 && tag != nil { + name := jsonName(parseStringLiteral(tag.Value), l[0]) + if name != "" { + return []string{name} + } + return nil + } + return l +} + +// Parses a top-level sherpadoc section. +func parseDoc(apiName, packagePath string) *section { + fset := token.NewFileSet() + pkgs, firstErr := parser.ParseDir(fset, packagePath, nil, parser.ParseComments) + check(firstErr, "parsing code") + for _, pkg := range pkgs { + docpkg := doc.New(pkg, "", doc.AllDecls) + + for _, t := range docpkg.Types { + if t.Name == apiName { + par := &parsedPackage{ + Fset: fset, + Path: packagePath, + Pkg: pkg, + Docpkg: docpkg, + Imports: make(map[string]*parsedPackage), + } + return parseSection(t, par) + } + } + } + log.Fatalf("type %q not found", apiName) + return nil +} + +// Parse a section and its optional subsections, recursively. +// t is the type of the struct with the sherpa methods to be parsed. +func parseSection(t *doc.Type, pp *parsedPackage) *section { + sec := §ion{ + t.Name, + t.Name, + strings.TrimSpace(t.Doc), + nil, + map[string]struct{}{}, + nil, + nil, + } + + // make list of methods to parse, sorted by position in file name. 
+ methods := make([]*doc.Func, len(t.Methods)) + copy(methods, t.Methods) + sort.Slice(methods, func(i, j int) bool { + return methods[i].Decl.Name.NamePos < methods[j].Decl.Name.NamePos + }) + + for _, fn := range methods { + parseMethod(sec, fn, pp) + } + + // parse subsections + ts := t.Decl.Specs[0].(*ast.TypeSpec) + expr := ts.Type + st := expr.(*ast.StructType) + for _, f := range st.Fields.List { + ident, ok := f.Type.(*ast.Ident) + if !ok { + continue + } + name := ident.Name + if f.Tag != nil { + name = reflect.StructTag(parseStringLiteral(f.Tag.Value)).Get("sherpa") + } + subt := pp.lookupType(ident.Name) + if subt == nil { + logFatalLinef(pp, ident.Pos(), "subsection %q not found", ident.Name) + } + subsec := parseSection(subt, pp) + subsec.Name = name + sec.Sections = append(sec.Sections, subsec) + } + return sec +} + +// Ensure type "t" (used in a field or argument) defined in package pp is parsed +// and added to the section. +func ensureNamedType(t *doc.Type, sec *section, pp *parsedPackage) { + typePath := pp.Path + "." + t.Name + if _, have := sec.Typeset[typePath]; have { + return + } + + tt := &namedType{ + Name: t.Name, + Text: strings.TrimSpace(t.Doc), + } + // add it early, so self-referencing types can't cause a loop + sec.Types = append(sec.Types, tt) + sec.Typeset[typePath] = struct{}{} + + ts := t.Decl.Specs[0].(*ast.TypeSpec) + if ts.Assign.IsValid() { + logFatalLinef(pp, t.Decl.TokPos, "type aliases not yet supported") + } + + var gatherFields func(e ast.Expr, typeName string, xpp *parsedPackage) + var gatherStructFields func(nt *ast.StructType, typeName string, xpp *parsedPackage) + + gatherFields = func(e ast.Expr, typeName string, xpp *parsedPackage) { + switch xt := e.(type) { + case *ast.Ident: + // Bare type name. 
+ tt := xpp.lookupType(xt.Name) + if tt == nil { + log.Fatalf("could not find type %q used in type %q in package %q", xt.Name, typeName, xpp.Path) + } + tts := tt.Decl.Specs[0].(*ast.TypeSpec) + if tts.Assign.IsValid() { + logFatalLinef(xpp, tt.Decl.TokPos, "type aliases not yet supported") + } + tst, ok := tts.Type.(*ast.StructType) + if !ok { + logFatalLinef(xpp, tt.Decl.TokPos, "unexpected field type %T", tts.Type) + } + gatherStructFields(tst, tt.Name, xpp) + case *ast.StarExpr: + // Field with "*", handle as if without *. + gatherFields(xt.X, typeName, xpp) + case *ast.SelectorExpr: + // With package prefix, lookup the type in the package and gather its fields. + dt, nxpp := parseFieldSelector(useSrc{xpp, typeName}, xt) + tts := dt.Decl.Specs[0].(*ast.TypeSpec) + if tts.Assign.IsValid() { + logFatalLinef(nxpp, dt.Decl.TokPos, "type aliases not yet supported") + } + tst, ok := tts.Type.(*ast.StructType) + if !ok { + logFatalLinef(nxpp, dt.Decl.TokPos, "unexpected field type %T", tts.Type) + } + gatherStructFields(tst, dt.Name, nxpp) + default: + logFatalLinef(xpp, t.Decl.TokPos, "unsupported field with type %T", e) + } + } + + gatherStructFields = func(nt *ast.StructType, typeName string, xpp *parsedPackage) { + for _, f := range nt.Fields.List { + if len(f.Names) == 0 { + // Embedded field. Treat its fields as if they were included. + gatherFields(f.Type, typeName, xpp) + continue + } + + // Check if we need this type. Otherwise we may trip + // over an unhandled type that we wouldn't include in + // the output (eg due to a struct tag).
+ names := nameList(f.Names, f.Tag) + need := false + for _, name := range names { + if name != "" { + need = true + break + } + } + if !need { + continue + } + + ff := &field{ + "", + nil, + fieldDoc(f), + []*field{}, + } + ff.Typewords = gatherFieldType(t.Name, ff, f.Type, f.Tag, sec, xpp) + for _, name := range nameList(f.Names, f.Tag) { + nf := &field{} + *nf = *ff + nf.Name = name + tt.Fields = append(tt.Fields, nf) + } + } + } + + switch nt := ts.Type.(type) { + case *ast.StructType: + tt.Kind = typeStruct + gatherStructFields(nt, t.Name, pp) + + case *ast.ArrayType: + if ident, ok := nt.Elt.(*ast.Ident); ok && ident.Name == "byte" { + tt.Kind = typeBytes + } else { + logFatalLinef(pp, t.Decl.TokPos, "named type with unsupported element type %T", ts.Type) + } + + case *ast.Ident: + if strings.HasSuffix(typePath, "sherpa.Int64s") || strings.HasSuffix(typePath, "sherpa.Uint64s") { + return + } + + tt.Text = t.Doc + ts.Comment.Text() + switch nt.Name { + case "byte", "int16", "uint16", "int32", "uint32", "int", "uint": + tt.Kind = typeInts + case "string": + tt.Kind = typeStrings + default: + logFatalLinef(pp, t.Decl.TokPos, "unrecognized type identifier %#v", nt.Name) + } + + for _, c := range t.Consts { + for _, spec := range c.Decl.Specs { + vs, ok := spec.(*ast.ValueSpec) + if !ok { + logFatalLinef(pp, spec.Pos(), "unsupported non-ast.ValueSpec constant %#v", spec) + } + if len(vs.Names) != 1 { + logFatalLinef(pp, vs.Pos(), "unsupported multiple .Names in %#v", vs) + } + name := vs.Names[0].Name + if len(vs.Values) != 1 { + logFatalLinef(pp, vs.Pos(), "unsupported multiple .Values in %#v", vs) + } + lit, ok := vs.Values[0].(*ast.BasicLit) + if !ok { + logFatalLinef(pp, vs.Pos(), "unsupported non-ast.BasicLit first .Values %#v", vs) + } + + comment := vs.Doc.Text() + vs.Comment.Text() + switch lit.Kind { + case token.INT: + if tt.Kind != typeInts { + logFatalLinef(pp, lit.Pos(), "int value for for non-int-enum %q", t.Name) + } + v, err := 
strconv.ParseInt(lit.Value, 10, 64) + check(err, "parse int literal") + iv := struct { + Name string + Value int + Docs string + }{name, int(v), strings.TrimSpace(comment)} + tt.IntValues = append(tt.IntValues, iv) + case token.STRING: + if tt.Kind != typeStrings { + logFatalLinef(pp, lit.Pos(), "string for non-string-enum %q", t.Name) + } + v, err := strconv.Unquote(lit.Value) + check(err, "unquote literal") + sv := struct { + Name string + Value string + Docs string + }{name, v, strings.TrimSpace(comment)} + tt.StringValues = append(tt.StringValues, sv) + default: + logFatalLinef(pp, lit.Pos(), "unexpected literal kind %#v", lit.Kind) + } + } + } + default: + logFatalLinef(pp, t.Decl.TokPos, "unsupported field/param/return type %T", ts.Type) + } +} + +func hasOmitEmpty(tag *ast.BasicLit) bool { + return hasJSONTagValue(tag, "omitempty") +} + +// isCommaString returns whether the tag (may be nil) contains a "json:,string" directive. +func isCommaString(tag *ast.BasicLit) bool { + return hasJSONTagValue(tag, "string") +} + +func hasJSONTagValue(tag *ast.BasicLit, v string) bool { + if tag == nil { + return false + } + st := reflect.StructTag(parseStringLiteral(tag.Value)) + s, ok := st.Lookup("json") + if !ok || s == "-" { + return false + } + t := strings.Split(s, ",") + for _, e := range t[1:] { + if e == v { + return true + } + } + return false +} + +func gatherFieldType(typeName string, f *field, e ast.Expr, fieldTag *ast.BasicLit, sec *section, pp *parsedPackage) typewords { + nullablePrefix := typewords{} + if hasOmitEmpty(fieldTag) { + nullablePrefix = typewords{"nullable"} + } + + name := checkReplacedType(useSrc{pp, typeName}, e) + if name != nil { + if name[0] != "nullable" { + return append(nullablePrefix, name...) 
+ } + return name + } + + switch t := e.(type) { + case *ast.Ident: + tt := pp.lookupType(t.Name) + if tt != nil { + ensureNamedType(tt, sec, pp) + return []string{t.Name} + } + commaString := isCommaString(fieldTag) + name := t.Name + switch name { + case "byte": + name = "uint8" + case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float32", "float64", "string", "any": + case "int64", "uint64": + if commaString { + name += "s" + } + case "int", "uint": + name += "32" + default: + logFatalLinef(pp, t.Pos(), "unsupported field type %q used in type %q in package %q", name, typeName, pp.Path) + } + if commaString && name != "int64s" && name != "uint64s" { + logFatalLinef(pp, t.Pos(), "unsupported tag `json:,\"string\"` for non-64bit int in %s.%s", typeName, f.Name) + } + return append(nullablePrefix, name) + case *ast.ArrayType: + return append(nullablePrefix, append([]string{"[]"}, gatherFieldType(typeName, f, t.Elt, nil, sec, pp)...)...) + case *ast.MapType: + _ = gatherFieldType(typeName, f, t.Key, nil, sec, pp) + vt := gatherFieldType(typeName, f, t.Value, nil, sec, pp) + return append(nullablePrefix, append([]string{"{}"}, vt...)...) + case *ast.InterfaceType: + // If we export an interface as an "any" type, we want to make sure it's intended. + // Require the user to be explicit with an empty interface. + if t.Methods != nil && len(t.Methods.List) > 0 { + logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t) + } + return append(nullablePrefix, "any") + case *ast.StarExpr: + tw := gatherFieldType(typeName, f, t.X, fieldTag, sec, pp) + if tw[0] != "nullable" { + tw = append([]string{"nullable"}, tw...) 
+ } + return tw + case *ast.SelectorExpr: + return append(nullablePrefix, parseSelector(t, typeName, sec, pp)) + } + logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v for struct %q field %q in gatherFieldType", e, typeName, f.Name) + return nil +} + +func parseArgType(e ast.Expr, sec *section, pp *parsedPackage) typewords { + name := checkReplacedType(useSrc{pp, sec.Name}, e) + if name != nil { + return name + } + + switch t := e.(type) { + case *ast.Ident: + tt := pp.lookupType(t.Name) + if tt != nil { + ensureNamedType(tt, sec, pp) + return []string{t.Name} + } + name := t.Name + switch name { + case "byte": + name = "uint8" + case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "float32", "float64", "string", "any": + case "int", "uint": + name += "32" + case "error": + // allowed here, checked if in right location by caller + default: + logFatalLinef(pp, t.Pos(), "unsupported arg type %q", name) + } + return []string{name} + case *ast.ArrayType: + return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...) + case *ast.Ellipsis: + // Ellipsis parameters to a function must be passed as an array, so document it that way. + return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...) + case *ast.MapType: + _ = parseArgType(t.Key, sec, pp) + vt := parseArgType(t.Value, sec, pp) + return append([]string{"{}"}, vt...) + case *ast.InterfaceType: + // If we export an interface as an "any" type, we want to make sure it's intended. + // Require the user to be explicit with an empty interface. + if t.Methods != nil && len(t.Methods.List) > 0 { + logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t) + } + return []string{"any"} + case *ast.StarExpr: + return append([]string{"nullable"}, parseArgType(t.X, sec, pp)...) 
+ case *ast.SelectorExpr: + return []string{parseSelector(t, sec.TypeName, sec, pp)} + } + logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v in parseArgType", e) + return nil +} + +// Parse the selector of a field, returning the type and the parsed package it exists in. This cannot be a builtin type. +func parseFieldSelector(u useSrc, t *ast.SelectorExpr) (*doc.Type, *parsedPackage) { + packageIdent, ok := t.X.(*ast.Ident) + if !ok { + u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X") + } + pkgName := packageIdent.Name + typeName := t.Sel.Name + + importPath := u.lookupPackageImportPath(pkgName) + if importPath == "" { + u.Fatalf(t.Pos(), "cannot find source for type %q that references package %q (perhaps try -replace)", u, pkgName) + } + + opp := u.Ppkg.ensurePackageParsed(importPath) + tt := opp.lookupType(typeName) + if tt == nil { + u.Fatalf(t.Pos(), "could not find type %q in package %q", typeName, importPath) + } + return tt, opp +} + +func parseSelector(t *ast.SelectorExpr, srcTypeName string, sec *section, pp *parsedPackage) string { + packageIdent, ok := t.X.(*ast.Ident) + if !ok { + logFatalLinef(pp, t.Pos(), "unexpected non-ident for SelectorExpr.X") + } + pkgName := packageIdent.Name + typeName := t.Sel.Name + + if pkgName == "time" && typeName == "Time" { + return "timestamp" + } + if pkgName == "sherpa" { + switch typeName { + case "Int64s": + return "int64s" + case "Uint64s": + return "uint64s" + } + } + + importPath := pp.lookupPackageImportPath(srcTypeName, pkgName) + if importPath == "" { + logFatalLinef(pp, t.Pos(), "cannot find source for %q (perhaps try -replace)", fmt.Sprintf("%s.%s", pkgName, typeName)) + } + + opp := pp.ensurePackageParsed(importPath) + tt := opp.lookupType(typeName) + if tt == nil { + logFatalLinef(pp, t.Pos(), "could not find type %q in package %q", typeName, importPath) + } + ensureNamedType(tt, sec, opp) + return typeName +} + +type replacement struct { + original string // a Go type, eg "pkg.Type" or 
"*pkg.Type" + target typewords +} + +var _replacements []replacement + +func typeReplacements() []replacement { + if _replacements != nil { + return _replacements + } + + _replacements = []replacement{} + for _, repl := range strings.Split(*replace, ",") { + if repl == "" { + continue + } + tokens := strings.Split(repl, " ") + if len(tokens) < 2 { + log.Fatalf("bad replacement %q, must have at least two tokens, space-separated", repl) + } + r := replacement{tokens[0], tokens[1:]} + _replacements = append(_replacements, r) + } + return _replacements +} + +// Use of a type Name from package Ppkg. Used to look up references from that +// location (the file where the type is defined, with its imports) for a given Go +// ast. +type useSrc struct { + Ppkg *parsedPackage + Name string +} + +func (u useSrc) lookupPackageImportPath(pkgName string) string { + return u.Ppkg.lookupPackageImportPath(u.Name, pkgName) +} + +func (u useSrc) String() string { + return fmt.Sprintf("%s.%s", u.Ppkg.Path, u.Name) +} + +func (u useSrc) Fatalf(tok token.Pos, format string, args ...interface{}) { + logFatalLinef(u.Ppkg, tok, format, args...) +} + +// Return a go type name, eg "*time.Time". +// This function does not parse the types itself, because it would mean they could +// be added to the sherpadoc output even if they aren't otherwise used (due to +// replacement). +func goTypeName(u useSrc, e ast.Expr) string { + switch t := e.(type) { + case *ast.Ident: + return t.Name + case *ast.ArrayType: + return "[]" + goTypeName(u, t.Elt) + case *ast.Ellipsis: + // Ellipsis parameters to a function must be passed as an array, so document it that way. 
+ return "[]" + goTypeName(u, t.Elt) + case *ast.MapType: + return fmt.Sprintf("map[%s]%s", goTypeName(u, t.Key), goTypeName(u, t.Value)) + case *ast.InterfaceType: + return "interface{}" + case *ast.StarExpr: + return "*" + goTypeName(u, t.X) + case *ast.SelectorExpr: + packageIdent, ok := t.X.(*ast.Ident) + if !ok { + u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X") + } + pkgName := packageIdent.Name + typeName := t.Sel.Name + + importPath := u.lookupPackageImportPath(pkgName) + if importPath != "" { + return fmt.Sprintf("%s.%s", importPath, typeName) + } + return fmt.Sprintf("%s.%s", pkgName, typeName) + // todo: give proper error message for *ast.StructType + } + u.Fatalf(e.Pos(), "unimplemented ast.Expr %#v in goTypeName", e) + return "" +} + +func checkReplacedType(u useSrc, e ast.Expr) typewords { + repls := typeReplacements() + if len(repls) == 0 { + return nil + } + + name := goTypeName(u, e) + return replacementType(repls, name) +} + +func replacementType(repls []replacement, name string) typewords { + for _, repl := range repls { + if repl.original == name { + return repl.target + } + } + return nil +} + +// Ensures the package for importPath has been parsed at least once, and return it. +func (pp *parsedPackage) ensurePackageParsed(importPath string) *parsedPackage { + r := pp.Imports[importPath] + if r != nil { + return r + } + + var localPath string + var astPkg *ast.Package + var fset *token.FileSet + + // If dependencies are vendored, we load packages from vendor/. This is typically + // faster than using package.Load (the fallback), which may spawn commands. + // For me, while testing, for loading a simple package from the same module goes + // from 50-100 ms to 1-5ms. Loading "net" from 200ms to 65ms. 
+ + if gomodFile != nil { + if importPath == gomodFile.Module.Mod.Path { + localPath = gomodDir + } else if strings.HasPrefix(importPath, gomodFile.Module.Mod.Path+"/") { + localPath = filepath.Join(gomodDir, strings.TrimPrefix(importPath, gomodFile.Module.Mod.Path+"/")) + } else { + p := filepath.Join(gomodDir, "vendor", importPath) + if _, err := os.Stat(p); err == nil { + localPath = p + } else { + localPath = filepath.Join(runtime.GOROOT(), "src", importPath) + } + } + + fset = token.NewFileSet() + astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments|parser.DeclarationErrors) + check(err, "parsing go files from "+localPath) + for name, pkg := range astPkgs { + if strings.HasSuffix(name, "_test") { + continue + } + if astPkg != nil { + log.Fatalf("loading package %q: multiple packages found", importPath) + } + astPkg = pkg + } + } else { + config := &packages.Config{ + Mode: packages.NeedName | packages.NeedFiles, + } + pkgs, err := packages.Load(config, importPath) + check(err, "loading package") + if len(pkgs) != 1 { + log.Fatalf("loading package %q: got %d packages, expected 1", importPath, len(pkgs)) + } + pkg := pkgs[0] + if len(pkg.GoFiles) == 0 { + log.Fatalf("loading package %q: no go files found", importPath) + } + + fset = token.NewFileSet() + localPath = filepath.Dir(pkg.GoFiles[0]) + astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments) + check(err, "parsing go files from directory") + var ok bool + astPkg, ok = astPkgs[pkg.Name] + if !ok { + log.Fatalf("loading package %q: could not find astPkg for %q", importPath, pkg.Name) + } + } + + docpkg := doc.New(astPkg, "", doc.AllDecls|doc.PreserveAST) + + npp := &parsedPackage{ + Fset: fset, + Path: localPath, + Pkg: astPkg, + Docpkg: docpkg, + Imports: make(map[string]*parsedPackage), + } + pp.Imports[importPath] = npp + return npp +} + +// LookupPackageImportPath returns the import/package path for pkgName as used as +// used in the type named typeName. 
+func (pp *parsedPackage) lookupPackageImportPath(typeName, pkgName string) string { + file := pp.lookupTypeFile(typeName) + for _, imp := range file.Imports { + if imp.Name != nil && imp.Name.Name == pkgName || imp.Name == nil && (parseStringLiteral(imp.Path.Value) == pkgName || strings.HasSuffix(parseStringLiteral(imp.Path.Value), "/"+pkgName)) { + return parseStringLiteral(imp.Path.Value) + } + } + return "" +} + +// LookupTypeFile returns the go source file that contains the definition of the type named typeName. +func (pp *parsedPackage) lookupTypeFile(typeName string) *ast.File { + for _, file := range pp.Pkg.Files { + for _, decl := range file.Decls { + switch d := decl.(type) { + case *ast.GenDecl: + for _, spec := range d.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + if s.Name.Name == typeName { + return file + } + } + } + } + } + } + log.Fatalf("could not find type %q", fmt.Sprintf("%s.%s", pp.Path, typeName)) + return nil +} + +// Populate "params" with the arguments from "fields", which are function parameters or return type. +func parseArgs(params *[]sherpadoc.Arg, fields *ast.FieldList, sec *section, pp *parsedPackage, isParams bool) { + if fields == nil { + return + } + addParam := func(name string, tw typewords) { + param := sherpadoc.Arg{Name: name, Typewords: tw} + *params = append(*params, param) + } + for _, f := range fields.List { + typ := parseArgType(f.Type, sec, pp) + // Handle named params. Can be both arguments to a function or return types. + for _, name := range f.Names { + addParam(name.Name, typ) + } + // Return types often don't have a name, don't forget them.
+ if len(f.Names) == 0 { + addParam("", typ) + } + } + + for i, p := range *params { + if p.Typewords[len(p.Typewords)-1] != "error" { + continue + } + if isParams || i != len(*params)-1 { + logFatalLinef(pp, fields.Pos(), "can only have error type as last return value") + } + pp := *params + *params = pp[:len(pp)-1] + } +} + +func adjustFunctionName(s string) string { + switch *adjustFunctionNames { + case "": + return strings.ToLower(s[:1]) + s[1:] + case "none": + return s + case "lowerWord": + r := "" + for i, c := range s { + lc := unicode.ToLower(c) + if lc == c { + r += s[i:] + break + } + r += string(lc) + } + return r + default: + panic(fmt.Sprintf("bad value for flag adjust-function-names: %q", *adjustFunctionNames)) + } +} + +// ParseMethod ensures the function fn from package pp ends up in section sec, with parameters/return named types filled in. +func parseMethod(sec *section, fn *doc.Func, pp *parsedPackage) { + f := &function{ + Name: adjustFunctionName(fn.Name), + Text: fn.Doc, + Params: []sherpadoc.Arg{}, + Returns: []sherpadoc.Arg{}, + } + + // If first function parameter is context.Context, we skip it in the documentation. + // The sherpa handler automatically fills it with the http request context when called. 
+ params := fn.Decl.Type.Params + if params != nil && len(params.List) > 0 && len(params.List[0].Names) == 1 && goTypeName(useSrc{pp, sec.Name}, params.List[0].Type) == "context.Context" { + params.List = params.List[1:] + } + isParams := true + parseArgs(&f.Params, params, sec, pp, isParams) + + isParams = false + parseArgs(&f.Returns, fn.Decl.Type.Results, sec, pp, isParams) + sec.Functions = append(sec.Functions, f) +} diff --git a/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go new file mode 100644 index 0000000..60060db --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go @@ -0,0 +1,85 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/mjl-/sherpadoc" +) + +func sherpaSection(sec *section) *sherpadoc.Section { + doc := &sherpadoc.Section{ + Name: sec.Name, + Docs: sec.Text, + Functions: []*sherpadoc.Function{}, + Sections: []*sherpadoc.Section{}, + Structs: []sherpadoc.Struct{}, + Ints: []sherpadoc.Ints{}, + Strings: []sherpadoc.Strings{}, + } + for _, t := range sec.Types { + switch t.Kind { + case typeStruct: + tt := sherpadoc.Struct{ + Name: t.Name, + Docs: t.Text, + Fields: []sherpadoc.Field{}, + } + for _, f := range t.Fields { + ff := sherpadoc.Field{ + Name: f.Name, + Docs: f.Doc, + Typewords: f.Typewords, + } + tt.Fields = append(tt.Fields, ff) + } + doc.Structs = append(doc.Structs, tt) + case typeInts: + e := sherpadoc.Ints{ + Name: t.Name, + Docs: strings.TrimSpace(t.Text), + Values: t.IntValues, + } + doc.Ints = append(doc.Ints, e) + case typeStrings: + e := sherpadoc.Strings{ + Name: t.Name, + Docs: strings.TrimSpace(t.Text), + Values: t.StringValues, + } + doc.Strings = append(doc.Strings, e) + case typeBytes: + // todo: hack. find proper way to document them. better for larger functionality: add generic support for lists of types. for now we'll fake this being a string...
+ e := sherpadoc.Strings{ + Name: t.Name, + Docs: strings.TrimSpace(t.Text), + Values: []struct{Name string; Value string; Docs string}{}, + } + doc.Strings = append(doc.Strings, e) + default: + panic("missing case") + } + } + for _, fn := range sec.Functions { + // Ensure returns always have a name. Go can leave them nameless. + // Either they all have names or they don't, so the names we make up will never clash. + for i := range fn.Returns { + if fn.Returns[i].Name == "" { + fn.Returns[i].Name = fmt.Sprintf("r%d", i) + } + } + + f := &sherpadoc.Function{ + Name: fn.Name, + Docs: strings.TrimSpace(fn.Text), + Params: fn.Params, + Returns: fn.Returns, + } + doc.Functions = append(doc.Functions, f) + } + for _, subsec := range sec.Sections { + doc.Sections = append(doc.Sections, sherpaSection(subsec)) + } + doc.Docs = strings.TrimSpace(doc.Docs) + return doc +} diff --git a/vendor/github.com/mjl-/sherpadoc/sherpadoc.go b/vendor/github.com/mjl-/sherpadoc/sherpadoc.go new file mode 100644 index 0000000..f1d1ae4 --- /dev/null +++ b/vendor/github.com/mjl-/sherpadoc/sherpadoc.go @@ -0,0 +1,84 @@ +// Package sherpadoc contains types for reading and writing documentation for sherpa API's. +package sherpadoc + +const ( + // SherpadocVersion is the sherpadoc version generated by this command. + SherpadocVersion = 1 +) + +// Section represents documentation about a Sherpa API section, as returned by the "_docs" function. +type Section struct { + Name string // Name of an API section. + Docs string // Explanation of the API in text or markdown. + Functions []*Function // Functions in this section. + Sections []*Section // Subsections, each with their own documentation. + Structs []Struct // Structs as named types. + Ints []Ints // Int enums as named types. + Strings []Strings // String enums used as named types. + + Version string `json:",omitempty"` // Version of this API, only relevant for the top-level section of an API. Typically filled in by server at startup.
+ SherpaVersion int // Version of sherpa this API implements. Currently at 0. Typically filled in by server at startup. + SherpadocVersion int `json:",omitempty"` // Version of the sherpadoc format. Currently at 1, the first defined version. Only relevant for the top-level section of an API. +} + +// Function contains the documentation for a single function. +type Function struct { + Name string // Name of the function. + Docs string // Text or markdown, describing the function, its parameters, return types and possible errors. + Params []Arg + Returns []Arg +} + +// Arg is the name and type of a function parameter or return value. +// +// Production rules: +// +// basictype := "bool" | "int8" | "uint8" | "int16" | "uint16" | "int32" | "uint32" | "int64" | "uint64" | "int64s" | "uint64s" | "float32" | "float64" | "string" | "timestamp" +// array := "[]" +// map := "{}" +// identifier := [a-zA-Z][a-zA-Z0-9]* +// type := "nullable"? ("any" | basictype | identifier | array type | map type) +// +// It is not possible to have inline structs in an Arg. Those must be encoded as a +// named type. +type Arg struct { + Name string // Name of the argument. + Typewords []string // Typewords is an array of tokens describing the type. +} + +// Struct is a named compound type. +type Struct struct { + Name string + Docs string + Fields []Field +} + +// Field is a single field of a struct type. +// The type can reference another named type. +type Field struct { + Name string + Docs string + Typewords []string +} + +// Ints is a type representing an enum with integers as values. +type Ints struct { + Name string + Docs string + Values []struct { + Name string + Value int + Docs string + } +} + +// Strings is a type representing an enum with strings as values.
+type Strings struct { + Name string + Docs string + Values []struct { + Name string + Value string + Docs string + } +} diff --git a/vendor/github.com/mjl-/sherpaprom/LICENSE.md b/vendor/github.com/mjl-/sherpaprom/LICENSE.md new file mode 100644 index 0000000..c15bbe1 --- /dev/null +++ b/vendor/github.com/mjl-/sherpaprom/LICENSE.md @@ -0,0 +1,8 @@ +Copyright 2017 Irias Informatiemanagement +Copyright 2019 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mjl-/sherpaprom/README.md b/vendor/github.com/mjl-/sherpaprom/README.md new file mode 100644 index 0000000..fe9a130 --- /dev/null +++ b/vendor/github.com/mjl-/sherpaprom/README.md @@ -0,0 +1,13 @@ +# sherpaprom + +Go package with a Prometheus [1] collector for Sherpa API's [2,3]. It provides a prometheus collector that implements interface Collector. 
+ +Read the godoc documentation at https://godoc.org/github.com/mjl-/sherpaprom + +[1] Prometheus: https://prometheus.io/ +[2] Sherpa protocol: https://www.ueber.net/who/mjl/sherpa/ +[3] Sherpa Go package: https://github.com/mjl-/sherpa + +# LICENSE + +Created by Mechiel Lukkien, originally at Irias, and released under an MIT-license, see LICENSE.md. diff --git a/vendor/github.com/mjl-/sherpaprom/collector.go b/vendor/github.com/mjl-/sherpaprom/collector.go new file mode 100644 index 0000000..2772ed4 --- /dev/null +++ b/vendor/github.com/mjl-/sherpaprom/collector.go @@ -0,0 +1,123 @@ +// Package sherpaprom provides a collector of statistics for incoming Sherpa requests that are exported over to Prometheus. +package sherpaprom + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// Collector implements the Collector interface from the sherpa package. +type Collector struct { + requests, errors *prometheus.CounterVec + protocolErrors, badFunction, javascript, json prometheus.Counter + requestDuration *prometheus.HistogramVec +} + +// NewCollector creates a new collector for the named API. +// Metrics will be labeled with "api". 
+// The following prometheus metrics are automatically registered on reg, or the default prometheus registerer if reg is nil: +// +// sherpa_requests_total +// calls, per function +// sherpa_errors_total +// error responses, per function,code +// sherpa_protocol_errors_total +// incorrect requests +// sherpa_bad_function_total +// unknown functions called +// sherpa_javascript_request_total +// requests to sherpa.js +// sherpa_json_request_total +// requests to sherpa.json +// sherpa_requests_duration_seconds +// histogram for .01, .05, .1, .2, .5, 1, 2, 4, 8, 16, per function +func NewCollector(api string, reg prometheus.Registerer) (*Collector, error) { + if reg == nil { + reg = prometheus.DefaultRegisterer + } + apiLabel := prometheus.Labels{"api": api} + c := &Collector{ + requests: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "sherpa_requests_total", + Help: "Total sherpa requests.", + ConstLabels: apiLabel, + }, []string{"function"}), + errors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "sherpa_errors_total", + Help: "Total sherpa error responses.", + ConstLabels: apiLabel, + }, []string{"function", "code"}), + protocolErrors: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "sherpa_protocol_errors_total", + Help: "Total sherpa protocol errors.", + ConstLabels: apiLabel, + }), + badFunction: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "sherpa_bad_function_total", + Help: "Total sherpa bad function calls.", + ConstLabels: apiLabel, + }), + javascript: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "sherpa_javascript_request_total", + Help: "Total sherpa.js requests.", + ConstLabels: apiLabel, + }), + json: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "sherpa_json_requests_total", + Help: "Total sherpa.json requests.", + ConstLabels: apiLabel, + }), + requestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "sherpa_requests_duration_seconds", + Help: "Sherpa request duration in 
seconds.", + ConstLabels: apiLabel, + Buckets: []float64{.01, .05, .1, .2, .5, 1, 2, 4, 8, 16}, + }, []string{"function"}), + } + first := func(errors ...error) error { + for _, err := range errors { + if err != nil { + return err + } + } + return nil + } + err := first( + reg.Register(c.requests), + reg.Register(c.errors), + reg.Register(c.protocolErrors), + reg.Register(c.badFunction), + reg.Register(c.javascript), + reg.Register(c.json), + reg.Register(c.requestDuration), + ) + return c, err +} + +// BadFunction increases counter "sherpa_bad_function_total" by one. +func (c *Collector) BadFunction() { + c.badFunction.Inc() +} + +// ProtocolError increases counter "sherpa_protocol_errors_total" by one. +func (c *Collector) ProtocolError() { + c.protocolErrors.Inc() +} + +// JSON increases "sherpa_json_requests_total" by one. +func (c *Collector) JSON() { + c.json.Inc() +} + +// JavaScript increases "sherpa_javascript_request_total" by one. +func (c *Collector) JavaScript() { + c.javascript.Inc() +} + +// FunctionCall increases "sherpa_requests_total" by one, adds the call duration to "sherpa_requests_duration_seconds" and, if errorCode is non-empty, increases "sherpa_errors_total".
+func (c *Collector) FunctionCall(name string, duration float64, errorCode string) { + c.requests.WithLabelValues(name).Inc() + if errorCode != "" { + c.errors.WithLabelValues(name, errorCode).Inc() + } + c.requestDuration.WithLabelValues(name).Observe(duration) +} diff --git a/vendor/github.com/mjl-/xfmt/.gitignore b/vendor/github.com/mjl-/xfmt/.gitignore new file mode 100644 index 0000000..7544ee4 --- /dev/null +++ b/vendor/github.com/mjl-/xfmt/.gitignore @@ -0,0 +1 @@ +/xfmt diff --git a/vendor/github.com/mjl-/xfmt/LICENSE b/vendor/github.com/mjl-/xfmt/LICENSE new file mode 100644 index 0000000..b90262c --- /dev/null +++ b/vendor/github.com/mjl-/xfmt/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2019 Mechiel Lukkien + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mjl-/xfmt/README.txt b/vendor/github.com/mjl-/xfmt/README.txt new file mode 100644 index 0000000..448ef7a --- /dev/null +++ b/vendor/github.com/mjl-/xfmt/README.txt @@ -0,0 +1,26 @@ +xfmt formats long lines, playing nice with text in code. + +To install: + + go get github.com/mjl-/xfmt/cmd/xfmt + +Xfmt reads from stdin, writes formatted output to stdout. + +Xfmt wraps long lines at 80 characters, configurable through -width. But it +counts text width excluding indenting and markup. Fmt formats to a max line +length that includes indenting. We don't care about total max line length +nowadays, we care about a human readable paragraph, which has a certain text +width regardless of indent. + +Xfmt recognizes lines with first non-whitespace of "//" and "#" as line +comments, and repeats that prefix on later lines. + +Xfmt does not merge lines if the first non-prefix text starts with +punctuation or numbers. E.g. "- item1" or "1. point 1". + +Xfmt does not merge multiple spaces, it assumes you intended what you typed. + +# todo + +- possibly recognize itemized lists in comments and indent the later lines with whitespace +- something else diff --git a/vendor/github.com/mjl-/xfmt/xfmt.go b/vendor/github.com/mjl-/xfmt/xfmt.go new file mode 100644 index 0000000..2655883 --- /dev/null +++ b/vendor/github.com/mjl-/xfmt/xfmt.go @@ -0,0 +1,207 @@ +// Package xfmt reformats text, wrapping it while recognizing comments. +package xfmt + +import ( + "bufio" + "fmt" + "io" + "strings" +) + +// Config tells format how to reformat text. +type Config struct { + MaxWidth int // Max width of content (excluding indenting), after which lines are wrapped. + BreakPrefixes []string // String prefixes that cause a line to break, instead of being merged into the previous line. +} + +// Format reads text from r and writes reformatted text to w, according to +// instructions in config. Lines ending with \r\n are formatted with \r\n as well.
+func Format(w io.Writer, r io.Reader, config Config) error { + f := &formatter{ + in: bufio.NewReader(r), + out: bufio.NewWriter(w), + config: config, + } + return f.format() +} + +type formatter struct { + in *bufio.Reader + out *bufio.Writer + config Config + curLine string + curLineend string +} + +type parseError error + +func (f *formatter) format() (rerr error) { + defer func() { + e := recover() + if e != nil { + if pe, ok := e.(parseError); ok { + rerr = pe + } else { + panic(e) + } + } + }() + + for { + line, end := f.gatherLine() + if line == "" && end == "" { + break + } + prefix, rem := parseLine(line) + for _, s := range f.splitLine(rem) { + f.write(prefix) + f.write(s) + f.write(end) + } + } + return f.out.Flush() + +} + +func (f *formatter) check(err error, action string) { + if err != nil { + panic(parseError(fmt.Errorf("%s: %s", action, err))) + } +} + +func (f *formatter) write(s string) { + _, err := f.out.Write([]byte(s)) + f.check(err, "write") +} + +func (f *formatter) peekLine() (string, string) { + if f.curLine != "" || f.curLineend != "" { + return f.curLine, f.curLineend + } + + line, err := f.in.ReadString('\n') + if err != io.EOF { + f.check(err, "read") + } + if line == "" { + return "", "" + } + if strings.HasSuffix(line, "\r\n") { + f.curLine, f.curLineend = line[:len(line)-2], "\r\n" + } else if strings.HasSuffix(line, "\n") { + f.curLine, f.curLineend = line[:len(line)-1], "\n" + } else { + f.curLine, f.curLineend = line, "" + } + return f.curLine, f.curLineend +} + +func (f *formatter) consumeLine() { + if f.curLine == "" && f.curLineend == "" { + panic("bad") + } + f.curLine = "" + f.curLineend = "" +} + +func (f *formatter) gatherLine() (string, string) { + var curLine, curLineend string + var curPrefix string + + for { + line, end := f.peekLine() + if line == "" && end == "" { + break + } + if curLine == "" { + curLineend = end + } + prefix, rem := parseLine(line) + if prefix == "" && rem == "" { + if curLine == "" { + 
f.consumeLine() + } + break + } + if curLine != "" && (curPrefix != prefix || rem == "" || f.causeBreak(rem)) { + break + } + curPrefix = prefix + if curLine != "" { + curLine += " " + } + curLine += rem + f.consumeLine() + // Control at begin or end of line are not merged. + if curLine != "" && curLine[len(curLine)-1] < 0x20 { + break + } + } + + return curPrefix + curLine, curLineend +} + +func (f *formatter) causeBreak(s string) bool { + c := s[0] + if c < 0x20 { + return true + } + for _, ss := range f.config.BreakPrefixes { + if strings.HasPrefix(s, ss) { + return true + } + } + + // Don't merge lines starting with eg "1. ". + for i, c := range s { + if c >= '0' && c <= '9' { + continue + } + if i > 0 && c == '.' && strings.HasPrefix(s[i:], ". ") { + return true + } + break + } + return false +} + +func parseLine(s string) (string, string) { + orig := s + s = strings.TrimLeft(orig, " \t") + prefix := orig[:len(orig)-len(s)] + if strings.HasPrefix(s, "//") { + prefix += "//" + s = s[2:] + } else if strings.HasPrefix(s, "#") { + prefix += "#" + s = s[1:] + } + ns := strings.TrimLeft(s, " \t") + prefix += s[:len(s)-len(ns)] + s = ns + return prefix, s +} + +func (f *formatter) splitLine(s string) []string { + if len(s) <= f.config.MaxWidth { + return []string{s} + } + + line := "" + r := []string{} + for _, w := range strings.Split(s, " ") { + if line != "" && len(line)+1+len(w) > f.config.MaxWidth { + r = append(r, line) + line = w + continue + } + if line != "" { + line += " " + } + line += w + } + if line != "" { + r = append(r, line) + } + return r +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 0000000..dd878a3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. 
+ +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 0000000..3460f03 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 0000000..c67ff1b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go new file mode 100644 index 0000000..450189f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go @@ -0,0 +1,38 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "runtime/debug" + +// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewBuildInfoCollector instead. +func NewBuildInfoCollector() Collector { + path, version, sum := "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000..cf05079 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,128 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. 
+// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. 
The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. 
+func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} + +// collectorMetric is a metric that is also a collector. +// Because of selfCollector, most (if not all) Metrics in +// this package are also collectors. +type collectorMetric interface { + Metric + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000..a912b75 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,328 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 128 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. 
+// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. 
+} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) get() float64 { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + return fval + float64(ival) +} + +func (c *counter) Write(out *dto.Metric) error { + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + val := c.get() + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. 
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. 
the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 0000000..8bc5e44 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,189 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/cespare/xxhash/v2" + + "github.com/prometheus/client_golang/prometheus/internal" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. 
Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) 
+// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. 
+ for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + xxh := xxhash.New() + for _, val := range labelValues { + xxh.WriteString(val) + xxh.Write(separatorByteSlice) + } + d.id = xxh.Sum64() + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) + for _, labelName := range labelNames { + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) + } + d.dimHash = xxh.Sum64() + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. 
+func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000..811072c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,210 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
+// +// # A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. +// +// # Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. 
However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// and HistogramVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. +// +// # Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. 
At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. 
The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// # Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. 
The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// # HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// +// # Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// # Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// # Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. 
+package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 0000000..c41ab37 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,86 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewExpvarCollector instead. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. 
+func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 0000000..3d383a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializes a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000..21271a5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,291 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge.
+type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. 
+ return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. 
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. 
Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. 
Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go new file mode 100644 index 0000000..614fd61 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "os" + +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 0000000..eaf8059 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +func getPIDFn() func() (int, error) { + return func() (int, error) { + return 1, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000..ad9a71a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,281 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "runtime" + "runtime/debug" + "time" +) + +// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. +// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so +// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is +// populated using runtime/metrics. 
+func goRuntimeMemStats() memStatsMetrics { + return memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { 
return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used 
for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, + } +} + +type baseGoCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + gcLastTimeDesc *Desc + goInfoDesc *Desc +} + +func newBaseGoCollector() baseGoCollector { + return baseGoCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the pause duration of garbage collection cycles.", + nil, nil), + gcLastTimeDesc: NewDesc( + "go_memstats_last_gc_time_seconds", + "Number of seconds since 1970 of last garbage collection.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + } +} + +// Describe returns all descriptions of the 
collector. +func (c *baseGoCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.gcLastTimeDesc + ch <- c.goInfoDesc +} + +// Collect returns the current state of all metrics of the collector. +func (c *baseGoCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + + n := getRuntimeNumThreads() + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) +} + +func memstatNamespace(s string) string { + return "go_memstats_" + s +} + +// memStatsMetrics provide description, evaluator, runtime/metrics name, and +// value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go new file mode 100644 index 0000000..897a6e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -0,0 +1,122 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.17 +// +build !go1.17 + +package prometheus + +import ( + "runtime" + "sync" + "time" +) + +type goCollector struct { + base baseGoCollector + + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. +} + +// NewGoCollector is the obsolete version of collectors.NewGoCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewGoCollector instead. +func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) + return &goCollector{ + base: newBaseGoCollector(), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: msMetrics, + } +} + +// Describe returns all descriptions of the collector. 
+func (c *goCollector) Describe(ch chan<- *Desc) { + c.base.Describe(ch) + for _, i := range c.msMetrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + // Collect base non-memory metrics. + c.base.Collect(ch) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. 
+ c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go new file mode 100644 index 0000000..3a2d55e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -0,0 +1,568 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.17 +// +build go1.17 + +package prometheus + +import ( + "math" + "runtime" + "runtime/metrics" + "strings" + "sync" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // constants for strings referenced more than once. 
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapFreesBytes = "/gc/heap/frees:bytes" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" +) + +// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. 
+var rmNamesForMemStatsMetrics = []string{ + goGCHeapTinyAllocsObjects, + goGCHeapAllocsObjects, + goGCHeapFreesObjects, + goGCHeapAllocsBytes, + goGCHeapObjects, + goGCHeapGoalBytes, + goMemoryClassesTotalBytes, + goMemoryClassesHeapObjectsBytes, + goMemoryClassesHeapUnusedBytes, + goMemoryClassesHeapReleasedBytes, + goMemoryClassesHeapFreeBytes, + goMemoryClassesHeapStacksBytes, + goMemoryClassesOSStacksBytes, + goMemoryClassesMetadataMSpanInuseBytes, + goMemoryClassesMetadataMSPanFreeBytes, + goMemoryClassesMetadataMCacheInuseBytes, + goMemoryClassesMetadataMCacheFreeBytes, + goMemoryClassesProfilingBucketsBytes, + goMemoryClassesMetadataOtherBytes, + goMemoryClassesOtherBytes, +} + +func bestEffortLookupRM(lookup []string) []metrics.Description { + ret := make([]metrics.Description, 0, len(lookup)) + for _, rm := range metrics.All() { + for _, m := range lookup { + if m == rm.Name { + ret = append(ret, rm) + } + } + } + return ret +} + +type goCollector struct { + base baseGoCollector + + // mu protects updates to all fields ensuring a consistent + // snapshot is always produced by Collect. + mu sync.Mutex + + // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). + sampleBuf []metrics.Sample + // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. + sampleMap map[string]*metrics.Sample + + // rmExposedMetrics represents all runtime/metrics package metrics + // that were configured to be exposed. + rmExposedMetrics []collectorMetric + rmExactSumMapForHist map[string]string + + // With Go 1.17, the runtime/metrics package was introduced. + // From that point on, metric names produced by the runtime/metrics + // package could be generated from runtime/metrics names. However, + // these differ from the old names for the same values. + // + // This field exists to export the same values under the old names + // as well. 
+ msMetrics memStatsMetrics + msMetricsEnabled bool +} + +type rmMetricDesc struct { + metrics.Description +} + +func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { + var descs []rmMetricDesc + for _, d := range metrics.All() { + var ( + deny = true + desc rmMetricDesc + ) + + for _, r := range rules { + if !r.Matcher.MatchString(d.Name) { + continue + } + deny = r.Deny + } + if deny { + continue + } + + desc.Description = d + descs = append(descs, desc) + } + return descs +} + +func defaultGoCollectorOptions() internal.GoCollectorOptions { + return internal.GoCollectorOptions{ + RuntimeMetricSumForHist: map[string]string{ + "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, + "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, + }, + RuntimeMetricRules: []internal.GoCollectorRule{ + //{Matcher: regexp.MustCompile("")}, + }, + } +} + +// NewGoCollector is the obsolete version of collectors.NewGoCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewGoCollector instead. +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { + opt := defaultGoCollectorOptions() + for _, o := range opts { + o(&opt) + } + + exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) + + // Collect all histogram samples so that we can get their buckets. + // The API guarantees that the buckets are always fixed for the lifetime + // of the process. + var histograms []metrics.Sample + for _, d := range exposedDescriptions { + if d.Kind == metrics.KindFloat64Histogram { + histograms = append(histograms, metrics.Sample{Name: d.Name}) + } + } + + if len(histograms) > 0 { + metrics.Read(histograms) + } + + bucketsMap := make(map[string][]float64) + for i := range histograms { + bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets + } + + // Generate a collector for each exposed runtime/metrics metric. 
+ metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) + // SampleBuf is used for reading from runtime/metrics. + // We are assuming the largest case to have stable pointers for sampleMap purposes. + sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) + sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) + for _, d := range exposedDescriptions { + namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) + if !ok { + // Just ignore this metric; we can't do anything with it here. + // If a user decides to use the latest version of Go, we don't want + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. + continue + } + + sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) + sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] + + var m collectorMetric + if d.Kind == metrics.KindFloat64Histogram { + _, hasSum := opt.RuntimeMetricSumForHist[d.Name] + unit := d.Name[strings.IndexRune(d.Name, ':')+1:] + m = newBatchHistogram( + NewDesc( + BuildFQName(namespace, subsystem, name), + d.Description.Description, + nil, + nil, + ), + internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit), + hasSum, + ) + } else if d.Cumulative { + m = NewCounter(CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: d.Description.Description, + }, + ) + } else { + m = NewGauge(GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: d.Description.Description, + }) + } + metricSet = append(metricSet, m) + } + + // Add exact sum metrics to sampleBuf if not added before. 
+ for _, h := range histograms { + sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] + if !ok { + continue + } + + if _, ok := sampleMap[sumMetric]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) + sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] + } + + var ( + msMetrics memStatsMetrics + msDescriptions []metrics.Description + ) + + if !opt.DisableMemStatsLikeMetrics { + msMetrics = goRuntimeMemStats() + msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) + + // Check if metric was not exposed before and if not, add to sampleBuf. + for _, mdDesc := range msDescriptions { + if _, ok := sampleMap[mdDesc.Name]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) + sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] + } + } + + return &goCollector{ + base: newBaseGoCollector(), + sampleBuf: sampleBuf, + sampleMap: sampleMap, + rmExposedMetrics: metricSet, + rmExactSumMapForHist: opt.RuntimeMetricSumForHist, + msMetrics: msMetrics, + msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, + } +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + c.base.Describe(ch) + for _, i := range c.msMetrics { + ch <- i.desc + } + for _, m := range c.rmExposedMetrics { + ch <- m.Desc() + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + // Collect base non-memory metrics. + c.base.Collect(ch) + + if len(c.sampleBuf) == 0 { + return + } + + // Collect must be thread-safe, so prevent concurrent use of + // sampleBuf elements. Just read into sampleBuf but write all the data + // we get into our Metrics or MemStats. + // + // This lock also ensures that the Metrics we send out are all from + // the same updates, ensuring their mutual consistency insofar as + // is guaranteed by the runtime/metrics package. + // + // N.B. 
This locking is heavy-handed, but Collect is expected to be called + // relatively infrequently. Also the core operation here, metrics.Read, + // is fast (O(tens of microseconds)) so contention should certainly be + // low, though channel operations and any allocations may add to that. + c.mu.Lock() + defer c.mu.Unlock() + + // Populate runtime/metrics sample buffer. + metrics.Read(c.sampleBuf) + + // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). + for i, metric := range c.rmExposedMetrics { + // We created samples for exposed metrics first in order, so indexes match. + sample := c.sampleBuf[i] + + // N.B. switch on concrete type because it's significantly more efficient + // than checking for the Counter and Gauge interface implementations. In + // this case, we control all the types here. + switch m := metric.(type) { + case *counter: + // Guard against decreases. This should never happen, but a failure + // to do so will result in a panic, which is a harsh consequence for + // a metrics collection bug. + v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) + if v1 > v0 { + m.Add(unwrapScalarRMValue(sample.Value) - m.get()) + } + m.Collect(ch) + case *gauge: + m.Set(unwrapScalarRMValue(sample.Value)) + m.Collect(ch) + case *batchHistogram: + m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) + m.Collect(ch) + default: + panic("unexpected metric type") + } + } + + if c.msMetricsEnabled { + // ms is a dummy MemStats that we populate ourselves so that we can + // populate the old metrics from it if goMemStatsCollection is enabled. + var ms runtime.MemStats + memStatsFromRM(&ms, c.sampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } + } +} + +// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed +// to be scalar and returns the equivalent float64 value. Panics if the +// value is not scalar. 
+func unwrapScalarRMValue(v metrics.Value) float64 { + switch v.Kind() { + case metrics.KindUint64: + return float64(v.Uint64()) + case metrics.KindFloat64: + return v.Float64() + case metrics.KindBad: + // Unsupported metric. + // + // This should never happen because we always populate our metric + // set from the runtime/metrics package. + panic("unexpected unsupported metric") + default: + // Unsupported metric kind. + // + // This should never happen because we check for this during initialization + // and flag and filter metrics whose kinds we don't understand. + panic("unexpected unsupported metric kind") + } +} + +// exactSumFor takes a runtime/metrics metric name (that is assumed to +// be of kind KindFloat64Histogram) and returns its exact sum and whether +// its exact sum exists. +// +// The runtime/metrics API for histograms doesn't currently expose exact +// sums, but some of the other metrics are in fact exact sums of histograms. +func (c *goCollector) exactSumFor(rmName string) float64 { + sumName, ok := c.rmExactSumMapForHist[rmName] + if !ok { + return 0 + } + s, ok := c.sampleMap[sumName] + if !ok { + return 0 + } + return unwrapScalarRMValue(s.Value) +} + +func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { + lookupOrZero := func(name string) uint64 { + if s, ok := rm[name]; ok { + return s.Value.Uint64() + } + return 0 + } + + // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees. + // The reason for this is because MemStats couldn't be extended at the time + // but there was a desire to have Mallocs at least be a little more representative, + // while having Mallocs - Frees still represent a live object count. + // Unfortunately, MemStats doesn't actually export a large allocation count, + // so it's impossible to pull this number out directly. 
+ tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs + + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) + ms.Lookups = 0 // Already always zero. + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) + ms.Alloc = ms.HeapAlloc + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) + ms.HeapSys = ms.HeapInuse + ms.HeapIdle + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) + + // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, + // and often misleading due to the fact that it's an average over the lifetime + // of the process. + // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + // for more details. + ms.GCCPUFraction = 0 +} + +// batchHistogram is a mutable histogram that is updated +// in batches. +type batchHistogram struct { + selfCollector + + // Static fields updated only once. 
+ desc *Desc + hasSum bool + + // Because this histogram operates in batches, it just uses a + // single mutex for everything. updates are always serialized + // but Write calls may operate concurrently with updates. + // Contention between these two sources should be rare. + mu sync.Mutex + buckets []float64 // Inclusive lower bounds, like runtime/metrics. + counts []uint64 + sum float64 // Used if hasSum is true. +} + +// newBatchHistogram creates a new batch histogram value with the given +// Desc, buckets, and whether or not it has an exact sum available. +// +// buckets must always be from the runtime/metrics package, following +// the same conventions. +func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } + h := &batchHistogram{ + desc: desc, + buckets: buckets, + // Because buckets follows runtime/metrics conventions, there's + // 1 more value in the buckets list than there are buckets represented, + // because in runtime/metrics, the bucket values represent *boundaries*, + // and non-Inf boundaries are inclusive lower bounds for that bucket. + counts: make([]uint64, len(buckets)-1), + hasSum: hasSum, + } + h.init(h) + return h +} + +// update updates the batchHistogram from a runtime/metrics histogram. +// +// sum must be provided if the batchHistogram was created to have an exact sum. +// h.buckets must be a strict subset of his.Buckets. +func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { + counts, buckets := his.Counts, his.Buckets + + h.mu.Lock() + defer h.mu.Unlock() + + // Clear buckets. + for i := range h.counts { + h.counts[i] = 0 + } + // Copy and reduce buckets. 
+ var j int + for i, count := range counts { + h.counts[j] += count + if buckets[i+1] == h.buckets[j+1] { + j++ + } + } + if h.hasSum { + h.sum = sum + } +} + +func (h *batchHistogram) Desc() *Desc { + return h.desc +} + +func (h *batchHistogram) Write(out *dto.Metric) error { + h.mu.Lock() + defer h.mu.Unlock() + + sum := float64(0) + if h.hasSum { + sum = h.sum + } + dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) + totalCount := uint64(0) + for i, count := range h.counts { + totalCount += count + if !h.hasSum { + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } + } + + // Skip the +Inf bucket, but only for the bucket list. + // It must still count for sum and totalCount. + if math.IsInf(h.buckets[i+1], 1) { + break + } + // Float64Histogram's upper bound is exclusive, so make it inclusive + // by obtaining the next float64 value down, in order. + upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) + dtoBuckets = append(dtoBuckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(totalCount), + UpperBound: proto.Float64(upperBound), + }) + } + out.Histogram = &dto.Histogram{ + Bucket: dtoBuckets, + SampleCount: proto.Uint64(totalCount), + SampleSum: proto.Float64(sum), + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 0000000..4c873a0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,1484 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. 
See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 
0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 
0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 
0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 
0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 
0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + +// A Histogram counts individual observations from an event or sample stream in +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). 
Similar to a +// Summary, it also provides a sum of observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile PromQL function. +// +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) +// +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. Observations are + // usually positive or zero. Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. 
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, +) + +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. 
+func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is +// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. +func ExponentialBucketsRange(min, max float64, count int) []float64 { + if count < 1 { + panic("ExponentialBucketsRange count needs a positive count") + } + if min <= 0 { + panic("ExponentialBucketsRange min needs to be greater than 0") + } + + // Formula for exponential buckets. + // max = min*growthFactor^(bucketCount-1) + + // We know max/min and highest bucket. Solve for growthFactor. + growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + + // Now that we know growthFactor, solve for each bucket. + buckets := make([]float64, count) + for i := 1; i <= count; i++ { + buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. 
Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both reguler buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) + Buckets []float64 + + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). 
Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see + // SparseBucketsZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). + // + // Details about the actually used factor: The factor is calculated as + // 2^(2^n), where n is an integer number between (and including) -8 and + // 4. n is chosen so that the resulting factor is the largest that is + // still smaller or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all NativeHistogram... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. + NativeHistogramBucketFactor float64 + // All observations with an absolute value of less or equal + // NativeHistogramZeroThreshold are accumulated into a “zero” + // bucket. For best results, this should be close to a bucket + // boundary. This is usually the case if picking a power of two. 
If + // NativeHistogramZeroThreshold is left at zero, + // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero + // constant (or any negative float value). + NativeHistogramZeroThreshold float64 + + // The remaining fields define a strategy to limit the number of + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // NativeHistogramMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). 
+ NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. Exemplars are tracked separately +// for each bucket. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. 
+ } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. + h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts as well as exemplars: + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + + // sumBits contains the bits of the float64 representing the sum of all + // observations. + sumBits uint64 + count uint64 + + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less or equal + // the current threshold, see next field. 
+ nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 + // Number of (positive and negative) sparse buckets. + nativeHistogramBucketsNumber uint32 + + // Regular buckets. + buckets []uint64 + + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if sparse buckets should be done, +// too. +func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + atomicAddFloat(&hc.sumBits, v) + if doSparse && !math.IsNaN(v) { + var ( + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) + bucketCreated, isInf bool + ) + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment key by one. 
+ if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cold fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! 
	// See alignment constraint:
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc *Desc

	// Only used in the Write method and for sparse bucket management.
	mtx sync.Mutex

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts

	upperBounds                     []float64
	labelPairs                      []*dto.LabelPair
	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
	nativeHistogramMaxZeroThreshold float64
	nativeHistogramMaxBuckets       uint32
	nativeHistogramMinResetDuration time.Duration
	lastResetTime                   time.Time // Protected by mtx.

	now func() time.Time // To mock out time.Now() for testing.
}

// Desc returns the descriptor of this histogram.
func (h *histogram) Desc() *Desc {
	return h.desc
}

// Observe adds a single observation to the histogram.
func (h *histogram) Observe(v float64) {
	h.observe(v, h.findBucket(v))
}

// ObserveWithExemplar adds a single observation and replaces the exemplar of
// the bucket the observation falls into. It panics if any label in e is
// invalid (see updateExemplar).
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
	i := h.findBucket(v)
	h.observe(v, i)
	h.updateExemplar(v, i, e)
}

// Write serializes the current state of the histogram into out. It swaps the
// hot and cold counts, waits for in-flight observations on the now-cold counts
// to finish, and then merges the cold counts back into the hot ones.
func (h *histogram) Write(out *dto.Metric) error {
	// For simplicity, we protect this whole method by a mutex. It is not in
	// the hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it, if possible at
	// all.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
	// without touching the count bits. See the struct comments for a full
	// description of the algorithm.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	// count is contained unchanged in the lower 63 bits.
	count := n & ((1 << 63) - 1)
	// The most significant bit tells us which counts is hot. The complement
	// is thus the cold one.
	hotCounts := h.counts[n>>63]
	coldCounts := h.counts[(^n)>>63]

	waitForCooldown(count, coldCounts)

	his := &dto.Histogram{
		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
		SampleCount: proto.Uint64(count),
		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
	}
	out.Histogram = his
	out.Label = h.labelPairs

	var cumCount uint64
	for i, upperBound := range h.upperBounds {
		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
		his.Bucket[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(cumCount),
			UpperBound:      proto.Float64(upperBound),
		}
		if e := h.exemplars[i].Load(); e != nil {
			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
		}
	}
	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
		b := &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(math.Inf(1)),
			Exemplar:        e.(*dto.Exemplar),
		}
		his.Bucket = append(his.Bucket, b)
	}
	if h.nativeHistogramSchema > math.MinInt32 {
		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)

		// Deferred so that the cold sparse buckets are merged back into the
		// hot counts only after they have been serialized below.
		defer func() {
			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
		}()

		his.ZeroCount = proto.Uint64(zeroBucket)
		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
	}
	addAndResetCounts(hotCounts, coldCounts)
	return nil
}

// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
	// slightly faster than the binary search. If we really care, we could
	// switch from one search strategy to the other depending on the number
	// of buckets.
	//
	// Microbenchmarks (BenchmarkHistogramNoLabels):
	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	return sort.SearchFloat64s(h.upperBounds, v)
}

// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
	// Do not add to sparse buckets for NaN observations.
	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
	// We increment h.countAndHotIdx so that the counter in the lower
	// 63 bits gets incremented. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1)
	hotCounts := h.counts[n>>63]
	hotCounts.observe(v, bucket, doSparse)
	if doSparse {
		h.limitBuckets(hotCounts, v, bucket)
	}
}

// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
// by a concurrently happening Write call).
func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
	if h.nativeHistogramMaxBuckets == 0 {
		return // No limit configured.
	}
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded yet.
	}

	h.mtx.Lock()
	defer h.mtx.Unlock()

	// The hot counts might have been swapped just before we acquired the
	// lock. Re-fetch the hot counts first...
	n := atomic.LoadUint64(&h.countAndHotIdx)
	hotIdx := n >> 63
	coldIdx := (^n) >> 63
	hotCounts := h.counts[hotIdx]
	coldCounts := h.counts[coldIdx]
	// ...and then check again if we really have to reduce the bucket count.
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded after all.
	}
	// Try the various strategies in order.
	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
		return
	}
	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
		return
	}
	h.doubleBucketWidth(hotCounts, coldCounts)
}

// maybeReset resets the whole histogram if at least
// h.nativeHistogramMinResetDuration has passed. It returns true if the
// histogram has been reset. The caller must have locked h.mtx.
func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
	// We are using the possibly mocked h.now() rather than
	// time.Since(h.lastResetTime) to enable testing.
	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
		return false
	}
	// Completely reset coldCounts.
	h.resetCounts(cold)
	// Repeat the latest observation to not lose it completely.
	cold.observe(value, bucket, true)
	// Make coldCounts the new hot counts while resetting countAndHotIdx.
	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
	count := n & ((1 << 63) - 1)
	waitForCooldown(count, hot)
	// Finally, reset the formerly hot counts, too.
	h.resetCounts(hot)
	h.lastResetTime = h.now()
	return true
}

// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
// limits how far the zero bucket can be extended, and if that's not enough to
// include an existing bucket, the method returns false. The caller must have
// locked h.mtx.
func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
		return false
	}
	// Find the key of the bucket closest to zero.
	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
	if smallestNegativeKey < smallestKey {
		smallestKey = smallestNegativeKey
	}
	if smallestKey == math.MaxInt32 {
		return false // No sparse buckets populated at all.
	}
	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
		return false // New threshold would exceed the max threshold.
	}
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// Remove applicable buckets.
	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	// Make cold counts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the new zero threshold in the cold counts, too...
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// ...and then merge the newly deleted buckets into the wider zero
	// bucket.
	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			if key == smallestKey {
				// Merge into hot zero bucket...
				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
				// ...and delete from cold counts.
				coldBuckets.Delete(key)
				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
			} else {
				// Add to corresponding hot bucket...
				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
				}
				// ...and reset cold bucket.
				atomic.StoreInt64(bucket, 0)
			}
			return true
		}
	}

	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
	return true
}

// doubleBucketWidth doubles the bucket width (by decrementing the schema
// number). Note that very sparse buckets could lead to a low reduction of the
// bucket count (or even no reduction at all). The method does nothing if the
// schema is already -4.
func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
	if coldSchema == -4 {
		return // Already at lowest resolution.
	}
	coldSchema--
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// Play it simple and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
	// Make coldCounts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the schema in the cold counts, too...
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// ...and then merge the cold buckets into the wider hot buckets.
	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			// Adjust key to match the bucket to merge into.
			if key > 0 {
				key++
			}
			key /= 2
			// Add to corresponding hot bucket.
			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
			}
			return true
		}
	}

	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
	// Play it simple again and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
}

// resetCounts resets the provided histogramCounts to its initial state: all
// conventional and sparse buckets empty, sum and count zero, and the zero
// threshold and schema back at their configured initial values.
func (h *histogram) resetCounts(counts *histogramCounts) {
	atomic.StoreUint64(&counts.sumBits, 0)
	atomic.StoreUint64(&counts.count, 0)
	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
	for i := range h.upperBounds {
		atomic.StoreUint64(&counts.buckets[i], 0)
	}
	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}

// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
	if l == nil {
		return
	}
	e, err := newExemplar(v, h.now(), l)
	if err != nil {
		panic(err)
	}
	h.exemplars[bucket].Store(e)
}

// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	*MetricVec
}

// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) 
+ if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. 
// The order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the HistogramVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
	vec, err := v.MetricVec.CurryWith(labels)
	if vec != nil {
		return &HistogramVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}

// constHistogram is a histogram with fixed, externally provided values,
// created by NewConstHistogram. It implements Metric but not Histogram.
type constHistogram struct {
	desc       *Desc
	count      uint64
	sum        float64
	buckets    map[float64]uint64 // Upper bound -> cumulative count, excluding +Inf.
	labelPairs []*dto.LabelPair
}

// Desc returns the descriptor of this metric.
func (h *constHistogram) Desc() *Desc {
	return h.desc
}

// Write serializes the fixed values into out, sorting the buckets by upper
// bound as required by the protobuf format.
func (h *constHistogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{}

	buckets := make([]*dto.Bucket, 0, len(h.buckets))

	his.SampleCount = proto.Uint64(h.count)
	his.SampleSum = proto.Float64(h.sum)
	for upperBound, count := range h.buckets {
		buckets = append(buckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		})
	}

	if len(buckets) > 0 {
		sort.Sort(buckSort(buckets))
	}
	his.Bucket = buckets

	out.Histogram = his
	out.Label = h.labelPairs

	return nil
}

// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
		return nil, err
	}
	return &constHistogram{
		desc:       desc,
		count:      count,
		sum:        sum,
		buckets:    buckets,
		labelPairs: MakeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) Metric {
	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}

// buckSort sorts a slice of dto.Bucket by ascending upper bound.
type buckSort []*dto.Bucket

func (s buckSort) Len() int {
	return len(s)
}

func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}

// pickSchema returns the largest number n between -4 and 8 such that
// 2^(2^-n) is less or equal the provided bucketFactor.
//
// Special cases:
//   - bucketFactor <= 1: panics.
//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8
	case floor >= 4:
		return -4
	default:
		return -int32(floor)
	}
}

// makeBuckets converts the sparse buckets in the provided sync.Map into the
// span/delta representation used by dto.Histogram: a list of spans (offset and
// length of runs of consecutive buckets) and a list of count deltas between
// successive buckets.
func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
	var ii []int
	buckets.Range(func(k, v interface{}) bool {
		ii = append(ii, k.(int))
		return true
	})
	sort.Ints(ii)

	if len(ii) == 0 {
		return nil, nil
	}

	var (
		spans     []*dto.BucketSpan
		deltas    []int64
		prevCount int64
		nextI     int
	)

	appendDelta := func(count int64) {
		*spans[len(spans)-1].Length++
		deltas = append(deltas, count-prevCount)
		prevCount = count
	}

	for n, i := range ii {
		v, _ := buckets.Load(i)
		count := atomic.LoadInt64(v.(*int64))
		// Multiple spans with only small gaps in between are probably
		// encoded more efficiently as one larger span with a few empty
		// buckets. Needs some research to find the sweet spot. For now,
		// we assume that gaps of one or two buckets should not create
		// a new span.
		iDelta := int32(i - nextI)
		if n == 0 || iDelta > 2 {
			// We have to create a new span, either because we are
			// at the very beginning, or because we have found a gap
			// of more than two buckets.
			spans = append(spans, &dto.BucketSpan{
				Offset: proto.Int32(iDelta),
				Length: proto.Uint32(0),
			})
		} else {
			// We have found a small gap (or no gap at all).
			// Insert empty buckets as needed.
			for j := int32(0); j < iDelta; j++ {
				appendDelta(0)
			}
		}
		appendDelta(count)
		nextI = i + 1
	}
	return spans, deltas
}

// addToBucket increments the sparse bucket at key by the provided amount. It
// returns true if a new sparse bucket had to be created for that.
func addToBucket(buckets *sync.Map, key int, increment int64) bool {
	if existingBucket, ok := buckets.Load(key); ok {
		// Fast path without allocation.
		atomic.AddInt64(existingBucket.(*int64), increment)
		return false
	}
	// Bucket doesn't exist yet. Slow path allocating new counter.
	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
		// The bucket was created concurrently in another goroutine.
		// Have to increment after all.
		atomic.AddInt64(actualBucket.(*int64), increment)
		return false
	}
	return true
}

// addAndReset returns a function to be used with sync.Map.Range of sparse
// buckets in coldCounts. It increments the buckets in the provided hotBuckets
// according to the buckets ranged through. It then resets all buckets ranged
// through to 0 (but leaves them in place so that they don't need to get
// recreated on the next scrape).
func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
	return func(k, v interface{}) bool {
		bucket := v.(*int64)
		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
			atomic.AddUint32(bucketNumber, 1)
		}
		atomic.StoreInt64(bucket, 0)
		return true
	}
}

// deleteSyncMap removes all entries from the provided sync.Map.
func deleteSyncMap(m *sync.Map) {
	m.Range(func(k, v interface{}) bool {
		m.Delete(k)
		return true
	})
}

// findSmallestKey returns the smallest int key present in the provided
// sync.Map, or math.MaxInt32 if the map is empty.
func findSmallestKey(m *sync.Map) int {
	result := math.MaxInt32
	m.Range(func(k, v interface{}) bool {
		key := k.(int)
		if key < result {
			result = key
		}
		return true
	})
	return result
}

// getLe returns the upper inclusive bound of the bucket with the provided key
// at the provided schema.
func getLe(key int, schema int32) float64 {
	// Here a bit of context about the behavior for the last bucket counting
	// regular numbers (called simply "last bucket" below) and the bucket
	// counting observations of ±Inf (called "inf bucket" below, with a key
	// one higher than that of the "last bucket"):
	//
	// If we apply the usual formula to the last bucket, its upper bound
	// would be calculated as +Inf. The reason is that the max possible
	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
	// the calculated bucket boundaries. So the calculated boundary has to
	// be larger than math.MaxFloat64, and the only float64 larger than
	// math.MaxFloat64 is +Inf. However, we want to count actual
	// observations of ±Inf in the inf bucket. Therefore, we have to treat
	// the upper bound of the last bucket specially and set it to
	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
	// being one higher than that of the last bucket, naturally comes out as
	// +Inf by the usual formula. So that's fine.)
	//
	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
	// 1024. If there were a float64 number following math.MaxFloat64, it
	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
	// of 0.5 and an exp of 1025. However, since frac must be smaller than
	// 1, and exp must be smaller than 1025, either representation overflows
	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
	// largest possible float64. Q.E.D.) However, the formula for
	// calculating the upper bound from the idx and schema of the last
	// bucket results in precisely that. It is either frac=1.0 & exp=1024
	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
	// by the way, a power of two where the exponent itself is a power of
	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
	// schemas.) So these are the special cases we have to catch below.
	if schema < 0 {
		exp := key << -schema
		if exp == 1024 {
			// This is the last bucket before the overflow bucket
			// (for ±Inf observations). Return math.MaxFloat64 as
			// explained above.
			return math.MaxFloat64
		}
		return math.Ldexp(1, exp)
	}

	fracIdx := key & ((1 << schema) - 1)
	frac := nativeHistogramBounds[schema][fracIdx]
	exp := (key >> schema) + 1
	if frac == 0.5 && exp == 1025 {
		// This is the last bucket before the overflow bucket (for ±Inf
		// observations). Return math.MaxFloat64 as explained above.
		return math.MaxFloat64
	}
	return math.Ldexp(frac, exp)
}

// waitForCooldown returns after the count field in the provided histogramCounts
// has reached the provided count value.
func waitForCooldown(count uint64, counts *histogramCounts) {
	for count != atomic.LoadUint64(&counts.count) {
		runtime.Gosched() // Let observations get work done.
	}
}

// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
	for {
		loadedBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
			break
		}
	}
}

// atomicDecUint32 atomically decrements the uint32 p points to. See
// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
func atomicDecUint32(p *uint32) {
	atomic.AddUint32(p, ^uint32(0))
}

// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
// bucket) from the cold counts to the corresponding fields in the hot
// counts. Those fields are then reset to 0 in the cold counts.
func addAndResetCounts(hot, cold *histogramCounts) {
	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
	atomic.StoreUint64(&cold.count, 0)
	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
	atomicAddFloat(&hot.sumBits, coldSum)
	atomic.StoreUint64(&cold.sumBits, 0)
	for i := range hot.buckets {
		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
		atomic.StoreUint64(&cold.buckets[i], 0)
	}
	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
// Copyright (c) 2015 Björn Rabenstein
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
// The code in this package is copy/paste to avoid a dependency. Hence this file
// carries the copyright of the original repo.
// https://github.com/beorn7/floats
package internal

import (
	"math"
)

// minNormalFloat64 is the smallest positive normal value of type float64.
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

// AlmostEqualFloat64 returns true if a and b are equal within a relative error
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
// details of the applied method.
func AlmostEqualFloat64(a, b, epsilon float64) bool {
	if a == b {
		return true
	}
	absA := math.Abs(a)
	absB := math.Abs(b)
	diff := math.Abs(a - b)
	// If either value is exactly zero or both are subnormal, relative error
	// is meaningless; compare against a scaled absolute error instead.
	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
		return diff < epsilon*minNormalFloat64
	}
	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
			return false
		}
	}
	return true
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..fd0750f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,654 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// Maintaining `GetUnifiedDiffString` here because original repository
// (https://github.com/pmezard/go-difflib) is no longer maintained.
package internal

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// max returns the larger of two ints.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// calculateRatio returns 2*matches/length, the similarity ratio used by
// SequenceMatcher, or 1.0 for empty input (two empty sequences are identical).
func calculateRatio(matches, length int) float64 {
	if length > 0 {
		return 2.0 * float64(matches) / float64(length)
	}
	return 1.0
}

// Match describes a matching block: a elements starting at A match b elements
// starting at B, for Size elements.
type Match struct {
	A    int
	B    int
	Size int
}

// OpCode describes how to turn a[I1:I2] into b[J1:J2]; Tag is one of
// 'r' (replace), 'd' (delete), 'i' (insert), 'e' (equal).
type OpCode struct {
	Tag byte
	I1  int
	I2  int
	J1  int
	J2  int
}

// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches peoples' eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" .
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
	a              []string
	b              []string
	b2j            map[string][]int
	IsJunk         func(string) bool
	autoJunk       bool
	bJunk          map[string]struct{}
	matchingBlocks []Match
	fullBCount     map[string]int
	bPopular       map[string]struct{}
	opCodes        []OpCode
}

// NewMatcher returns a SequenceMatcher comparing a and b, with auto-junk
// detection enabled and no explicit junk predicate.
func NewMatcher(a, b []string) *SequenceMatcher {
	m := SequenceMatcher{autoJunk: true}
	m.SetSeqs(a, b)
	return &m
}

// NewMatcherWithJunk returns a SequenceMatcher comparing a and b using the
// provided junk predicate, with auto-junk detection set to autoJunk.
func NewMatcherWithJunk(a, b []string, autoJunk bool,
	isJunk func(string) bool,
) *SequenceMatcher {
	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
	m.SetSeqs(a, b)
	return &m
}

// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
	m.SetSeq1(a)
	m.SetSeq2(b)
}

// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
	// NOTE(review): this compares the address of the local parameter a with
	// the address of the field m.a — presumably intended as an identity
	// check mirroring the Python original; confirm it ever fires upstream.
	if &a == &m.a {
		return
	}
	m.a = a
	m.matchingBlocks = nil
	m.opCodes = nil
}

// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. 
+// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. 
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. 
+// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). +func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. 
+ if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. 
+func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). +func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. 
+// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. +func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, 
range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go new file mode 100644 index 0000000..723b45d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// GoCollectorRule denies (Deny true) or allows runtime/metrics whose names
// match Matcher.
type GoCollectorRule struct {
	Matcher *regexp.Regexp
	Deny    bool
}

// GoCollectorOptions should not be used directly by anything except the
// `collectors` package; use it via that package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// This is internal, so external users can only reach it via the
// `collector.WithGoCollector*` methods.
type GoCollectorOptions struct {
	DisableMemStatsLikeMetrics bool
	RuntimeMetricSumForHist    map[string]string
	RuntimeMetricRules         []GoCollectorRule
}
+// +// Returns false if a name could not be produced, or if Prometheus does not understand +// the runtime/metrics Kind. +// +// Note that the main reason a name couldn't be produced is if the runtime/metrics +// package exports a name with characters outside the valid Prometheus metric name +// character set. This is theoretically possible, but should never happen in practice. +// Still, don't rely on it. +func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) { + namespace := "go" + + comp := strings.SplitN(d.Name, ":", 2) + key := comp[0] + unit := comp[1] + + // The last path element in the key is the name, + // the rest is the subsystem. + subsystem := path.Dir(key[1:] /* remove leading / */) + name := path.Base(key) + + // subsystem is translated by replacing all / and - with _. + subsystem = strings.ReplaceAll(subsystem, "/", "_") + subsystem = strings.ReplaceAll(subsystem, "-", "_") + + // unit is translated assuming that the unit contains no + // non-ASCII characters. + unit = strings.ReplaceAll(unit, "-", "_") + unit = strings.ReplaceAll(unit, "*", "_") + unit = strings.ReplaceAll(unit, "/", "_per_") + + // name has - replaced with _ and is concatenated with the unit and + // other data. + name = strings.ReplaceAll(name, "-", "_") + name += "_" + unit + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { + name += "_total" + } + + valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + switch d.Kind { + case metrics.KindUint64: + case metrics.KindFloat64: + case metrics.KindFloat64Histogram: + default: + valid = false + } + return namespace, subsystem, name, valid +} + +// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram +// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces +// a reduced set of buckets. 
This function always removes any -Inf bucket as it's represented +// as the bottom-most upper-bound inclusive bucket in Prometheus. +func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { + switch unit { + case "bytes": + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) + case "seconds": + // Re-bucket as powers of 10 and then merge all buckets greater + // than 1 second into the +Inf bucket. + b := reBucketExp(buckets, 10) + for i := range b { + if b[i] <= 1 { + continue + } + b[i] = math.Inf(1) + b = b[:i+1] + break + } + return b + } + return buckets +} + +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and +// downsamples the buckets to those a multiple of base apart. The end result +// is a roughly exponential (in many cases, perfectly exponential) bucketing +// scheme. +func reBucketExp(buckets []float64, base float64) []float64 { + bucket := buckets[0] + var newBuckets []float64 + // We may see a -Inf here, in which case, add it and skip it + // since we risk producing NaNs otherwise. + // + // We need to preserve -Inf values to maintain runtime/metrics + // conventions. We'll strip it out later. + if bucket == math.Inf(-1) { + newBuckets = append(newBuckets, bucket) + buckets = buckets[1:] + bucket = buckets[0] + } + // From now on, bucket should always have a non-Inf value because + // Infs are only ever at the ends of the bucket lists, so + // arithmetic operations on it are non-NaN. + for i := 1; i < len(buckets); i++ { + if bucket >= 0 && buckets[i] < bucket*base { + // The next bucket we want to include is at least bucket*base. + continue + } else if bucket < 0 && buckets[i] < bucket/base { + // In this case the bucket we're targeting is negative, and since + // we're ascending through buckets here, we need to divide to get + // closer to zero exponentially. 
+ continue + } + // The +Inf bucket will always be the last one, and we'll always + // end up including it here because bucket + newBuckets = append(newBuckets, bucket) + bucket = buckets[i] + } + return append(newBuckets, bucket) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 0000000..6515c11 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +// MetricSorter is a sortable slice of *dto.Metric. +type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. 
However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(MetricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 0000000..c1b8fad --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. 
+type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%w: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%w: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%w: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 0000000..b5119c5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,256 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sort" + "strings" + "time" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). 
It is + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). 
See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. 
This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? + return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. 
+type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. +// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts == (time.Time{}) { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) 
+ if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 0000000..7c12b21 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 0000000..7348df0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 0000000..03773b2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. 
+// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 128 runes in total. 
+type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 0000000..8548dd1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,164 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). 
Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewProcessCollector instead. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + c.pidFn = getPIDFn() + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. 
+ if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} + +// NewPidFileFn returns a function that retrieves a pid from the specified file. +// It is meant to be used for the PidFn field in ProcessCollectorOpts. +func NewPidFileFn(pidFilePath string) func() (int, error) { + return func() (int, error) { + content, err := os.ReadFile(pidFilePath) + if err != nil { + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(content))) + if err != nil { + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) + } + + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go new file mode 100644 index 0000000..b1e363d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js +// +build js + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 0000000..c0152cd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !windows && !js +// +build !windows,!js + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 0000000..f973398 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + 
} +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go new file mode 100644 index 0000000..8031e87 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -0,0 +1,376 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promauto provides alternative constructors for the fundamental +// Prometheus metric types and their …Vec and …Func variants. The difference to +// their counterparts in the prometheus package is that the promauto +// constructors register the Collectors with a registry before returning them. +// There are two sets of constructors. The constructors in the first set are +// top-level functions, while the constructors in the other set are methods of +// the Factory type. The top-level function return Collectors registered with +// the global registry (prometheus.DefaultRegisterer), while the methods return +// Collectors registered with the registry the Factory was constructed with. All +// constructors panic if the registration fails. 
+// +// The following example is a complete program to create a histogram of normally +// distributed random numbers from the math/rand package: +// +// package main +// +// import ( +// "math/rand" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// +// func Random() { +// for { +// histogram.Observe(rand.NormFloat64()) +// } +// } +// +// func main() { +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// Prometheus's version of a minimal hello-world program: +// +// package main +// +// import ( +// "fmt" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// func main() { +// http.Handle("/", promhttp.InstrumentHandlerCounter( +// promauto.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hello_requests_total", +// Help: "Total number of hello-world requests by HTTP code.", +// }, +// []string{"code"}, +// ), +// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprint(w, "Hello, world!") +// }), +// )) +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } +// +// A Factory is created with the With(prometheus.Registerer) function, which +// enables two usage pattern. 
With(prometheus.Registerer) can be called once per +// line: +// +// var ( +// reg = prometheus.NewRegistry() +// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = promauto.With(reg).NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) +// +// Or it can be used to create a Factory once to be used multiple times: +// +// var ( +// reg = prometheus.NewRegistry() +// factory = promauto.With(reg) +// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = factory.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) +// +// This appears very handy. So why are these constructors locked away in a +// separate package? +// +// The main problem is that registration may fail, e.g. if a metric inconsistent +// with or equal to the newly to be registered one is already registered. +// Therefore, the Register method in the prometheus.Registerer interface returns +// an error, and the same is the case for the top-level prometheus.Register +// function that registers with the global registry. The prometheus package also +// provides MustRegister versions for both. They panic if the registration +// fails, and they clearly call this out by using the Must… idiom. Panicking is +// problematic in this case because it doesn't just happen on input provided by +// the caller that is invalid on its own. 
Things are a bit more subtle here: +// Metric creation and registration tend to be spread widely over the +// codebase. It can easily happen that an incompatible metric is added to an +// unrelated part of the code, and suddenly code that used to work perfectly +// fine starts to panic (provided that the registration of the newly added +// metric happens before the registration of the previously existing +// metric). This may come as an even bigger surprise with the global registry, +// where simply importing another package can trigger a panic (if the newly +// imported package registers metrics in its init function). At least, in the +// prometheus package, creation of metrics and other collectors is separate from +// registration. You first create the metric, and then you decide explicitly if +// you want to register it with a local or the global registry, and if you want +// to handle the error or risk a panic. With the constructors in the promauto +// package, registration is automatic, and if it fails, it will always +// panic. Furthermore, the constructors will often be called in the var section +// of a file, which means that panicking will happen as a side effect of merely +// importing a package. +// +// A separate package allows conservative users to entirely ignore it. And +// whoever wants to use it, will do so explicitly, with an opportunity to read +// this warning. +// +// Enjoy promauto responsibly! +package promauto + +import "github.com/prometheus/client_golang/prometheus" + +// NewCounter works like the function of the same name in the prometheus package +// but it automatically registers the Counter with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. 
+func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { + return With(prometheus.DefaultRegisterer).NewCounter(opts) +} + +// NewCounterVec works like the function of the same name in the prometheus +// package but it automatically registers the CounterVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec +// panics. +func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { + return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) +} + +// NewCounterFunc works like the function of the same name in the prometheus +// package but it automatically registers the CounterFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc +// panics. +func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { + return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) +} + +// NewGauge works like the function of the same name in the prometheus package +// but it automatically registers the Gauge with the +// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. +func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { + return With(prometheus.DefaultRegisterer).NewGauge(opts) +} + +// NewGaugeVec works like the function of the same name in the prometheus +// package but it automatically registers the GaugeVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. +func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { + return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) +} + +// NewGaugeFunc works like the function of the same name in the prometheus +// package but it automatically registers the GaugeFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. 
+func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { + return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) +} + +// NewSummary works like the function of the same name in the prometheus package +// but it automatically registers the Summary with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. +func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { + return With(prometheus.DefaultRegisterer).NewSummary(opts) +} + +// NewSummaryVec works like the function of the same name in the prometheus +// package but it automatically registers the SummaryVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec +// panics. +func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { + return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) +} + +// NewHistogram works like the function of the same name in the prometheus +// package but it automatically registers the Histogram with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. +func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { + return With(prometheus.DefaultRegisterer).NewHistogram(opts) +} + +// NewHistogramVec works like the function of the same name in the prometheus +// package but it automatically registers the HistogramVec with the +// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec +// panics. +func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { + return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) +} + +// NewUntypedFunc works like the function of the same name in the prometheus +// package but it automatically registers the UntypedFunc with the +// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc +// panics. 
+func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { + return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) +} + +// Factory provides factory methods to create Collectors that are automatically +// registered with a Registerer. Create a Factory with the With function, +// providing a Registerer to auto-register created Collectors with. The zero +// value of a Factory creates Collectors that are not registered with any +// Registerer. All methods of the Factory panic if the registration fails. +type Factory struct { + r prometheus.Registerer +} + +// With creates a Factory using the provided Registerer for registration of the +// created Collectors. If the provided Registerer is nil, the returned Factory +// creates Collectors that are not registered with any Registerer. +func With(r prometheus.Registerer) Factory { return Factory{r} } + +// NewCounter works like the function of the same name in the prometheus package +// but it automatically registers the Counter with the Factory's Registerer. +func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { + c := prometheus.NewCounter(opts) + if f.r != nil { + f.r.MustRegister(c) + } + return c +} + +// NewCounterVec works like the function of the same name in the prometheus +// package but it automatically registers the CounterVec with the Factory's +// Registerer. +func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { + c := prometheus.NewCounterVec(opts, labelNames) + if f.r != nil { + f.r.MustRegister(c) + } + return c +} + +// NewCounterFunc works like the function of the same name in the prometheus +// package but it automatically registers the CounterFunc with the Factory's +// Registerer. 
+func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { + c := prometheus.NewCounterFunc(opts, function) + if f.r != nil { + f.r.MustRegister(c) + } + return c +} + +// NewGauge works like the function of the same name in the prometheus package +// but it automatically registers the Gauge with the Factory's Registerer. +func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { + g := prometheus.NewGauge(opts) + if f.r != nil { + f.r.MustRegister(g) + } + return g +} + +// NewGaugeVec works like the function of the same name in the prometheus +// package but it automatically registers the GaugeVec with the Factory's +// Registerer. +func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { + g := prometheus.NewGaugeVec(opts, labelNames) + if f.r != nil { + f.r.MustRegister(g) + } + return g +} + +// NewGaugeFunc works like the function of the same name in the prometheus +// package but it automatically registers the GaugeFunc with the Factory's +// Registerer. +func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { + g := prometheus.NewGaugeFunc(opts, function) + if f.r != nil { + f.r.MustRegister(g) + } + return g +} + +// NewSummary works like the function of the same name in the prometheus package +// but it automatically registers the Summary with the Factory's Registerer. +func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { + s := prometheus.NewSummary(opts) + if f.r != nil { + f.r.MustRegister(s) + } + return s +} + +// NewSummaryVec works like the function of the same name in the prometheus +// package but it automatically registers the SummaryVec with the Factory's +// Registerer. 
+func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { + s := prometheus.NewSummaryVec(opts, labelNames) + if f.r != nil { + f.r.MustRegister(s) + } + return s +} + +// NewHistogram works like the function of the same name in the prometheus +// package but it automatically registers the Histogram with the Factory's +// Registerer. +func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { + h := prometheus.NewHistogram(opts) + if f.r != nil { + f.r.MustRegister(h) + } + return h +} + +// NewHistogramVec works like the function of the same name in the prometheus +// package but it automatically registers the HistogramVec with the Factory's +// Registerer. +func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { + h := prometheus.NewHistogramVec(opts, labelNames) + if f.r != nil { + f.r.MustRegister(h) + } + return h +} + +// NewUntypedFunc works like the function of the same name in the prometheus +// package but it automatically registers the UntypedFunc with the Factory's +// Registerer. +func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { + u := prometheus.NewUntypedFunc(opts, function) + if f.r != nil { + f.r.MustRegister(u) + } + return u +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000..9819917 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,374 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. 
+ if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} + +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} + +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + 
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d 
*responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, 
readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. 
+ if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000..a4cc981 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,395 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. 
+// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. 
Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) +} + +// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which +// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after +// call to `done` of that `Gather`. +func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilities that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. 
+ defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, done, err := reg.Gather() + defer done() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. + httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. 
+ return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. 
+ cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. 
By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional Logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. Note that the + // type of a reported error is often prometheus.MultiError, which + // formats into a multi-line error string. If you want to avoid the + // latter, create a Logger implementation that detects a + // prometheus.MultiError and formats the contained errors into one line. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. 
(2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool +} + +// gzipAccepted returns whether the client will accept gzip-encoded content. 
+func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 0000000..2108678 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,247 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + } +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. For the "method" label a predefined default label value set +// is used to filter given values. Values besides predefined values will count +// as `unknown` method.`WithExtraMethods` can be used to add more +// methods to the set. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. 
+// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { + rtOpts := defaultOptions() + for _, o := range opts { + o.apply(rtOpts) + } + + code, method := checkLabels(counter) + + return func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + addWithExemplar( + counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + 1, + rtOpts.getExemplarFn(r.Context()), + ) + } + return resp, err + } +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. For the "method" label a predefined +// default label value set is used to filter given values. Values besides +// predefined values will count as `unknown` method. `WithExtraMethods` +// can be used to add more methods to the set. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. 
+// +// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { + rtOpts := defaultOptions() + for _, o := range opts { + o.apply(rtOpts) + } + + code, method := checkLabels(obs) + + return func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + observeWithExemplar( + obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + time.Since(start).Seconds(), + rtOpts.getExemplarFn(r.Context()), + ) + } + return resp, err + } +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. 
(Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + 
it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000..cca67a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,570 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. 
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. +func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have valid metric and label names and must have zero, +// one, or two non-const non-curried labels. For those, the only allowed label +// names are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the request duration +// in seconds. Partitioning happens by HTTP status code and/or HTTP method if +// the respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. 
Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + code, method := checkLabels(obs) + + if code { + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } + } + + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have valid metric and label names and must have zero, one, or two +// non-const non-curried labels. For those, the only allowed label names are +// "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the +// CounterVec happens by HTTP status code and/or HTTP method if the respective +// instance label names are present in the CounterVec. For unpartitioned +// counting, use a CounterVec with zero labels. 
+// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + code, method := checkLabels(counter) + + if code { + return func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + addWithExemplar( + counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } + } + + return func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + addWithExemplar( + counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have valid +// metric and label names and must have zero, one, or two non-const non-curried +// labels. For those, the only allowed label names are "code" and "method". The +// function panics otherwise. For the "method" label a predefined default label +// value set is used to filter given values. Values besides predefined values +// will count as `unknown` method.`WithExtraMethods` can be used to add more +// methods to the set. The Observe method of the Observer in the +// ObserverVec is called with the request duration in seconds. Partitioning +// happens by HTTP status code and/or HTTP method if the respective instance +// label names are present in the ObserverVec. For unpartitioned observations, +// use an ObserverVec with zero labels. 
Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + code, method := checkLabels(obs) + + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + observeWithExemplar( + obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + }) + next.ServeHTTP(d, r) + } +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the request size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. 
+// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + code, method := checkLabels(obs) + if code { + return func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } + } + + return func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the response size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. 
Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(d.Written()), + hOpts.getExemplarFn(r.Context()), + ) + }) +} + +// checkLabels returns whether the provided Collector has a non-const, +// non-curried label named "code" and/or "method". It panics if the provided +// Collector does not have a Desc or has more than one Desc or its Desc is +// invalid. It also panics if the Collector has any non-const, non-curried +// labels that are not named "code" or "method". +func checkLabels(c prometheus.Collector) (code, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Make sure the Collector has a valid Desc by registering it with a + // temporary registry. + prometheus.NewRegistry().MustRegister(c) + + // Create a ConstMetric with the Desc. 
Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. 
+var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod, extraMethods...) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +// If the wrapped http.Handler has a known method, it will be sanitized and returned. +// Otherwise, "unknown" will be returned. The known method list can be extended +// as needed by using extraMethods parameter. +func sanitizeMethod(m string, extraMethods ...string) string { + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for + // the methods chosen as default. + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + case "TRACE", "trace": + return "trace" + case "PATCH", "patch": + return "patch" + default: + for _, method := range extraMethods { + if strings.EqualFold(m, method) { + return strings.ToLower(m) + } + } + return "unknown" + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, sanitizeCode will return 200, for consistency with behavior in +// the stdlib. 
+func sanitizeCode(s int) string { + // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + if s >= 100 && s <= 599 { + return strconv.Itoa(s) + } + return "unknown" + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go new file mode 100644 index 0000000..c590d91 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -0,0 +1,58 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" +) + +// Option are used to configure both handler (middleware) or round tripper. +type Option interface { + apply(*options) +} + +// options store options for both a handler or round tripper. +type options struct { + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels +} + +func defaultOptions() *options { + return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} +} + +type optionApplyFunc func(*options) + +func (o optionApplyFunc) apply(opt *options) { o(opt) } + +// WithExtraMethods adds additional HTTP methods to the list of allowed methods. +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. +// +// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. +func WithExtraMethods(methods ...string) Option { + return optionApplyFunc(func(o *options) { + o.extraMethods = methods + }) +} + +// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. +// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric +// will get instrumented without exemplar. 
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { + return optionApplyFunc(func(o *options) { + o.getExemplarFn = getExemplarFn + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 0000000..09e34d3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,1072 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/cespare/xxhash/v2" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. 
Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. 
+func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) 
The function + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. 
If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. 
+type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +// Error formats the contained errors as a bullet point list, preceded by the +// total number of errors. Note that this results in a multi-line string. +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. 
+ descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + uncheckedCollectors []Collector + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // All desc IDs XOR'd together. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, XOR it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID ^= desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. 
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. + if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // All desc IDs XOR'd together. 
+ ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID ^= desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. + r.mtx.RUnlock() + return nil, nil + } + + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + for _, collector := range r.collectorsByID { + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. 
+ if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. + defer func() { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } + } + }() + + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Do the same as above, + // just without the default. 
+ select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + } + break + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + // Once both checkedMetricChan and uncheckdMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". 
+func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0o644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %w", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. 
+ ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + multiErr := MultiError{} + if errors.As(err, &multiErr) { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. 
If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. 
+ if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(internal.LabelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) + } + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[hSum] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. 
+ sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. + Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. 
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000..7bc448a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,747 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. 
+// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. Observations are + // usually positive or zero. Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. + Observe(float64) +} + +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. 
While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. 
The
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Only applies to pre-calculated quantiles, does not
+	// apply to _sum and _count. Must be positive. The default value is
+	// DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). 
Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: MakeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{{}, {}}, + } + s.init(s) // Init self-collection. 
+ return s + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: MakeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
+	s.swapBufs(time.Now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set. 
+ for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. 
+ counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. 
+ } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. 
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. 
the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. 
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..f28a76f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. 
The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000..0f9ce63 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. 
+type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000..2d3abc1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,237 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + "time" + "unicode/utf8" + + //nolint:staticcheck // Ignore SA1019. 
Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/prometheus/client_golang/prometheus/internal" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: MakeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + metric *dto.Metric +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + e *dto.Exemplar, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +// MakeLabelPairs is a helper function to create protobuf LabelPairs from the +// variable and constant labels in the provided Desc. The values for the +// variable labels are defined by the labelValues slice, which must be in the +// same order as the corresponding variable labels in the Desc. +// +// This function is only needed for custom Metric implementations. See MetricVec +// example. +func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(internal.LabelPairSorter(labelPairs)) + return labelPairs +} + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. 
+const ExemplarMaxRunes = 128 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. +func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto := timestamppb.New(ts) + if err := tsProto.CheckValid(); err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000..7ae3225 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,642 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that differ in +// their label values. MetricVec is not used directly but as a building block +// for implementations of vectors of a given metric type, like GaugeVec, +// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be +// used for custom Metric implementations. +// +// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in +// FooVec and initialize it with NewMetricVec. Implement wrappers for +// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather +// than (Metric, error). Similarly, create a wrapper for CurryWith that returns +// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also +// add the convenience methods WithLabelValues, With, and MustCurryWith, which +// panic instead of returning errors. See also the MetricVec example. +type MetricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// NewMetricVec returns an initialized metricVec. 
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. 
+func (m *MetricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. +// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + return m.metricMap.deleteByLabels(labels, m.curry) +} + +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } + +// Reset deletes all metrics in this vector. +func (m *MetricVec) Reset() { m.metricMap.Reset() } + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the MetricVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). 
The Reset +// method deletes all metrics, even if called on a curried vector. +// +// Note that CurryWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. +func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &MetricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created (by +// calling the newMetric function provided during construction of the +// MetricVec). +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it in its initial state. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. 
+// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// +// Note that GetMetricWithLabelValues is usually not called directly but through +// a wrapper around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +// +// Note that GetMetricWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric implementation, +// for example GaugeVec. 
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. 
+ metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + old := metrics + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + old[len(old)-1] = metricWithLabelValues{} + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + old := metrics + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) 
+ old[len(old)-1] = metricWithLabelValues{} + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByLabels deletes a metric if the given labels are present in the metric. +func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { + m.mtx.Lock() + defer m.mtx.Unlock() + + var numDeleted int + + for h, metrics := range m.metrics { + i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + // Didn't find matching labels in this metric slice. + continue + } + delete(m.metrics, h) + numDeleted++ + } + + return numDeleted +} + +// findMetricWithPartialLabel returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithPartialLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchPartialLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +// indexOf searches the given slice of strings for the target string and returns +// the index or len(items) as well as a boolean whether the search succeeded. +func indexOf(target string, items []string) (int, bool) { + for i, l := range items { + if l == target { + return i, true + } + } + return len(items), false +} + +// valueMatchesVariableOrCurriedValue determines if a value was previously curried, +// and returns whether it matches either the "base" value or the curried value accordingly. +// It also indicates whether the match is against a curried or uncurried value. +func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. 
+ return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. + varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. 
+func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. 
+func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 
index 0000000..1498ee1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. Wrapping a nil value is valid, resulting +// in a no-op Registerer. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics +// exposed. See also +// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. 
Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// Wrapping a nil value is valid, resulting in a no-op Registerer. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + if r.wrappedRegisterer == nil { + return nil + } + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + if r.wrappedRegisterer == nil { + return + } + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + if r.wrappedRegisterer == nil { + return false + } + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m 
*wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(internal.LabelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. + if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 0000000..20110e4 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 0000000..35904ea --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,914 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: io/prometheus/client/metrics.proto + +package io_prometheus_client + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". + MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair 
proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{1} +} + +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{2} +} + +func (m *Counter) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{3} +} + +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func 
(m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{4} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = 
Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{5} +} + +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. 
+ Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{6} +} + +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil && m.SampleCountFloat != nil { + return *m.SampleCountFloat + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Histogram) GetSchema() int32 { + if m != nil && m.Schema != nil { + return *m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil && m.ZeroThreshold != nil { + return *m.ZeroThreshold + } + return 0 +} + +func (m 
*Histogram) GetZeroCount() uint64 { + if m != nil && m.ZeroCount != nil { + return *m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil && m.ZeroCountFloat != nil { + return *m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. 
+type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{7} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil && m.CumulativeCountFloat != nil { + return *m.CumulativeCountFloat + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// A BucketSpan defines a number of 
consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} + +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSpan.Unmarshal(m, b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return xxx_messageInfo_BucketSpan.Size(m) +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" 
json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{9} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{10} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{11} +} + +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + 
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, + 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, + 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, + 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, + 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, + 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, + 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, + 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, + 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, + 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, + 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, + 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, + 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 
0xf8, 0xfe, 0xd7, 0xd0, + 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, + 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, + 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, + 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, + 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, + 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, + 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, + 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, + 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, + 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, + 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, + 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, + 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, + 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, + 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, + 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, + 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, + 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, + 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, + 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, + 0x34, 0x0f, 
0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, + 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, + 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, + 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, + 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, + 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, + 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, + 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, + 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, + 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, + 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, + 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, + 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, + 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, + 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, + 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, + 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, + 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, + 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, + 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, + 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 
0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, + 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 0000000..636a2c1 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 0000000..7657f84 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. 
If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) + } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, 
p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + 
Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 0000000..64dc0eb --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} + +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) +} + +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. 
To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. 
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtProtoCompact: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } + case FmtProtoText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } + case FmtText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } + } + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 0000000..0f176fa --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 0000000..f819e4f --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +//go:build gofuzz +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 0000000..9d94ae9 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. 
The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. 
+ w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + 
model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. 
+func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. 
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + err = (*e).Timestamp.CheckValid() + if err != nil { + return written, err + } + ts := (*e).Timestamp.AsTime() + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 0000000..5ba503b --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,465 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. 
It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n 
+ if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the 
+// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 0000000..84be064 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,775 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. 
+ currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. 
This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. 
+ p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + // Check for duplicate label names. 
+ labels := make(map[string]struct{}) + for _, l := range p.currentMetric.Label { + lName := l.GetName() + if _, exists := labels[lName]; !exists { + labels[lName] = struct{}{} + } else { + p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + return nil + } + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. 
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := parseFloat(p.currentToken.String()) + if err != nil { + // Create a more helpful error message. 
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). 
+func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. +func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. 
+ } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. 
+// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' 
functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} + +func parseFloat(s string) (float64, error) { + 
if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 0000000..7723656 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 0000000..26e9228 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 0000000..35e739c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. 
+ Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. 
+type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 0000000..fc4de41 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. 
+func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. 
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 0000000..367afec --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializes a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
+func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 0000000..ef89563 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,218 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ScrapeIntervalLabel is the name of the label that holds the scrape interval + // used to scrape a target. 
+ ScrapeIntervalLabel = "__scrape_interval__" + + // ScrapeTimeoutLabel is the name of the label that holds the scrape + // timeout used to scrape a target. + ScrapeTimeoutLabel = "__scrape_timeout__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. 
This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. 
+type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 0000000..6eda08a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. 
The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. 
+ sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. 
+ for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 0000000..00804b7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 0000000..a7b9691 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 0000000..8762b13 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. 
(Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 0000000..bb99889 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. 
+type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 0000000..c909b8a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,317 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at most time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes an interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. 
+func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. 
+type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + switch durationStr { + case "0": + // Allow 0 without a unit. + return 0, nil + case "": + return 0, errors.New("empty duration string") + } + matches := durationRE.FindStringSubmatch(durationStr) + if matches == nil { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var dur time.Duration + + // Parse the match at pos `pos` in the regex and use `mult` to turn that + // into ms, then add that value to the total parsed duration. + var overflowErr error + m := func(pos int, mult time.Duration) { + if matches[pos] == "" { + return + } + n, _ := strconv.Atoi(matches[pos]) + + // Check if the provided duration overflows time.Duration (> ~ 290years). 
+ if n > int((1<<63-1)/mult/time.Millisecond) { + overflowErr = errors.New("duration out of range") + } + d := time.Duration(n) * time.Millisecond + dur += d * mult + + if dur < 0 { + overflowErr = errors.New("duration out of range") + } + } + + m(2, 1000*60*60*24*365) // y + m(4, 1000*60*60*24*7) // w + m(6, 1000*60*60*24) // d + m(8, 1000*60*60) // h + m(10, 1000*60) // m + m(12, 1000) // s + m(14, 1) // ms + + return Duration(dur), overflowErr +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + ) + if ms == 0 { + return "0s" + } + + f := func(unit string, mult int64, exact bool) { + if exact && ms%mult != 0 { + return + } + if v := ms / mult; v > 0 { + r += fmt.Sprintf("%d%s", v, unit) + ms -= v * mult + } + } + + // Only format years and weeks if the remainder is zero, as it is often + // easier to read 90d than 12w6d. + f("y", 1000*60*60*24*365, true) + f("w", 1000*60*60*24*7, true) + + f("d", 1000*60*60*24, false) + f("h", 1000*60*60, false) + f("m", 1000*60, false) + f("s", 1000, false) + f("ms", 1, false) + + return r +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Duration) UnmarshalJSON(bytes []byte) error { + var s string + if err := json.Unmarshal(bytes, &s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (d *Duration) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (d *Duration) UnmarshalText(text []byte) error { + var err error + *d, err = ParseDuration(string(text)) + return err +} + +// MarshalYAML implements the yaml.Marshaler interface. 
+func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 0000000..c9d8fb1 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. 
Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. + ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. 
+func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached Metric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. 
+func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 0000000..7cc33ae --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1,2 @@ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 0000000..a197699 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,12 @@ +--- +linters: + enable: + - godot + - revive + +linter-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..d325872 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 0000000..853eb9d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,121 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. 
+ +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) a suitable maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. 
If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. + +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. + +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. + +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. 
+GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. + +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. +Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. 
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does +not bother to check the size of the file before reading. +``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 0000000..56ba67d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1,2 @@ +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 0000000..7edfe4d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,31 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +include Makefile.common + +%/.unpacked: %.ttar + @echo ">> extracting fixtures $*" + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +fixtures: testdata/fixtures/.unpacked + +update_fixtures: + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ + +.PHONY: build +build: + +.PHONY: test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 0000000..6c8e3e2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,264 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. 
+# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... + +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.13.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.45.2 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. 
+ ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint yamllint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" + $(GO) mod download + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + $(GO) mod tidy + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell which yamllint)) + @echo "yamllint not installed so skipping" +else + yamllint . +endif + +# For backward-compatibility. 
+.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: + @echo ">> running check for unused/missing packages in go.mod" + $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + 
$(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 0000000..53c5e9a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 0000000..43c3773 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,61 @@ +# procfs + +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. 
+ +[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) +[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. 
These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md new file mode 100644 index 0000000..fed02d8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + + diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 0000000..68f36e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "net" + "os" + "strconv" + "strings" +) + +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string + // Flags + Flags byte +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := os.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} + ip := net.ParseIP(columns[0]) + entry.IPAddr = ip + + if 
mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } + + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err + } + + return entry, nil +} + +// IsComplete returns true if ARP entry is marked with complete flag. +func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 0000000..f5b7939 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go new file mode 100644 index 0000000..bf4f3b4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CmdLine returns the command line of the kernel. +func (fs FS) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) + if err != nil { + return nil, err + } + + return strings.Fields(string(data)), nil +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 0000000..ff6b927 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,482 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. 
+type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + +// CPUInfo returns information about current system CPUs. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor", "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + 
cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := 
bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := 
CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, 
fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return 
cpuinfo, nil +} + +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line. +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go new file mode 100644 index 0000000..64cfd53 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && (arm || arm64) +// +build linux +// +build arm arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go new file mode 100644 index 0000000..c11207f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (mips || mipsle || mips64 || mips64le) +// +build linux +// +build mips mipsle mips64 mips64le + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go new file mode 100644 index 0000000..ea41bf2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x + +package procfs + +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go new file mode 100644 index 0000000..003bc2a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go new file mode 100644 index 0000000..1c9b731 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (riscv || riscv64) +// +build linux +// +build riscv riscv64 + +package procfs + +var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 0000000..fa3686b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go new file mode 100644 index 0000000..a0ef555 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux && (386 || amd64) +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 0000000..5048ad1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. 
More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. 
+ c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 0000000..d31a826 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 0000000..0102ab0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. +type FS struct { + proc fs.FS +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. 
+func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) + if err != nil { + return FS{}, err + } + return FS{fs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 0000000..f8070e6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. 
+type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update 
cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + 
RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + 
OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + 
CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) 
+ if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if 
err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + 
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 0000000..3c18c76 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs. + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %q: %w", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %q is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 0000000..b030951 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
	result := make([]uint32, len(ss))
	for i, s := range ss {
		v, err := strconv.ParseUint(s, 10, 32)
		if err != nil {
			return nil, err
		}
		result[i] = uint32(v)
	}
	return result, nil
}

// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
	result := make([]uint64, len(ss))
	for i, s := range ss {
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		result[i] = v
	}
	return result, nil
}

// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
func ParsePInt64s(ss []string) ([]*int64, error) {
	result := make([]*int64, len(ss))
	for i, s := range ss {
		v, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return nil, err
		}
		// Each iteration takes the address of a fresh local, so the
		// pointers are independent.
		p := v
		result[i] = &p
	}
	return result, nil
}

// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
}

// ReadIntFromFile reads a file and attempts to parse a int64 from it.
func ReadIntFromFile(path string) (int64, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64)
}

// ParseBool parses a string into a boolean pointer.
// "enabled" yields true, "disabled" yields false, anything else yields nil.
func ParseBool(b string) *bool {
	switch b {
	case "enabled":
		v := true
		return &v
	case "disabled":
		v := false
		return &v
	}
	return nil
}
// ReadFileNoStat uses io.ReadAll to read contents of entire file.
// This is similar to os.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
// Reads a max file size of 1024kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
	// 1 MiB cap; /proc and /sys files are small, anything larger is suspect.
	const maxBufferSize = 1024 * 1024

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return io.ReadAll(io.LimitReader(f, maxBufferSize))
}

// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
// (In the upstream layout this lives in sysreadfile.go, built only for
// (linux || darwin) && !appengine; a stub exists for other platforms.)
func SysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN, which
	// makes os.ReadFile poll forever. Do a single direct read via syscall
	// instead: either we get data or we bail immediately.
	buf := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), buf)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(buf[:n])), nil
}
// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.

// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
	v   string
	err error
}

// NewValueParser creates a ValueParser using the input string.
func NewValueParser(v string) *ValueParser {
	return &ValueParser{v: v}
}

// Int interprets the underlying value as an int and returns that value.
func (vp *ValueParser) Int() int {
	return int(vp.int64())
}

// PInt64 interprets the underlying value as an int64 and returns a pointer to
// that value.
func (vp *ValueParser) PInt64() *int64 {
	if vp.err != nil {
		return nil
	}
	n := vp.int64()
	return &n
}

// int64 interprets the underlying value as an int64 and returns that value.
// After the first failed parse, the error is latched and later calls report 0.
// TODO: export if/when necessary.
func (vp *ValueParser) int64() int64 {
	if vp.err != nil {
		return 0
	}
	// Base 0 makes ParseInt infer the base from the string's prefix, if any.
	n, err := strconv.ParseInt(vp.v, 0, 64)
	if err != nil {
		vp.err = err
		return 0
	}
	return n
}

// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
// that value.
func (vp *ValueParser) PUInt64() *uint64 {
	if vp.err != nil {
		return nil
	}
	// Base 0 makes ParseUint infer the base from the string's prefix, if any.
	u, err := strconv.ParseUint(vp.v, 0, 64)
	if err != nil {
		vp.err = err
		return nil
	}
	return &u
}

// Err returns the last error, if any, encountered by the ValueParser.
func (vp *ValueParser) Err() error {
	return vp.err
}

// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
type IPVSStats struct {
	// Total count of connections.
	Connections uint64
	// Total incoming packages processed.
	IncomingPackets uint64
	// Total outgoing packages processed.
	OutgoingPackets uint64
	// Total incoming traffic.
	IncomingBytes uint64
	// Total outgoing traffic.
	OutgoingBytes uint64
}
+type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + + return parseIPVSStats(bytes.NewReader(data)) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(r io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := io.ReadAll(r) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { 
+ return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: 
weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 0000000..db88566 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. +type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. 
+ EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bits. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. + WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 0000000..0096caf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg. +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. +func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load %q: %w", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 0000000..a95c889 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,266 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Number of "down" disks. (the _ indicator in the status line) + DisksDown int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 + // progress percentage of current sync + BlocksSyncedPct float64 + // estimated finishing time for current sync (in minutes) + BlocksSyncedFinishTime float64 + // current sync speed (in Kilobytes/sec) + BlocksSyncedSpeed float64 + // Name of md component devices + Devices []string +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
// More information available here:
// https://raid.wiki.kernel.org/index.php/Mdstat
func (fs FS) MDStat() ([]MDStat, error) {
	data, err := os.ReadFile(fs.proc.Path("mdstat"))
	if err != nil {
		return nil, err
	}
	mdstat, err := parseMDStat(data)
	if err != nil {
		return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
	}
	return mdstat, nil
}

// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
// structs containing the relevant info.
func parseMDStat(mdStatData []byte) ([]MDStat, error) {
	mdStats := []MDStat{}
	lines := strings.Split(string(mdStatData), "\n")

	for i, line := range lines {
		// Skip blank lines, indented continuation lines, and the
		// "Personalities"/"unused devices" header/footer; only lines that
		// begin a new md device entry are handled by this loop.
		if strings.TrimSpace(line) == "" || line[0] == ' ' ||
			strings.HasPrefix(line, "Personalities") ||
			strings.HasPrefix(line, "unused") {
			continue
		}

		deviceFields := strings.Fields(line)
		if len(deviceFields) < 3 {
			return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
		}
		mdName := deviceFields[0] // mdx
		state := deviceFields[2]  // active or inactive

		// A device entry needs at least a status line (i+1), a possible
		// bitmap line (i+2), and a possible sync line (i+3) below it.
		if len(lines) <= i+3 {
			return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
		}

		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
		fail := int64(strings.Count(line, "(F)"))
		spare := int64(strings.Count(line, "(S)"))
		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])

		if err != nil {
			return nil, fmt.Errorf("error parsing md device lines: %w", err)
		}

		syncLineIdx := i + 2
		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
			syncLineIdx++
		}

		// If device is syncing at the moment, get the number of currently
		// synced bytes, otherwise that number equals the size of the device.
		syncedBlocks := size
		speed := float64(0)
		finish := float64(0)
		pct := float64(0)
		recovering := strings.Contains(lines[syncLineIdx], "recovery")
		resyncing := strings.Contains(lines[syncLineIdx], "resync")
		checking := strings.Contains(lines[syncLineIdx], "check")

		// Append recovery and resyncing state info.
		if recovering || resyncing || checking {
			if recovering {
				state = "recovering"
			} else if checking {
				state = "checking"
			} else {
				state = "resyncing"
			}

			// Handle case when resync=PENDING or resync=DELAYED.
			if strings.Contains(lines[syncLineIdx], "PENDING") ||
				strings.Contains(lines[syncLineIdx], "DELAYED") {
				syncedBlocks = 0
			} else {
				syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
				if err != nil {
					return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
				}
			}
		}

		mdStats = append(mdStats, MDStat{
			Name:                   mdName,
			ActivityState:          state,
			DisksActive:            active,
			DisksFailed:            fail,
			DisksDown:              down,
			DisksSpare:             spare,
			DisksTotal:             total,
			BlocksTotal:            size,
			BlocksSynced:           syncedBlocks,
			BlocksSyncedPct:        pct,
			BlocksSyncedFinishTime: finish,
			BlocksSyncedSpeed:      speed,
			Devices:                evalComponentDevices(deviceFields),
		})
	}

	return mdStats, nil
}

// evalStatusLine parses the status line that follows a device line and
// returns the active/total/down disk counts and the device size in blocks.
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
	statusFields := strings.Fields(statusLine)
	if len(statusFields) < 1 {
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
	}

	sizeStr := statusFields[0]
	size, err = strconv.ParseInt(sizeStr, 10, 64)
	if err != nil {
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}

	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
		// In the device deviceLine, only disks have a number associated with them in [].
		total = int64(strings.Count(deviceLine, "["))
		return total, total, 0, size, nil
	}

	if strings.Contains(deviceLine, "inactive") {
		return 0, 0, 0, size, nil
	}

	matches := statusLineRE.FindStringSubmatch(statusLine)
	if len(matches) != 5 {
		return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
	}

	total, err = strconv.ParseInt(matches[2], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}

	active, err = strconv.ParseInt(matches[3], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
	}
	// Each "_" in the [UU__] indicator marks a missing ("down") disk.
	down = int64(strings.Count(matches[4], "_"))

	return active, total, down, size, nil
}

// evalRecoveryLine parses a resync/recovery progress line and returns the
// synced block count, completion percentage, estimated finish time
// (minutes) and current speed (Kilobytes/sec).
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
	if len(matches) != 2 {
		return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
	}

	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
	}

	// Get percentage complete
	matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
	if len(matches) != 2 {
		return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
	}
	pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
	if err != nil {
		return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
	}

	// Get time expected left to complete
	matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
	if len(matches) != 2 {
		return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
	}
	finish, err = strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
	}

	// Get recovery speed
	matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
	if len(matches) != 2 {
		return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
	}
	speed, err = strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
	}

	return syncedBlocks, pct, finish, speed, nil
}

// evalComponentDevices extracts the names of the md component devices
// (e.g. "sda1" from "sda1[0]") from the fields of a device line.
func evalComponentDevices(deviceFields []string) []string {
	mdComponentDevices := make([]string, 0)
	if len(deviceFields) > 3 {
		for _, field := range deviceFields[4:] {
			match := componentDeviceRE.FindStringSubmatch(field)
			if match == nil {
				continue
			}
			mdComponentDevices = append(mdComponentDevices, match[1])
		}
	}

	return mdComponentDevices
}

// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal *uint64 + // The sum of LowFree+HighFree + MemFree *uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable *uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers *uint64 + Cached *uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached *uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active *uint64 + // Memory which has been less recently used. 
It is more + // eligible to be reclaimed for other purposes + Inactive *uint64 + ActiveAnon *uint64 + InactiveAnon *uint64 + ActiveFile *uint64 + InactiveFile *uint64 + Unevictable *uint64 + Mlocked *uint64 + // total amount of swap space available + SwapTotal *uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree *uint64 + // Memory which is waiting to get written back to the disk + Dirty *uint64 + // Memory which is actively being written back to the disk + Writeback *uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages *uint64 + // files which have been mapped, such as libraries + Mapped *uint64 + Shmem *uint64 + // in-kernel data structures cache + Slab *uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable *uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim *uint64 + KernelStack *uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables *uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable *uint64 + // Memory used for block device "bounce buffers" + Bounce *uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp *uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). + // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. 
+ CommitLimit *uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS *uint64 + // total size of vmalloc memory area + VmallocTotal *uint64 + // amount of vmalloc area which is used + VmallocUsed *uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk *uint64 + HardwareCorrupted *uint64 + AnonHugePages *uint64 + ShmemHugePages *uint64 + ShmemPmdMapped *uint64 + CmaTotal *uint64 + CmaFree *uint64 + HugePagesTotal *uint64 + HugePagesFree *uint64 + HugePagesRsvd *uint64 + HugePagesSurp *uint64 + Hugepagesize *uint64 + DirectMap4k *uint64 + DirectMap2M *uint64 + DirectMap1G *uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = &v + case "MemFree:": + m.MemFree = &v + case "MemAvailable:": + m.MemAvailable = &v + case "Buffers:": + m.Buffers = &v + case "Cached:": + m.Cached = &v + case "SwapCached:": + m.SwapCached = &v + case "Active:": + m.Active = &v + case "Inactive:": + m.Inactive = &v + case "Active(anon):": + m.ActiveAnon = &v + case "Inactive(anon):": + m.InactiveAnon = &v + case "Active(file):": + m.ActiveFile = &v + case "Inactive(file):": + m.InactiveFile = &v + case "Unevictable:": + m.Unevictable = &v + case "Mlocked:": + m.Mlocked = &v + case "SwapTotal:": + m.SwapTotal = &v + case "SwapFree:": + m.SwapFree = &v + case "Dirty:": + m.Dirty = &v + case "Writeback:": + m.Writeback = &v + case "AnonPages:": + m.AnonPages = &v + case "Mapped:": + m.Mapped = &v + case "Shmem:": + m.Shmem = &v + case "Slab:": + m.Slab = &v + case "SReclaimable:": + m.SReclaimable = &v + case "SUnreclaim:": + m.SUnreclaim = &v + case "KernelStack:": + m.KernelStack = &v + case "PageTables:": + m.PageTables = &v + case "NFS_Unstable:": + m.NFSUnstable = &v + case "Bounce:": + m.Bounce = &v + case "WritebackTmp:": + m.WritebackTmp = &v + case "CommitLimit:": + m.CommitLimit = &v + case 
"Committed_AS:": + m.CommittedAS = &v + case "VmallocTotal:": + m.VmallocTotal = &v + case "VmallocUsed:": + m.VmallocUsed = &v + case "VmallocChunk:": + m.VmallocChunk = &v + case "HardwareCorrupted:": + m.HardwareCorrupted = &v + case "AnonHugePages:": + m.AnonHugePages = &v + case "ShmemHugePages:": + m.ShmemHugePages = &v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = &v + case "CmaTotal:": + m.CmaTotal = &v + case "CmaFree:": + m.CmaFree = &v + case "HugePages_Total:": + m.HugePagesTotal = &v + case "HugePages_Free:": + m.HugePagesFree = &v + case "HugePages_Rsvd:": + m.HugePagesRsvd = &v + case "HugePages_Surp:": + m.HugePagesSurp = &v + case "Hugepagesize:": + m.Hugepagesize = &v + case "DirectMap4k:": + m.DirectMap4k = &v + case "DirectMap2M:": + m.DirectMap2M = &v + case "DirectMap1G:": + m.DirectMap1G = &v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 0000000..59f4d50 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. 
// The fields described in each entry of /proc/self/mountinfo
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
	// Unique ID for the mount
	MountID int
	// The ID of the parent mount
	ParentID int
	// The value of `st_dev` for the files on this FS
	MajorMinorVer string
	// The pathname of the directory in the FS that forms
	// the root for this mount
	Root string
	// The pathname of the mount point relative to the root
	MountPoint string
	// Mount options
	Options map[string]string
	// Zero or more optional fields
	OptionalFields map[string]string
	// The Filesystem type
	FSType string
	// FS specific information or "none"
	Source string
	// Superblock options
	SuperOptions map[string]string
}

// parseMountInfo reads each line of the mountinfo file, and returns a list of
// formatted MountInfo structs.
func parseMountInfo(info []byte) ([]*MountInfo, error) {
	mounts := []*MountInfo{}
	scanner := bufio.NewScanner(bytes.NewReader(info))
	for scanner.Scan() {
		mountString := scanner.Text()
		parsedMounts, err := parseMountInfoString(mountString)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, parsedMounts)
	}

	err := scanner.Err()
	return mounts, err
}

// parseMountInfoString parses a mountinfo file line, and converts it to a
// MountInfo struct. An important check is the presence of the hyphen
// separator ("-"); if it does not exist, the line is malformed.
func parseMountInfoString(mountString string) (*MountInfo, error) {
	var err error

	mountInfo := strings.Split(mountString, " ")
	mountInfoLength := len(mountInfo)
	if mountInfoLength < 10 {
		return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
	}

	// The hyphen separator sits four fields from the end; everything between
	// field 6 and it is the variable-length list of optional fields.
	if mountInfo[mountInfoLength-4] != "-" {
		return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
	}

	mount := &MountInfo{
		MajorMinorVer:  mountInfo[2],
		Root:           mountInfo[3],
		MountPoint:     mountInfo[4],
		Options:        mountOptionsParser(mountInfo[5]),
		OptionalFields: nil,
		FSType:         mountInfo[mountInfoLength-3],
		Source:         mountInfo[mountInfoLength-2],
		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
	}

	mount.MountID, err = strconv.Atoi(mountInfo[0])
	if err != nil {
		return nil, fmt.Errorf("failed to parse mount ID")
	}
	mount.ParentID, err = strconv.Atoi(mountInfo[1])
	if err != nil {
		return nil, fmt.Errorf("failed to parse parent ID")
	}
	// Has optional fields, which is a space separated list of values.
	// Example: shared:2 master:7
	if mountInfo[6] != "" {
		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
		if err != nil {
			return nil, err
		}
	}
	return mount, nil
}

// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
func mountOptionsIsValidField(s string) bool {
	switch s {
	case
		"shared",
		"master",
		"propagate_from",
		"unbindable":
		return true
	}
	return false
}

// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
// Unrecognized field keys are silently dropped.
func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
	optionalFields := make(map[string]string)
	for _, field := range o {
		optionSplit := strings.SplitN(field, ":", 2)
		value := ""
		if len(optionSplit) == 2 {
			value = optionSplit[1]
		}
		if mountOptionsIsValidField(optionSplit[0]) {
			optionalFields[optionSplit[0]] = value
		}
	}
	return optionalFields, nil
}

// mountOptionsParser parses the mount options, superblock options.
// A comma-separated "k=v" list becomes a map; bare keys map to "".
func mountOptionsParser(mountOptions string) map[string]string {
	opts := make(map[string]string)
	options := strings.Split(mountOptions, ",")
	for _, opt := range options {
		splitOption := strings.Split(opt, "=")
		if len(splitOption) < 2 {
			key := splitOption[0]
			opts[key] = ""
		} else {
			key, value := splitOption[0], splitOption[1]
			opts[key] = value
		}
	}
	return opts
}

// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	return parseMountInfo(data)
}

// GetProcMounts retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	return parseMountInfo(data)
}

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The mount options of the NFS mount. + Opts map[string]string + // The age of the NFS mount. 
+ Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. 
+ VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. + Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. 
+ MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. + BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueMilliseconds uint64 + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseMilliseconds uint64 + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestMilliseconds uint64 + // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. + Errors uint64 +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string + // The local port used for the NFS mount. + Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTimeSeconds uint64 + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. 
+ CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? 
+ if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
	// Field indicators for parsing specific types of data
	const (
		fieldOpts       = "opts:"
		fieldAge        = "age:"
		fieldBytes      = "bytes:"
		fieldEvents     = "events:"
		fieldPerOpStats = "per-op"
		fieldTransport  = "xprt:"
	)

	stats := &MountStatsNFS{
		StatVersion: statVersion,
	}

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		// A blank line terminates this device entry.
		if len(ss) == 0 {
			break
		}

		switch ss[0] {
		case fieldOpts:
			if len(ss) < 2 {
				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
			}
			if stats.Opts == nil {
				stats.Opts = map[string]string{}
			}
			// Mount options are comma-separated; each is either "key=value"
			// or a bare flag, which is stored with an empty value.
			for _, opt := range strings.Split(ss[1], ",") {
				split := strings.Split(opt, "=")
				if len(split) == 2 {
					stats.Opts[split[0]] = split[1]
				} else {
					stats.Opts[opt] = ""
				}
			}
		case fieldAge:
			if len(ss) < 2 {
				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
			}
			// Age integer is in seconds
			d, err := time.ParseDuration(ss[1] + "s")
			if err != nil {
				return nil, err
			}

			stats.Age = d
		case fieldBytes:
			if len(ss) < 2 {
				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
			}
			bstats, err := parseNFSBytesStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Bytes = *bstats
		case fieldEvents:
			if len(ss) < 2 {
				return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
			}
			estats, err := parseNFSEventsStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Events = *estats
		case fieldTransport:
			if len(ss) < 3 {
				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
			}

			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
			if err != nil {
				return nil, err
			}

			stats.Transport = *tstats
		}

		// When encountering "per-operation statistics", we must break this
		// loop and parse them separately to ensure we can terminate parsing
		// before reaching another device entry; hence why this 'if' statement
		// is not just another switch case
		if ss[0] == fieldPerOpStats {
			break
		}
	}

	// Surface any scanner read error before continuing to per-op stats.
	if err := s.Err(); err != nil {
		return nil, err
	}

	// NFS per-operation stats appear last before the next device entry
	perOpStats, err := parseNFSOperationStats(s)
	if err != nil {
		return nil, err
	}

	stats.Operations = perOpStats

	return stats, nil
}

// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
	// A "bytes:" line carries exactly fieldBytesLen counters.
	if len(ss) != fieldBytesLen {
		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
	}

	ns := make([]uint64, 0, fieldBytesLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	// The counters map positionally onto the struct fields.
	return &NFSBytesStats{
		Read:        ns[0],
		Write:       ns[1],
		DirectRead:  ns[2],
		DirectWrite: ns[3],
		ReadTotal:   ns[4],
		WriteTotal:  ns[5],
		ReadPages:   ns[6],
		WritePages:  ns[7],
	}, nil
}

// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
	// An "events:" line carries exactly fieldEventsLen counters.
	if len(ss) != fieldEventsLen {
		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
	}

	// Parse every field as an unsigned integer up front.
	ns := make([]uint64, 0, fieldEventsLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	// The counters map positionally onto the struct fields.
	return &NFSEventsStats{
		InodeRevalidate:     ns[0],
		DnodeRevalidate:     ns[1],
		DataInvalidate:      ns[2],
		AttributeInvalidate: ns[3],
		VFSOpen:             ns[4],
		VFSLookup:           ns[5],
		VFSAccess:           ns[6],
		VFSUpdatePage:       ns[7],
		VFSReadPage:         ns[8],
		VFSReadPages:        ns[9],
		VFSWritePage:        ns[10],
		VFSWritePages:       ns[11],
		VFSGetdents:         ns[12],
		VFSSetattr:          ns[13],
		VFSFlush:            ns[14],
		VFSFsync:            ns[15],
		VFSLock:             ns[16],
		VFSFileRelease:      ns[17],
		CongestionWait:      ns[18],
		Truncation:          ns[19],
		WriteExtension:      ns[20],
		SillyRename:         ns[21],
		ShortRead:           ns[22],
		ShortWrite:          ns[23],
		JukeboxDelay:        ns[24],
		PNFSRead:            ns[25],
		PNFSWrite:           ns[26],
	}, nil
}

// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) < minFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, minFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + opStats := NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
	// It is the only string value in the line.
	protocol := ss[0]
	ss = ss[1:]

	// Validate the field count against the expected length for this
	// stats version and protocol before parsing.
	switch statVersion {
	case statVersion10:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport10TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport10UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
		}
	case statVersion11:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport11TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport11UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
		}
	default:
		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
	}

	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
	// the TCP length here.
	//
	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
	// only v1.0 stats are present.
	// See: https://github.com/prometheus/node_exporter/issues/571.
	ns := make([]uint64, fieldTransport11TCPLen)
	for i, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns[i] = n
	}

	// The fields differ depending on the transport protocol (TCP or UDP)
	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
	//
	// For the udp RPC transport there is no connection count, connect idle time,
	// or idle time (fields #3, #4, and #5); all other fields are the same. So
	// we set them to 0 here.
+ if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + + return &NFSTransportStats{ + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTimeSeconds: ns[4], + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 0000000..8300dac --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core. +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. 
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath. +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields. 
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string. 
+func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 0000000..e66208a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,205 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. 
	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
}

// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
// are interface names.
type NetDev map[string]NetDevLine

// NetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NetDev() (NetDev, error) {
	return newNetDev(fs.proc.Path("net/dev"))
}

// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
func (p Proc) NetDev() (NetDev, error) {
	return newNetDev(p.path("net/dev"))
}

// newNetDev creates a new NetDev from the contents of the given file.
// Lines that fail to parse abort the read and return the entries parsed
// so far along with the error.
func newNetDev(file string) (NetDev, error) {
	f, err := os.Open(file)
	if err != nil {
		return NetDev{}, err
	}
	defer f.Close()

	netDev := NetDev{}
	s := bufio.NewScanner(f)
	for n := 0; s.Scan(); n++ {
		// Skip the 2 header lines.
		if n < 2 {
			continue
		}

		line, err := netDev.parseLine(s.Text())
		if err != nil {
			return netDev, err
		}

		// Later lines for the same interface name overwrite earlier ones.
		netDev[line.Name] = *line
	}

	return netDev, s.Err()
}

// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(rawLine[:idx]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err 
+ } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. +func (netDev NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go new file mode 100644 index 0000000..7fd57d7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -0,0 +1,226 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +// This contains generic data structures for both udp and tcp sockets. +type ( + // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. + NetIPSocket []*netIPSocketLine + + // NetIPSocketSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetIPSocket it does not collect + // the parsed lines into a slice. + NetIPSocketSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netIPSocketLine represents the fields parsed from a single line + // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. 
	netIPSocketLine struct {
		Sl        uint64
		LocalAddr net.IP
		LocalPort uint64
		RemAddr   net.IP
		RemPort   uint64
		St        uint64
		TxQueue   uint64
		RxQueue   uint64
		UID       uint64
		Inode     uint64
	}
)

// newNetIPSocket creates a new NetIPSocket from the contents of the given
// file, collecting every parsed socket line.
func newNetIPSocket(file string) (NetIPSocket, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var netIPSocket NetIPSocket

	// Bound the read; see readLimit for the rationale.
	lr := io.LimitReader(f, readLimit)
	s := bufio.NewScanner(lr)
	s.Scan() // skip first line with headers
	for s.Scan() {
		fields := strings.Fields(s.Text())
		line, err := parseNetIPSocketLine(fields)
		if err != nil {
			return nil, err
		}
		netIPSocket = append(netIPSocket, line)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return netIPSocket, nil
}

// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of
// the given file, accumulating totals instead of collecting each line.
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var netIPSocketSummary NetIPSocketSummary

	// Bound the read; see readLimit for the rationale.
	lr := io.LimitReader(f, readLimit)
	s := bufio.NewScanner(lr)
	s.Scan() // skip first line with headers
	for s.Scan() {
		fields := strings.Fields(s.Text())
		line, err := parseNetIPSocketLine(fields)
		if err != nil {
			return nil, err
		}
		netIPSocketSummary.TxQueueLength += line.TxQueue
		netIPSocketSummary.RxQueueLength += line.RxQueue
		netIPSocketSummary.UsedSockets++
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return &netIPSocketSummary, nil
}

// The /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for
// ipv6 the address is four words consisting of four bytes each. In each of
// those four words the four bytes are written in reverse order.
+ +func parseIP(hexIP string) (net.IP, error) { + var byteIP []byte + byteIP, err := hex.DecodeString(hexIP) + if err != nil { + return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + } + switch len(byteIP) { + case 4: + return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil + case 16: + i := net.IP{ + byteIP[3], byteIP[2], byteIP[1], byteIP[0], + byteIP[7], byteIP[6], byteIP[5], byteIP[4], + byteIP[11], byteIP[10], byteIP[9], byteIP[8], + byteIP[15], byteIP[14], byteIP[13], byteIP[12], + } + return i, nil + default: + return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + } +} + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { + line := &netIPSocketLine{} + if len(fields) < 10 { + return nil, fmt.Errorf( + "cannot parse net socket line as it has less then 10 columns %q", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + } + if line.LocalAddr, err = parseIP(l[0]); err != nil { + return nil, err + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + } + if line.RemAddr, err = parseIP(r[0]); err != nil { + return nil, err + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); 
err != nil { + return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in socket line as it has a missing colon %q", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + } + + // inode + if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go new file mode 100644 index 0000000..374b6f7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// NetProtocolStats stores the contents from /proc/net/protocols. +type NetProtocolStats map[string]NetProtocolStatLine + +// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We +// only care about the first six columns as the rest are not likely to change +// and only serve to provide a set of capabilities for each protocol. +type NetProtocolStatLine struct { + Name string // 0 The name of the protocol + Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) + Sockets int64 // 2 Number of sockets in use by this protocol + Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol + Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. + MaxHeader uint64 // 5 Protocol specific max header size + Slab bool // 6 Indicates whether or not memory is allocated from the SLAB + ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module + Capabilities NetProtocolCapabilities +} + +// NetProtocolCapabilities contains a list of capabilities for each protocol. +type NetProtocolCapabilities struct { + Close bool // 8 + Connect bool // 9 + Disconnect bool // 10 + Accept bool // 11 + IoCtl bool // 12 + Init bool // 13 + Destroy bool // 14 + Shutdown bool // 15 + SetSockOpt bool // 16 + GetSockOpt bool // 17 + SendMsg bool // 18 + RecvMsg bool // 19 + SendPage bool // 20 + Bind bool // 21 + BacklogRcv bool // 22 + Hash bool // 23 + UnHash bool // 24 + GetPort bool // 25 + EnterMemoryPressure bool // 26 +} + +// NetProtocols reads stats from /proc/net/protocols and returns a map of +// PortocolStatLine entries. 
As of this writing no official Linux Documentation +// exists, however the source is fairly self-explanatory and the format seems +// stable since its introduction in 2.6.12-rc2 +// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 +// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 +func (fs FS) NetProtocols() (NetProtocolStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) + if err != nil { + return NetProtocolStats{}, err + } + return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) +} + +func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { + nps := NetProtocolStats{} + + // Skip the header line + s.Scan() + + for s.Scan() { + line, err := nps.parseLine(s.Text()) + if err != nil { + return NetProtocolStats{}, err + } + + nps[line.Name] = *line + } + return nps, nil +} + +func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { + line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} + var err error + const enabled = "yes" + const disabled = "no" + + fields := strings.Fields(rawLine) + line.Name = fields[0] + line.Size, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.Memory, err = strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return nil, err + } + if fields[4] == enabled { + line.Pressure = 1 + } else if fields[4] == disabled { + line.Pressure = 0 + } else { + line.Pressure = -1 + } + line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + if fields[6] == enabled { + line.Slab = true + } else if fields[6] == disabled { + line.Slab = false + } else { + return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + } + line.ModuleName = fields[7] + + err = 
line.Capabilities.parseCapabilities(fields[8:]) + if err != nil { + return nil, err + } + + return line, nil +} + +func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { + // The capabilities are all bools so we can loop over to map them + capabilityFields := [...]*bool{ + &pc.Close, + &pc.Connect, + &pc.Disconnect, + &pc.Accept, + &pc.IoCtl, + &pc.Init, + &pc.Destroy, + &pc.Shutdown, + &pc.SetSockOpt, + &pc.GetSockOpt, + &pc.SendMsg, + &pc.RecvMsg, + &pc.SendPage, + &pc.Bind, + &pc.BacklogRcv, + &pc.Hash, + &pc.UnHash, + &pc.GetPort, + &pc.EnterMemoryPressure, + } + + for i := 0; i < len(capabilities); i++ { + if capabilities[i] == "y" { + *capabilityFields[i] = true + } else if capabilities[i] == "n" { + *capabilityFields[i] = false + } else { + return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 0000000..e36f487 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. 
+type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. 
+ kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. + out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. 
+ v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 0000000..a94f86d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. +type SoftnetStat struct { + // Number of processed packets. + Processed uint32 + // Number of dropped packets. + Dropped uint32 + // Number of times processing packets ran out of quota. + TimeSqueezed uint32 +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. 
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + } + + // We only parse the first three columns at the moment. + us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go new file mode 100644 index 0000000..5277629 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. + NetTCP []*netIPSocketLine + + // NetTCPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetTCP it does not collect + // the parsed lines into a slice. + NetTCPSummary NetIPSocketSummary +) + +// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp. +func (fs FS) NetTCP() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp")) +} + +// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp6. +func (fs FS) NetTCP6() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp6")) +} + +// NetTCPSummary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp. +func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp")) +} + +// NetTCP6Summary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp6. +func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp6")) +} + +// newNetTCP creates a new NetTCP{,6} from the contents of the given file. 
+func newNetTCP(file string) (NetTCP, error) { + n, err := newNetIPSocket(file) + n1 := NetTCP(n) + return n1, err +} + +func newNetTCPSummary(file string) (*NetTCPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetTCPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 0000000..9ac3daf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netIPSocketLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary NetIPSocketSummary +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. 
+func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + n, err := newNetIPSocket(file) + n1 := NetUDP(n) + return n1, err +} + +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetUDPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 0000000..98aa8e1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. 
+ s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. + hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: 
state, + Inode: inode, + } + + // Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. + pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go new file mode 100644 index 0000000..f9d9d24 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -0,0 +1,189 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. 
Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired + XfrmOutStateInvalid int + // State hasn’t been fully acquired before use + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case 
"XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go new file mode 100644 index 0000000..dcea9c5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -0,0 +1,68 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +// NetStat contains statistics for all the counters from one file. +type NetStat struct { + Stats map[string][]uint64 + Filename string +} + +// NetStat retrieves stats from `/proc/net/stat/`. +func (fs FS) NetStat() ([]NetStat, error) { + statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) + if err != nil { + return nil, err + } + + var netStatsTotal []NetStat + + for _, filePath := range statFiles { + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + + netStatFile := NetStat{ + Filename: filepath.Base(filePath), + Stats: make(map[string][]uint64), + } + scanner := bufio.NewScanner(file) + scanner.Scan() + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) 
+ + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return nil, err + } + netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + } + } + netStatsTotal = append(netStatsTotal, netStatFile) + } + return netStatsTotal, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 0000000..c30223a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,319 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs fs.FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. 
+func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.proc.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// NewProc returns a process for the given pid. +// +// Deprecated: Use fs.Proc() instead. +func (fs FS) NewProc(pid int) (Proc, error) { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs.proc}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.proc.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs.proc}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. 
+func (p Proc) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(p.path("cmdline")) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := io.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot). +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. 
+func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := os.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 0000000..cca0332 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. 
For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path. +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.SplitN(cgroupStr, ":", 3) + if len(fields) < 3 { + return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) + } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file. +func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system. 
+func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(p.path("cgroup")) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 0000000..24d4dce --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. +// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. 
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("failed to parse Cgroup Num") + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse Enabled") + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. +func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. 
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 0000000..57a8989 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from `/proc//environ`. +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 0000000..1bbdd4a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,132 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. 
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. +func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, fmt.Errorf("invalid inode entry: %q", line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches. 
+func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 0000000..776f349 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// IO creates a new ProcIO instance from a given Proc instance. 
+func (p Proc) IO() (ProcIO, error) { + pio := ProcIO{} + + data, err := util.ReadFileNoStat(p.path("io")) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 0000000..7a13881 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,160 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime uint64 + // Maximum size of files that the process may create. + FileSize uint64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize uint64 + // Maximum size of the process stack in bytes. + StackSize uint64 + // Maximum size of a core file. 
+ CoreFileSize uint64 + // Limit of the process's resident set in pages. + ResidentSet uint64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes uint64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles uint64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory uint64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace uint64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks uint64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals uint64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize uint64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority uint64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority uint64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout uint64 +} + +const ( + limitsFields = 4 + limitsUnlimited = "unlimited" +) + +var ( + limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) +) + +// NewLimits returns the current soft limits of the process. +// +// Deprecated: Use p.Limits() instead. +func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. 
+func (p Proc) Limits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + + s.Scan() // Skip limits header + + for s.Scan() { + //fields := limitsMatch.Split(s.Text(), limitsFields) + fields := limitsMatch.FindStringSubmatch(s.Text()) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + } + + switch fields[1] { + case "Max cpu time": + l.CPUTime, err = parseUint(fields[2]) + case "Max file size": + l.FileSize, err = parseUint(fields[2]) + case "Max data size": + l.DataSize, err = parseUint(fields[2]) + case "Max stack size": + l.StackSize, err = parseUint(fields[2]) + case "Max core file size": + l.CoreFileSize, err = parseUint(fields[2]) + case "Max resident set": + l.ResidentSet, err = parseUint(fields[2]) + case "Max processes": + l.Processes, err = parseUint(fields[2]) + case "Max open files": + l.OpenFiles, err = parseUint(fields[2]) + case "Max locked memory": + l.LockedMemory, err = parseUint(fields[2]) + case "Max address space": + l.AddressSpace, err = parseUint(fields[2]) + case "Max file locks": + l.FileLocks, err = parseUint(fields[2]) + case "Max pending signals": + l.PendingSignals, err = parseUint(fields[2]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseUint(fields[2]) + case "Max nice priority": + l.NicePriority, err = parseUint(fields[2]) + case "Max realtime priority": + l.RealtimePriority, err = parseUint(fields[2]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseUint(fields[2]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseUint(s string) (uint64, error) { + if s == limitsUnlimited { + return 18446744073709551615, nil + } + i, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + } + return i, nil +} diff 
--git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 0000000..f1bcbf3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,211 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. 
+ Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. +func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress converts a hex-string to a uintptr. +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address. +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. +func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. 
+func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 0000000..48b5238 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,440 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. + PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent float64 + SyncookiesRecv float64 + SyncookiesFailed float64 + EmbryonicRsts float64 + PruneCalled float64 + RcvPruned float64 + OfoPruned float64 + OutOfWindowIcmps float64 + LockDroppedIcmps float64 + ArpFilter float64 + TW float64 + TWRecycled float64 + TWKilled float64 + PAWSActive float64 + PAWSEstab float64 + DelayedACKs float64 + DelayedACKLocked float64 + DelayedACKLost float64 + ListenOverflows float64 + ListenDrops float64 + TCPHPHits float64 + TCPPureAcks float64 + TCPHPAcks float64 + TCPRenoRecovery float64 + TCPSackRecovery float64 + TCPSACKReneging float64 + TCPSACKReorder float64 + TCPRenoReorder float64 + TCPTSReorder float64 + TCPFullUndo float64 + TCPPartialUndo float64 + TCPDSACKUndo float64 + TCPLossUndo float64 + TCPLostRetransmit float64 + TCPRenoFailures float64 + TCPSackFailures float64 + TCPLossFailures float64 + TCPFastRetrans float64 + TCPSlowStartRetrans float64 + TCPTimeouts float64 + TCPLossProbes float64 + TCPLossProbeRecovery float64 + TCPRenoRecoveryFail float64 + TCPSackRecoveryFail float64 + TCPRcvCollapsed float64 + TCPDSACKOldSent float64 + TCPDSACKOfoSent float64 + TCPDSACKRecv float64 + TCPDSACKOfoRecv float64 + TCPAbortOnData float64 + TCPAbortOnClose float64 + TCPAbortOnMemory 
float64 + TCPAbortOnTimeout float64 + TCPAbortOnLinger float64 + TCPAbortFailed float64 + TCPMemoryPressures float64 + TCPMemoryPressuresChrono float64 + TCPSACKDiscard float64 + TCPDSACKIgnoredOld float64 + TCPDSACKIgnoredNoUndo float64 + TCPSpuriousRTOs float64 + TCPMD5NotFound float64 + TCPMD5Unexpected float64 + TCPMD5Failure float64 + TCPSackShifted float64 + TCPSackMerged float64 + TCPSackShiftFallback float64 + TCPBacklogDrop float64 + PFMemallocDrop float64 + TCPMinTTLDrop float64 + TCPDeferAcceptDrop float64 + IPReversePathFilter float64 + TCPTimeWaitOverflow float64 + TCPReqQFullDoCookies float64 + TCPReqQFullDrop float64 + TCPRetransFail float64 + TCPRcvCoalesce float64 + TCPOFOQueue float64 + TCPOFODrop float64 + TCPOFOMerge float64 + TCPChallengeACK float64 + TCPSYNChallenge float64 + TCPFastOpenActive float64 + TCPFastOpenActiveFail float64 + TCPFastOpenPassive float64 + TCPFastOpenPassiveFail float64 + TCPFastOpenListenOverflow float64 + TCPFastOpenCookieReqd float64 + TCPFastOpenBlackhole float64 + TCPSpuriousRtxHostQueues float64 + BusyPollRxPackets float64 + TCPAutoCorking float64 + TCPFromZeroWindowAdv float64 + TCPToZeroWindowAdv float64 + TCPWantZeroWindowAdv float64 + TCPSynRetrans float64 + TCPOrigDataSent float64 + TCPHystartTrainDetect float64 + TCPHystartTrainCwnd float64 + TCPHystartDelayDetect float64 + TCPHystartDelayCwnd float64 + TCPACKSkippedSynRecv float64 + TCPACKSkippedPAWS float64 + TCPACKSkippedSeq float64 + TCPACKSkippedFinWait2 float64 + TCPACKSkippedTimeWait float64 + TCPACKSkippedChallenge float64 + TCPWinProbe float64 + TCPKeepAlive float64 + TCPMTUPFail float64 + TCPMTUPSuccess float64 + TCPWqueueTooBig float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes float64 + InTruncatedPkts float64 + InMcastPkts float64 + OutMcastPkts float64 + InBcastPkts float64 + OutBcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 
+ InCsumErrors float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 + ReasmOverlaps float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseNetstat parses the metrics from proc//net/netstat file +// and returns a ProcNetstat structure. +func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = value + case "TW": + procNetstat.TcpExt.TW = value + case 
"TWRecycled": + procNetstat.TcpExt.TWRecycled = value + case "TWKilled": + procNetstat.TcpExt.TWKilled = value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = value + case "ListenOverflows": + procNetstat.TcpExt.ListenOverflows = value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = value + case "TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = value + case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = value + case 
"TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = value + case "TCPDSACKOfoSent": + procNetstat.TcpExt.TCPDSACKOfoSent = value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = value + case 
"TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = value + case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = value + } + case "IpExt": + switch key { + case "InNoRoutes": + procNetstat.IpExt.InNoRoutes = value + 
case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = value + case "InOctets": + procNetstat.IpExt.InOctets = value + case "OutOctets": + procNetstat.IpExt.OutOctets = value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 0000000..391b4cb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// Namespaces reads from /proc//ns/* to get the namespaces of which the +// process is a member. +func (p Proc) Namespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 0000000..a68fe15 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information. +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 0000000..0e97d99 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,166 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in `/proc/pid/smaps`. + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM. + Rss uint64 + // Process's proportional share of this mapping. + Pss uint64 + // Size in bytes of clean shared pages. + SharedClean uint64 + // Size in bytes of dirty shared pages. + SharedDirty uint64 + // Size in bytes of clean private pages. + PrivateClean uint64 + // Size in bytes of dirty private pages. + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed. + Referenced uint64 + // Amount of memory that does not belong to any file. + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap. + Swap uint64 + // Process's proportional memory on swap. + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimRight(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, v, vKBytes, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += 
vUintBytes + case "Referenced": + s.Referenced += vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 0000000..ae19189 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding float64 + DefaultTTL float64 + InReceives float64 + InHdrErrors float64 + InAddrErrors float64 + ForwDatagrams float64 + InUnknownProtos float64 + InDiscards float64 + InDelivers float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 +} + +type Icmp struct { + InMsgs float64 + InErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InTimeExcds float64 + InParmProbs float64 + InSrcQuenchs float64 + InRedirects float64 + InEchos float64 + InEchoReps float64 + InTimestamps float64 + InTimestampReps float64 + InAddrMasks float64 + InAddrMaskReps float64 + OutMsgs float64 + OutErrors float64 + OutDestUnreachs float64 + OutTimeExcds float64 + OutParmProbs float64 + OutSrcQuenchs float64 + OutRedirects float64 + OutEchos float64 + OutEchoReps float64 + OutTimestamps float64 + OutTimestampReps float64 + OutAddrMasks float64 + OutAddrMaskReps float64 +} + +type IcmpMsg struct { + InType3 float64 + OutType3 float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm float64 + RtoMin float64 + RtoMax float64 + MaxConn float64 + ActiveOpens float64 + PassiveOpens float64 + AttemptFails float64 + EstabResets float64 + CurrEstab float64 + InSegs float64 + OutSegs float64 + RetransSegs float64 + InErrs float64 + OutRsts float64 + InCsumErrors float64 +} + +type Udp struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +func (p Proc) 
Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = value + case "InReceives": + procSnmp.Ip.InReceives = value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = value + case "InDiscards": + procSnmp.Ip.InDiscards = value + case "InDelivers": + procSnmp.Ip.InDelivers = value + case "OutRequests": + procSnmp.Ip.OutRequests = value + case "OutDiscards": + procSnmp.Ip.OutDiscards = value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = value + case "ReasmReqds": + procSnmp.Ip.ReasmReqds = value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = value + case "ReasmFails": + procSnmp.Ip.ReasmFails 
= value + case "FragOKs": + procSnmp.Ip.FragOKs = value + case "FragFails": + procSnmp.Ip.FragFails = value + case "FragCreates": + procSnmp.Ip.FragCreates = value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = value + case "InErrors": + procSnmp.Icmp.InErrors = value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = value + case "InRedirects": + procSnmp.Icmp.InRedirects = value + case "InEchos": + procSnmp.Icmp.InEchos = value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = value + case "OutErrors": + procSnmp.Icmp.OutErrors = value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = value + case "OutEchos": + procSnmp.Icmp.OutEchos = value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = value + } + case "IcmpMsg": + switch key { + case "InType3": + procSnmp.IcmpMsg.InType3 = value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + 
procSnmp.Tcp.RtoAlgorithm = value + case "RtoMin": + procSnmp.Tcp.RtoMin = value + case "RtoMax": + procSnmp.Tcp.RtoMax = value + case "MaxConn": + procSnmp.Tcp.MaxConn = value + case "ActiveOpens": + procSnmp.Tcp.ActiveOpens = value + case "PassiveOpens": + procSnmp.Tcp.PassiveOpens = value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = value + case "EstabResets": + procSnmp.Tcp.EstabResets = value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = value + case "InSegs": + procSnmp.Tcp.InSegs = value + case "OutSegs": + procSnmp.Tcp.OutSegs = value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = value + case "InErrs": + procSnmp.Tcp.InErrs = value + case "OutRsts": + procSnmp.Tcp.OutRsts = value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = value + case "NoPorts": + procSnmp.Udp.NoPorts = value + case "InErrors": + procSnmp.Udp.InErrors = value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = value + case "NoPorts": + procSnmp.UdpLite.NoPorts = value + case "InErrors": + procSnmp.UdpLite.InErrors = value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 0000000..f611992 --- 
/dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. + PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives float64 + InHdrErrors float64 + InTooBigErrors float64 + InNoRoutes float64 + InAddrErrors float64 + InUnknownProtos float64 + InTruncatedPkts float64 + InDiscards float64 + InDelivers float64 + OutForwDatagrams float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 + InMcastPkts float64 + OutMcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 +} + +type Icmp6 struct { + InMsgs float64 + InErrors float64 + OutMsgs float64 + OutErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InPktTooBigs float64 + InTimeExcds float64 + InParmProblems float64 + InEchos float64 + InEchoReplies float64 + 
InGroupMembQueries float64 + InGroupMembResponses float64 + InGroupMembReductions float64 + InRouterSolicits float64 + InRouterAdvertisements float64 + InNeighborSolicits float64 + InNeighborAdvertisements float64 + InRedirects float64 + InMLDv2Reports float64 + OutDestUnreachs float64 + OutPktTooBigs float64 + OutTimeExcds float64 + OutParmProblems float64 + OutEchos float64 + OutEchoReplies float64 + OutGroupMembQueries float64 + OutGroupMembResponses float64 + OutGroupMembReductions float64 + OutRouterSolicits float64 + OutRouterAdvertisements float64 + OutNeighborSolicits float64 + OutNeighborAdvertisements float64 + OutRedirects float64 + OutMLDv2Reports float64 + InType1 float64 + InType134 float64 + InType135 float64 + InType136 float64 + InType143 float64 + OutType133 float64 + OutType135 float64 + OutType136 float64 + OutType143 float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. 
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = value + case "InDiscards": + procSnmp6.Ip6.InDiscards = value + case "InDelivers": + procSnmp6.Ip6.InDelivers = value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = value + case "OutRequests": + procSnmp6.Ip6.OutRequests = value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = value + case "FragOKs": + procSnmp6.Ip6.FragOKs = value + case "FragFails": + procSnmp6.Ip6.FragFails = value + case "FragCreates": + procSnmp6.Ip6.FragCreates = value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = value + case "InOctets": + procSnmp6.Ip6.InOctets = value + case "OutOctets": + procSnmp6.Ip6.OutOctets = value + case "InMcastOctets": + 
procSnmp6.Ip6.InMcastOctets = value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = value + case "InErrors": + procSnmp6.Icmp6.InErrors = value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = value + case "InEchos": + procSnmp6.Icmp6.InEchos = value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = value + case "InNeighborSolicits": + procSnmp6.Icmp6.InNeighborSolicits = value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = value + case 
"OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = value + case "InType1": + procSnmp6.Icmp6.InType1 = value + case "InType134": + procSnmp6.Icmp6.InType134 = value + case "InType135": + procSnmp6.Icmp6.InType135 = value + case "InType136": + procSnmp6.Icmp6.InType136 = value + case "InType143": + procSnmp6.Icmp6.InType143 = value + case "OutType133": + procSnmp6.Icmp6.OutType133 = value + case "OutType135": + procSnmp6.Icmp6.OutType135 = value + case "OutType136": + procSnmp6.Icmp6.OutType136 = value + case "OutType143": + procSnmp6.Icmp6.OutType143 = value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = value + case "NoPorts": + procSnmp6.Udp6.NoPorts = value + case "InErrors": + procSnmp6.Udp6.InErrors = value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = value + case "NoPorts": + 
procSnmp6.UdpLite6.NoPorts = value + case "InErrors": + procSnmp6.UdpLite6.InErrors = value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 0000000..06c556e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,222 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. 
It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. 
+ CUTime int + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime int + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize uint + // Resident set size in pages. + RSS int + // Soft limit in bytes on the rss of the process. + RSSLimit uint64 + // Real-time scheduling priority, a number in the range 1 to 99 for processes + // scheduled under a real-time policy, or 0, for non-real-time processes. + RTPriority uint + // Scheduling policy. + Policy uint + // Aggregated block I/O delays, measured in clock ticks (centiseconds). + DelayAcctBlkIOTicks uint64 + + proc fs.FS +} + +// NewStat returns the current status information of the process. +// +// Deprecated: Use p.Stat() instead. +func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. 
+func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + + var ( + ignoreInt64 int64 + ignoreUint64 uint64 + + s = ProcStat{PID: p.PID, proc: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + } + + s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignoreInt64, + &s.Starttime, + &s.VSize, + &s.RSS, + &s.RSSLimit, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreUint64, + &ignoreInt64, + &ignoreInt64, + &s.RTPriority, + &s.Policy, + &s.DelayAcctBlkIOTicks, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() uint { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. 
+func (s ProcStat) StartTime() (float64, error) { + fs := FS{proc: s.proc} + stat, err := fs.Stat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 0000000..594022d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 // nolint:revive + // Virtual memory size. + VmSize uint64 // nolint:revive + // Locked memory size. + VmLck uint64 // nolint:revive + // Pinned memory size. + VmPin uint64 // nolint:revive + // Peak resident set size. + VmHWM uint64 // nolint:revive + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:revive + // Size of resident anonymous memory. 
+ RssAnon uint64 // nolint:revive + // Size of resident file mappings. + RssFile uint64 // nolint:revive + // Size of resident shared memory. + RssShmem uint64 // nolint:revive + // Size of data segments. + VmData uint64 // nolint:revive + // Size of stack segments. + VmStk uint64 // nolint:revive + // Size of text segments. + VmExe uint64 // nolint:revive + // Shared library code size. + VmLib uint64 // nolint:revive + // Page table entries size. + VmPTE uint64 // nolint:revive + // Size of second-level page tables. + VmPMD uint64 // nolint:revive + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 // nolint:revive + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) + UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string +} + +// NewStatus returns the current status information of the process. 
+func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total 
context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 0000000..d46533e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 0000000..5f7f32d --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line. +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from `/proc//schedstat`. +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from `/proc/schedstat`. 
+func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (ProcSchedstat, error) { + var ( + stats ProcSchedstat + err error + ) + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return stats, err + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return stats, err + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return stats, err + } + + return stats, errors.New("could not parse schedstat") +} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go new file mode 100644 index 0000000..bc9aaf5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -0,0 +1,151 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + slabSpace = regexp.MustCompile(`\s+`) + slabVer = regexp.MustCompile(`slabinfo -`) + slabHeader = regexp.MustCompile(`# name`) +) + +// Slab represents a slab pool in the kernel. +type Slab struct { + Name string + ObjActive int64 + ObjNum int64 + ObjSize int64 + ObjPerSlab int64 + PagesPerSlab int64 + // tunables + Limit int64 + Batch int64 + SharedFactor int64 + SlabActive int64 + SlabNum int64 + SharedAvail int64 +} + +// SlabInfo represents info for all slabs. +type SlabInfo struct { + Slabs []*Slab +} + +func shouldParseSlab(line string) bool { + if slabVer.MatchString(line) { + return false + } + if slabHeader.MatchString(line) { + return false + } + return true +} + +// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. +func parseV21SlabEntry(line string) (*Slab, error) { + // First cleanup whitespace. 
+ l := slabSpace.ReplaceAllString(line, " ") + s := strings.Split(l, " ") + if len(s) != 16 { + return nil, fmt.Errorf("unable to parse: %q", line) + } + var err error + i := &Slab{Name: s[0]} + i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) + if err != nil { + return nil, err + } + i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) + if err != nil { + return nil, err + } + i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) + if err != nil { + return nil, err + } + i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) + if err != nil { + return nil, err + } + i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) + if err != nil { + return nil, err + } + i.Limit, err = strconv.ParseInt(s[8], 10, 64) + if err != nil { + return nil, err + } + i.Batch, err = strconv.ParseInt(s[9], 10, 64) + if err != nil { + return nil, err + } + i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) + if err != nil { + return nil, err + } + i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) + if err != nil { + return nil, err + } + i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) + if err != nil { + return nil, err + } + i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) + if err != nil { + return nil, err + } + return i, nil +} + +// parseSlabInfo21 is used to parse a slabinfo 2.1 file. +func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { + scanner := bufio.NewScanner(r) + s := SlabInfo{Slabs: []*Slab{}} + for scanner.Scan() { + line := scanner.Text() + if !shouldParseSlab(line) { + continue + } + slab, err := parseV21SlabEntry(line) + if err != nil { + return s, err + } + s.Slabs = append(s.Slabs, slab) + } + return s, nil +} + +// SlabInfo reads data from `/proc/slabinfo`. +func (fs FS) SlabInfo() (SlabInfo, error) { + // TODO: Consider passing options to allow for parsing different + // slabinfo versions. However, slabinfo 2.1 has been stable since + // kernel 2.6.10 and later. 
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) + if err != nil { + return SlabInfo{}, err + } + + return parseSlabInfo21(bytes.NewReader(data)) +} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 0000000..559129c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. 
+type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("softirqs empty") + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + } + } + 
case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + } + + return softirqs, scanner.Err() +} diff --git 
a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 0000000..33f97ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,244 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading `/proc/softirqs`. +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. 
+ CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: Use fs.Stat() instead. +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: Use fs.Stat() instead. +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. 
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Stat{}, err + } + + stat := Stat{} + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != 
nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 0000000..15edc22 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar new file mode 100644 index 0000000..19ef02b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,413 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C

] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE +unset RECURSIVE_UNLINK + +while getopts :cf:-:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! 
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). 
+ echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo 
"SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." 
+ echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 0000000..20ceb77 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. +// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string. 
+type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // 
/proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the specified `proc` filesystem. +func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := os.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() 
+ case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + 
vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = vp.PInt64() + case "zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 0000000..c745a4c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "os" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + continue + } + 
parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": + zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case "nr_written": + 
zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore new file mode 100644 index 0000000..18312f0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -0,0 +1,7 @@ +*.prof +*.test +*.swp +/bin/ +cover.out +/.idea +*.iml diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml new file mode 100644 index 0000000..452601e --- /dev/null +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -0,0 +1,18 @@ +language: go +go_import_path: go.etcd.io/bbolt + +sudo: false + +go: +- 1.15 + +before_install: +- go get -v golang.org/x/sys/unix +- go get -v honnef.co/go/tools/... 
+- go get -v github.com/kisielk/errcheck + +script: +- make fmt +- make test +- make race +# - make errcheck diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE new file mode 100644 index 0000000..004e77f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile new file mode 100644 index 0000000..21ecf48 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -0,0 +1,36 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +race: + @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" + @echo "array freelist test" + @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . 
-name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt + +test: + TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt + + @echo "array freelist test" + + @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md new file mode 100644 index 0000000..f1b4a7b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/README.md @@ -0,0 +1,958 @@ +bbolt +===== + +[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) +[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) +[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) +[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. 
The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Project versioning + +bbolt uses [semantic versioning](http://semver.org). +API should not change between patch and minor releases. +New minor versions may add additional features to the API. 
+ +## Table of Contents + + - [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) + - [Resources](#resources) + - [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) + - [Caveats & Limitations](#caveats--limitations) + - [Reading the Source](#reading-the-source) + - [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get go.etcd.io/bbolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Importing bbolt + +To use bbolt as an embedded key-value store, import as: + +```go +import bolt "go.etcd.io/bbolt" + +db, err := bolt.Open(path, 0666, nil) +if err != nil { + return err +} +defer db.Close() +``` + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. 
+ +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + bolt "go.etcd.io/bbolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Transactions should not depend on one another and generally shouldn't be opened +simultaneously in the same goroutine. This can cause a deadlock as the read-write +transaction needs to periodically re-map the data file but it cannot do so while +any read-only transaction is open. Even a nested read-only transaction can cause +a deadlock, as the child transaction can block the parent transaction from releasing +its resources. + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... 
+ return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... 
+} +fmt.Printf("Allocated ID %d\n", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err = tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. 
+ + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. 
+ id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. 
+ + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. 
The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. 
See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. 
To do this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with an initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. 
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. 
This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. 
For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). 
+ +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. 
+ +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where the key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. 
Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. 
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performance HTTPRouter. 
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. 
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. +* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. + +If you are using Bolt in a project please send a pull request to add it to the list. 
diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go new file mode 100644 index 0000000..aee2596 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_386.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go new file mode 100644 index 0000000..5dd8f3f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go new file mode 100644 index 0000000..aee2596 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go new file mode 100644 index 0000000..810dfd5 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go new file mode 100644 index 0000000..7707bca --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bbolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go new file mode 100644 index 0000000..dd8ffe1 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -0,0 +1,9 @@ +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go new file mode 100644 index 0000000..a669703 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -0,0 +1,9 @@ +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go new file mode 100644 index 0000000..d7f5035 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bbolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go new file mode 100644 index 0000000..84e545e --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go new file mode 100644 index 0000000..a761209 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go new file mode 100644 index 0000000..c830f2f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. 
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go new file mode 100644 index 0000000..c967613 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -0,0 +1,9 @@ +// +build riscv64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go new file mode 100644 index 0000000..ff2a560 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go new file mode 100644 index 0000000..4e5f65c --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -0,0 +1,86 @@ +// +build !windows,!plan9,!solaris,!aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } + for { + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // If we timed out then return an error. 
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go new file mode 100644 index 0000000..a64c16f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go @@ -0,0 +1,90 @@ +// +build aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go new file mode 100644 index 0000000..babad65 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go @@ -0,0 +1,88 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. 
+ db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go new file mode 100644 index 0000000..fca178b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -0,0 +1,141 @@ +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. 
+func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + for { + // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range + // -1..0 as the lock on the database file. + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) + + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // If we timed oumercit then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. 
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go new file mode 100644 index 0000000..9587afe --- /dev/null +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bbolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go new file mode 100644 index 0000000..d8750b1 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -0,0 +1,777 @@ +package bbolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. 
+type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. 
+ return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // Unaligned access requires a copy to be made. + const unalignedMask = unsafe.Alignof(struct { + bucket + page + }{}) - 1 + unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. 
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. 
+ c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
+func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. 
+ if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * uintptr(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. 
+ lastElement := p.leafPageElement(p.count - 1) + used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += int(used) + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += int(used) + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += uintptr(lastElement.pos + lastElement.ksize) + s.BranchInuse += int(used) + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. 
+func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. 
+ var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() uintptr { + return uintptr(b.tx.db.pageSize / 4) +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. 
+ var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. 
+func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go new file mode 100644 index 0000000..e4fe91b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/compact.go @@ -0,0 +1,114 @@ +package bbolt + +// Compact will create a copy of the source DB and in the destination DB. This may +// reclaim space that the source database no longer has use for. txMaxSize can be +// used to limit the transactions size of this process and may trigger intermittent +// commits. A value of zero will ignore transaction sizes. +// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 +func Compact(dst, src *DB, txMaxSize int64) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > txMaxSize && txMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // Fill the entire page for best compaction. + b.FillPercent = 1.0 + + // If there is no value then this is a bucket call. 
+ if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func walk(db *DB, walkFn walkFunc) error { + return db.View(func(tx *Tx) error { + return tx.ForEach(func(name []byte, b *Bucket) error { + return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go new file mode 100644 index 0000000..98aeb44 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -0,0 +1,396 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. 
+// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. 
+// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. 
+ if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. 
+ ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. 
+ if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. 
+ inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(ref.index) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. 
+func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go new file mode 100644 index 0000000..a798c39 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/db.go @@ -0,0 +1,1232 @@ +package bbolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "sort" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +const pgidNoFreelist pgid = 0xffffffffffffffff + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + +// FreelistType is the type of the freelist backend +type FreelistType string + +const ( + // FreelistArrayType indicates backend freelist type is array + FreelistArrayType = FreelistType("array") + // FreelistMapType indicates backend freelist type is hashmap + FreelistMapType = FreelistType("hashmap") +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. 
+// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and framentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. 
+ // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + // Mlock locks database file in memory when set to true. + // It prevents major page faults, however used memory can't be reclaimed. + // + // Supported only on Unix via mlock/munlock syscalls. + Mlock bool + + path string + openFile func(string, int, os.FileMode) (*os.File, error) + file *os.File + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + stats Stats + + freelist *freelist + freelistLoad sync.Once + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. 
+ readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + db := &DB{ + opened: true, + } + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoSync = options.NoSync + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync + db.FreelistType = options.FreelistType + db.Mlock = options.Mlock + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + db.openFile = options.OpenFile + if db.openFile == nil { + db.openFile = os.OpenFile + } + + // Open data file and separate sync handler for metadata writes. + var err error + if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + db.path = db.file.Name() + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. 
+ // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + _ = db.close() + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + db.pageSize = int(m.pageSize) + } + } else { + _ = db.close() + return nil, ErrInvalid + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. 
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } + + // Mark the database as opened and return. + return db, nil +} + +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist(db.FreelistType) + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = db.freelist.free_count() + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + fileSize := int(info.Size()) + var size = fileSize + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + if db.Mlock { + // Unlock db memory + if err := db.munlock(fileSize); err != nil { + return err + } + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. 
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	if db.Mlock {
+		// Don't allow swapping of data file
+		if err := db.mlock(fileSize); err != nil {
+			return err
+		}
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize { + sz = maxMapSize + } + + return int(sz), nil +} + +func (db *DB) munlock(fileSize int) error { + if err := munlock(db, fileSize); err != nil { + return fmt.Errorf("munlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mlock(fileSize int) error { + if err := mlock(db, fileSize); err != nil { + return fmt.Errorf("mlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { + if err := db.munlock(fileSizeFrom); err != nil { + return err + } + if err := db.mlock(fileSizeTo); err != nil { + return err + } + return nil +} + +// init creates a new database file and initializes its meta pages. +func (db *DB) init() error { + // Create two meta pages on a buffer. + buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf, pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + m.checksum = m.sum64() + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf, pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf, pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + db.filesz = len(buf) + + return nil +} + +// Close releases all database resources. +// It will block waiting for any open transactions to finish +// before closing the database and returning. 
+func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. 
+func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + db.freePages() + return t, nil +} + +// freePages releases any pages associated with closed read-only transactions. 
+func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid + } + if minid > 0 { + db.freelist.release(minid - 1) + } + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. +} + +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. 
+func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Rollback() +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. 
+func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. 
+ c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. 
+func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(txid txid, count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(txid, count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. 
+	if sz <= db.filesz {
+		return nil
+	}
+
+	// If the data is smaller than the alloc size then only allocate what's needed.
+	// Once it goes over the allocation size then allocate in chunks.
+	if db.datasz < db.AllocSize {
+		sz = db.datasz
+	} else {
+		sz += db.AllocSize
+	}
+
+	// Truncate and fsync to ensure file size metadata is flushed.
+	// https://github.com/boltdb/bolt/issues/284
+	if !db.NoGrowSync && !db.readOnly {
+		if runtime.GOOS != "windows" {
+			if err := db.file.Truncate(int64(sz)); err != nil {
+				return fmt.Errorf("file resize error: %s", err)
+			}
+		}
+		if err := db.file.Sync(); err != nil {
+			return fmt.Errorf("file sync error: %s", err)
+		}
+		if db.Mlock {
+			// unlock old file and lock new one
+			if err := db.mrelock(db.filesz, sz); err != nil {
+				return fmt.Errorf("mlock/munlock error: %s", err)
+			}
+		}
+	}
+
+	db.filesz = sz
+	return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+	return db.readOnly
+}
+
+func (db *DB) freepages() []pgid {
+	tx, err := db.beginTx()
+	// Check the error before registering the deferred rollback: if beginTx
+	// failed, tx is nil and the deferred tx.Rollback() would nil-deref while
+	// unwinding, masking the real failure.
+	if err != nil {
+		panic("freepages: failed to open read only tx")
+	}
+	defer func() {
+		if err := tx.Rollback(); err != nil {
+			panic("freepages: failed to rollback tx")
+		}
+	}()
+
+	reachable := make(map[pgid]*page)
+	nofreed := make(map[pgid]bool)
+	ech := make(chan error)
+	go func() {
+		for e := range ech {
+			panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+		}
+	}()
+	tx.checkBucket(&tx.root, reachable, nofreed, ech)
+	close(ech)
+
+	var fids []pgid
+	for i := pgid(2); i < db.meta().pgid; i++ {
+		if _, ok := reachable[i]; !ok {
+			fids = append(fids, i)
+		}
+	}
+	return fids
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+	// Timeout is the amount of time to wait to obtain a file lock.
+	// When set to zero it will wait indefinitely. This option is only
+	// available on Darwin and Linux.
+	Timeout time.Duration
+
+	// Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool + + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and framentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. + NoSync bool + + // OpenFile is used to open files. It defaults to os.OpenFile. This option + // is useful for writing hermetic tests. + OpenFile func(string, int, os.FileMode) (*os.File, error) + + // Mlock locks database file in memory when set to true. + // It prevents potential page faults, however + // used memory can't be reclaimed. 
(UNIX only) + Mlock bool +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, + FreelistType: FreelistArrayType, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. 
+func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go new file mode 100644 index 0000000..95f25f0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/doc.go @@ -0,0 +1,44 @@ +/* +package bbolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. 
+ +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go new file mode 100644 index 0000000..48758ca --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -0,0 +1,71 @@ +package bbolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. 
+ ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go new file mode 100644 index 0000000..697a469 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -0,0 +1,404 @@ +package bbolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[pgid]struct{} + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + freelistType FreelistType // freelist type + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. 
+ freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[pgid]uint64 // key is start pgid, value is its span size + backwardMap map[pgid]uint64 // key is end pgid, value is its span size + allocate func(txid txid, n int) pgid // the freelist allocate func + free_count func() int // the function which gives you free page number + mergeSpans func(ids pgids) // the mergeSpan func + getFreePageIDs func() []pgid // get free pgids func + readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist(freelistType FreelistType) *freelist { + f := &freelist{ + freelistType: freelistType, + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]bool), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[pgid]uint64), + backwardMap: make(map[pgid]uint64), + } + + if freelistType == FreelistMapType { + f.allocate = f.hashmapAllocate + f.free_count = f.hashmapFreeCount + f.mergeSpans = f.hashmapMergeSpans + f.getFreePageIDs = f.hashmapGetFreePageIDs + f.readIDs = f.hashmapReadIDs + } else { + f.allocate = f.arrayAllocate + f.free_count = f.arrayFreeCount + f.mergeSpans = f.arrayMergeSpans + f.getFreePageIDs = f.arrayGetFreePageIDs + f.readIDs = f.arrayReadIDs + } + + return f +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. 
+ n++ + } + return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// arrayFreeCount returns count of free pages(array version) +func (f *freelist) arrayFreeCount() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.getFreePageIDs(), m) +} + +// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) arrayAllocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. 
+ for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = true + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + f.mergeSpans(m) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + // Copy the list of page ids from the freelist. 
+ if count == 0 { + f.ids = nil + } else { + var ids []pgid + data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) + unsafeSlice(unsafe.Pointer(&ids), data, count) + + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]pgid, count) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(pgids(idsCopy)) + + f.readIDs(idsCopy) + } +} + +// arrayReadIDs initializes the freelist from a given list of ids. +func (f *freelist) arrayReadIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +func (f *freelist) arrayGetFreePageIDs() []pgid { + return f.ids +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + l := f.count() + if l == 0 { + p.count = uint16(l) + } else if l < 0xFFFF { + p.count = uint16(l) + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l) + f.copyall(ids) + } else { + p.count = 0xFFFF + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids[0] = pgid(l) + f.copyall(ids[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. 
+ pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.getFreePageIDs() { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// noSyncReload reads the freelist from pgids and filters out pending items. +func (f *freelist) noSyncReload(pgids []pgid) { + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range pgids { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. 
+func (f *freelist) reindex() { + ids := f.getFreePageIDs() + f.cache = make(map[pgid]bool, len(ids)) + for _, id := range ids { + f.cache[id] = true + } + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + f.cache[pendingID] = true + } + } +} + +// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array +func (f *freelist) arrayMergeSpans(ids pgids) { + sort.Sort(ids) + f.ids = pgids(f.ids).merge(ids) +} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go new file mode 100644 index 0000000..dbd67a1 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -0,0 +1,178 @@ +package bbolt + +import "sort" + +// hashmapFreeCount returns count of free pages(hashmap version) +func (f *freelist) hashmapFreeCount() int { + // use the forwardMap to get the total count + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend +func (f *freelist) hashmapAllocate(txid txid, n int) pgid { + if n == 0 { + return 0 + } + + // if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+pgid(n), remain) + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) +func (f *freelist) hashmapReadIDs(pgids []pgid) { + 
f.init(pgids) + + // Rebuild the page cache. + f.reindex() +} + +// hashmapGetFreePageIDs returns the sorted free page ids +func (f *freelist) hashmapGetFreePageIDs() []pgid { + count := f.free_count() + if count == 0 { + return nil + } + + m := make([]pgid, 0, count) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + m = append(m, start+pgid(i)) + } + } + sort.Sort(pgids(m)) + + return m +} + +// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans +func (f *freelist) hashmapMergeSpans(ids pgids) { + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *freelist) mergeWithExistingSpan(pid pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - pgid(preSize) + f.delSpan(start, preSize) + + newStart -= pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +func (f *freelist) addSpan(start pgid, size uint64) { + f.backwardMap[start-1+pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} +} + +func (f *freelist) delSpan(start pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } +} + +// initial from pgids using when use hashmap version +// pgids must be sorted +func (f *freelist) init(pgids []pgid) { + if len(pgids) == 0 { + return + } + + size := uint64(1) + start 
:= pgids[0] + + if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[pgid]uint64) + f.backwardMap = make(map[pgid]uint64) + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } +} diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go new file mode 100644 index 0000000..6a6c7b3 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package bbolt + +import "golang.org/x/sys/unix" + +// mlock locks memory of db file +func mlock(db *DB, fileSize int) error { + sizeToLock := fileSize + if sizeToLock > db.datasz { + // Can't lock more than mmaped slice + sizeToLock = db.datasz + } + if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil { + return err + } + return nil +} + +//munlock unlocks memory of db file +func munlock(db *DB, fileSize int) error { + if db.dataref == nil { + return nil + } + + sizeToUnlock := fileSize + if sizeToUnlock > db.datasz { + // Can't unlock more than mmaped slice + sizeToUnlock = db.datasz + } + + if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil { + return err + } + return nil +} diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go new file mode 100644 index 0000000..b4a36a4 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/mlock_windows.go @@ -0,0 +1,11 @@ +package bbolt + +// mlock locks memory of db file +func mlock(_ *DB, _ int) error { + panic("mlock is supported only on UNIX systems") +} + +//munlock unlocks memory of db file +func munlock(_ *DB, _ int) error { + panic("munlock is supported only on UNIX systems") +} diff --git a/vendor/go.etcd.io/bbolt/node.go 
b/vendor/go.etcd.io/bbolt/node.go new file mode 100644 index 0000000..73988b5 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/node.go @@ -0,0 +1,602 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + } + return int(sz) +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v uintptr) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() uintptr { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. 
+ exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
+ if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.key) + len(item.value) + b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.key) + copy(b[l:], item.value) + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize uintptr) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. 
+ node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize uintptr) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz uintptr) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. 
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = uintptr(i) + inode := n.inodes[i] + elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(uintptr(tx.db.pageSize)) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. 
+ if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. 
+ if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) 
+ return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { + return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 +} + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go new file mode 100644 index 0000000..c9a158f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/page.go @@ -0,0 +1,204 @@ +package bbolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = unsafe.Sizeof(page{}) + +const minKeysPerPage = 2 + +const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 +} + +// typ returns a human readable page type string used for debugging. 
+func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + leafPageElementSize, int(index))) +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + var elems []leafPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + var elems []branchPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *page) hexdump(n int) { + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. 
+func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go new file mode 100644 index 0000000..869d412 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -0,0 +1,723 @@ +package bbolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. 
+type Tx struct {
+	writable bool // true for a read/write transaction, false for read-only
+	// managed is set when the DB controls the tx lifecycle itself; user calls
+	// to Commit/Rollback are then disallowed (see the _assert calls there).
+	managed        bool
+	db             *DB
+	meta           *meta          // private copy of the meta page (see init)
+	root           Bucket         // root bucket, through which all buckets are reached
+	pages          map[pgid]*page // dirty page cache; non-nil only for writable txs
+	stats          TxStats
+	commitHandlers []func() // callbacks registered via OnCommit
+
+	// WriteFlag specifies the flag for write-related methods like WriteTo().
+	// Tx opens the database file with the specified flag to copy the data.
+	//
+	// By default, the flag is unset, which works well for mostly in-memory
+	// workloads. For databases that are much larger than available RAM,
+	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+	WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+	tx.db = db
+	tx.pages = nil
+
+	// Copy the meta page since it can be changed by the writer.
+	// This gives the transaction an isolated snapshot of the meta state.
+	tx.meta = &meta{}
+	db.meta().copy(tx.meta)
+
+	// Copy over the root bucket.
+	tx.root = newBucket(tx)
+	tx.root.bucket = &bucket{}
+	*tx.root.bucket = tx.meta.root
+
+	// Increment the transaction id and add a page cache for writable transactions.
+	if tx.writable {
+		tx.pages = make(map[pgid]*page)
+		tx.meta.txid += txid(1)
+	}
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+	return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+	return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+// Computed as the high water mark (meta.pgid) times the page size.
+func (tx *Tx) Size() int64 {
+	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+	return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+	return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+	return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+	return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+	return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+	return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+	return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+// The value for each root key is ignored; every root key names a bucket, so
+// the callback receives the bucket looked up by name instead.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+	return tx.root.ForEach(func(k, v []byte) error {
+		return fn(k, tx.root.Bucket(k))
+	})
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+// Handlers are invoked by Commit, in registration order, after the tx is closed.
+func (tx *Tx) OnCommit(fn func()) {
+	tx.commitHandlers = append(tx.commitHandlers, fn)
+}
+
+// Commit writes all changes to disk and updates the meta page.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + } + + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { + return err + } + } else { + tx.meta.freelist = pgidNoFreelist + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). 
+ opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.nonPhysicalRollback() + return nil +} + +// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk. +func (tx *Tx) nonPhysicalRollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + } + tx.close() +} + +// rollback needs to reload the free pages from disk in case some system error happens like fsync error. +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + if !tx.db.hasSyncedFreelist() { + // Reconstruct free page list by scanning the DB to get the whole free page list. + // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.noSyncReload(tx.db.freepages()) + } else { + // Read free page list from freelist page. + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. 
+ var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. 
+ page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, nil +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + _, err = tx.WriteTo(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. 
+ reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. 
+func (tx *Tx) allocate(count int) (*page, error) {
+	p, err := tx.db.allocate(tx.meta.txid, count)
+	if err != nil {
+		return nil, err
+	}
+
+	// Save to our page cache.
+	tx.pages[p.id] = p
+
+	// Update statistics.
+	tx.stats.PageCount += count
+	tx.stats.PageAlloc += count * tx.db.pageSize
+
+	return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+	// Sort pages by id.
+	pages := make(pages, 0, len(tx.pages))
+	for _, p := range tx.pages {
+		pages = append(pages, p)
+	}
+	// Clear out page cache early.
+	tx.pages = make(map[pgid]*page)
+	sort.Sort(pages)
+
+	// Write pages to disk in order.
+	for _, p := range pages {
+		// A page plus its overflow pages are stored contiguously on disk.
+		rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
+		offset := int64(p.id) * int64(tx.db.pageSize)
+		var written uintptr
+
+		// Write out page in "max allocation" sized chunks.
+		for {
+			sz := rem
+			if sz > maxAllocSize-1 {
+				sz = maxAllocSize - 1
+			}
+			buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
+
+			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+				return err
+			}
+
+			// Update statistics.
+			tx.stats.Write++
+
+			// Exit inner for loop if we've written all the chunks.
+			rem -= sz
+			if rem == 0 {
+				break
+			}
+
+			// Otherwise move offset forward and move pointer to next chunk.
+			offset += int64(sz)
+			written += uintptr(sz)
+		}
+	}
+
+	// Ignore file sync if flag is set on DB.
+	if !tx.db.NoSync || IgnoreNoSync {
+		if err := fdatasync(tx.db); err != nil {
+			return err
+		}
+	}
+
+	// Put small pages back to page pool.
+	for _, p := range pages {
+		// Ignore page sizes over 1 page.
+		// These are allocated using make() instead of the page pool.
+		if int(p.overflow) != 0 {
+			continue
+		}
+
+		buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
+
+		// Zero the buffer before pooling so stale data cannot leak into a
+		// future allocation from the pool.
+		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+		for i := range buf {
+			buf[i] = 0
+		}
+		tx.db.pagePool.Put(buf)
+	}
+
+	return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+	// Create a temporary buffer for the meta page.
+	buf := make([]byte, tx.db.pageSize)
+	p := tx.db.pageInBuffer(buf, 0)
+	tx.meta.write(p)
+
+	// Write the meta page to file.
+	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+		return err
+	}
+	// Sync unless syncing is disabled on the DB (and not globally overridden).
+	if !tx.db.NoSync || IgnoreNoSync {
+		if err := fdatasync(tx.db); err != nil {
+			return err
+		}
+	}
+
+	// Update statistics.
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+// The walk is depth-first; fn receives each page along with its depth from the start page.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	// Only branch pages reference child pages; leaf pages end the recursion.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		// Beyond the high water mark: no such page. Not treated as an error.
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+// add accumulates the statistics from other into s.
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go new file mode 100644 index 0000000..c0e5037 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/unsafe.go @@ -0,0 +1,39 @@ +package bbolt + +import ( + "reflect" + "unsafe" +) + +func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset) +} + +func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) +} + +func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { + // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices + // + // This memory is not allocated from C, but it is unmanaged by Go's + // garbage collector and should behave similarly, and the compiler + // should produce similar code. Note that this conversion allows a + // subslice to begin after the base address, with an optional offset, + // while the URL above does not cover this case and only slices from + // index 0. However, the wiki never says that the address must be to + // the beginning of a C allocation (or even that malloc was used at + // all), so this is believed to be correct. 
+ return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] +} + +// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by +// the slice parameter. This helper should be used over other direct +// manipulation of reflect.SliceHeader to prevent misuse, namely, converting +// from reflect.SliceHeader to a Go slice type. +func unsafeSlice(slice, data unsafe.Pointer, len int) { + s := (*reflect.SliceHeader)(slice) + s.Data = uintptr(data) + s.Cap = len + s.Len = len +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go new file mode 100644 index 0000000..aaafea2 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -0,0 +1,818 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec, +// most famously used by Let's Encrypt. +// +// The initial implementation of this package was based on an early version +// of the spec. The current implementation supports only the modern +// RFC 8555 but some of the old API surface remains for compatibility. +// While code using the old API will still compile, it will return an error. +// Note the deprecation comments to update your code. +// +// See https://tools.ietf.org/html/rfc8555 for the spec. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. 
+package acme + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/http" + "strings" + "sync" + "time" +) + +const ( + // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. + LetsEncryptURL = "https://acme-v02.api.letsencrypt.org/directory" + + // ALPNProto is the ALPN protocol name used by a CA server when validating + // tls-alpn-01 challenges. + // + // Package users must ensure their servers can negotiate the ACME ALPN in + // order for tls-alpn-01 challenge verifications to succeed. + // See the crypto/tls package's Config.NextProtos field. + ALPNProto = "acme-tls/1" +) + +// idPeACMEIdentifier is the OID for the ACME extension for the TLS-ALPN challenge. +// https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05#section-5.1 +var idPeACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in DER bytes + // Used for decoding certs from application/pem-certificate-chain response, + // the default when in RFC mode. + maxCertChainSize = maxCertSize * maxChainLen + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + // + // The following algorithms are supported: + // RS256, ES256, ES384 and ES512. 
+ // See RFC 7518 for more details about the algorithms. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + // RetryBackoff computes the duration after which the nth retry of a failed request + // should occur. The value of n for the first call on failure is 1. + // The values of r and resp are the request and response of the last failed attempt. + // If the returned value is negative or zero, no more retries are done and an error + // is returned to the caller of the original method. + // + // Requests which result in a 4xx client error are not retried, + // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. + // + // If RetryBackoff is nil, a truncated exponential backoff algorithm + // with the ceiling of 10 seconds is used, where each subsequent retry n + // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), + // preferring the former if "Retry-After" header is found in the resp. + // The jitter is a random value up to 1 second. + RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration + + // UserAgent is prepended to the User-Agent header sent to the ACME server, + // which by default is this package's name and version. + // + // Reusable libraries and tools in particular should set this value to be + // identifiable by the server, in case they are causing issues. + UserAgent string + + cacheMu sync.Mutex + dir *Directory // cached result of Client's Discover method + // KID is the key identifier provided by the CA. If not provided it will be + // retrieved from the CA by making a call to the registration endpoint. 
+ KID KeyID + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// accountKID returns a key ID associated with c.Key, the account identity +// provided by the CA during RFC based registration. +// It assumes c.Discover has already been called. +// +// accountKID requires at most one network roundtrip. +// It caches only successful result. +// +// When in pre-RFC mode or when c.getRegRFC responds with an error, accountKID +// returns noKeyID. +func (c *Client) accountKID(ctx context.Context) KeyID { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + if c.KID != noKeyID { + return c.KID + } + a, err := c.getRegRFC(ctx) + if err != nil { + return noKeyID + } + c.KID = KeyID(a.URI) + return c.KID +} + +var errPreRFC = errors.New("acme: server does not support the RFC 8555 version of ACME") + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. 
func (c *Client) Discover(ctx context.Context) (Directory, error) {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()
	// Return the cached directory if a previous Discover succeeded.
	if c.dir != nil {
		return *c.dir, nil
	}

	res, err := c.get(ctx, c.directoryURL(), wantStatus(http.StatusOK))
	if err != nil {
		return Directory{}, err
	}
	defer res.Body.Close()
	c.addNonce(res.Header)

	var v struct {
		Reg       string `json:"newAccount"`
		Authz     string `json:"newAuthz"`
		Order     string `json:"newOrder"`
		Revoke    string `json:"revokeCert"`
		Nonce     string `json:"newNonce"`
		KeyChange string `json:"keyChange"`
		Meta      struct {
			Terms        string   `json:"termsOfService"`
			Website      string   `json:"website"`
			CAA          []string `json:"caaIdentities"`
			ExternalAcct bool     `json:"externalAccountRequired"`
		}
	}
	if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
		return Directory{}, err
	}
	// A directory without a newOrder endpoint is a pre-RFC 8555 (ACME v1)
	// server, which this client no longer supports.
	if v.Order == "" {
		return Directory{}, errPreRFC
	}
	c.dir = &Directory{
		RegURL:                  v.Reg,
		AuthzURL:                v.Authz,
		OrderURL:                v.Order,
		RevokeURL:               v.Revoke,
		NonceURL:                v.Nonce,
		KeyChangeURL:            v.KeyChange,
		Terms:                   v.Meta.Terms,
		Website:                 v.Meta.Website,
		CAA:                     v.Meta.CAA,
		ExternalAccountRequired: v.Meta.ExternalAcct,
	}
	return *c.dir, nil
}

// directoryURL returns c.DirectoryURL, falling back to LetsEncryptURL when unset.
func (c *Client) directoryURL() string {
	if c.DirectoryURL != "" {
		return c.DirectoryURL
	}
	return LetsEncryptURL
}

// CreateCert was part of the old version of ACME. It is incompatible with RFC 8555.
//
// Deprecated: this was for the pre-RFC 8555 version of ACME. Callers should use CreateOrderCert.
func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) {
	return nil, "", errPreRFC
}

// FetchCert retrieves already issued certificate from the given url, in DER format.
// It retries the request until the certificate is successfully retrieved,
// context is cancelled by the caller or an error response is received.
//
// If the bundle argument is true, the returned value also contains the CA (issuer)
// certificate chain.
//
// FetchCert returns an error if the CA's response or chain was unreasonably large.
// Callers are encouraged to parse the returned value to ensure the certificate is valid
// and has expected features.
func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
	// Discover ensures the server speaks RFC 8555 ACME (it fails with
	// errPreRFC otherwise) before using the RFC endpoints.
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	return c.fetchCertRFC(ctx, url, bundle)
}

// RevokeCert revokes a previously issued certificate cert, provided in DER format.
//
// The key argument, used to sign the request, must be authorized
// to revoke the certificate. It's up to the CA to decide which keys are authorized.
// For instance, the key pair of the certificate may be authorized.
// If the key is nil, c.Key is used instead.
func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error {
	if _, err := c.Discover(ctx); err != nil {
		return err
	}
	return c.revokeCertRFC(ctx, key, cert, reason)
}

// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service
// during account registration. See Register method of Client for more details.
func AcceptTOS(tosURL string) bool { return true }

// Register creates a new account with the CA using c.Key.
// It returns the registered account. The account acct is not modified.
//
// The registration may require the caller to agree to the CA's Terms of Service (TOS).
// If so, and the account has not indicated the acceptance of the terms (see Account for details),
// Register calls prompt with a TOS URL provided by the CA. Prompt should report
// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS.
//
// When interfacing with an RFC-compliant CA, non-RFC 8555 fields of acct are ignored
// and prompt is called if Directory's Terms field is non-zero.
// Also see Error's Instance field for when a CA requires already registered accounts to agree
// to an updated Terms of Service.
func (c *Client) Register(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) {
	// Registration signs the request with the account key; it cannot proceed without one.
	if c.Key == nil {
		return nil, errors.New("acme: client.Key must be set to Register")
	}
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	return c.registerRFC(ctx, acct, prompt)
}

// GetReg retrieves an existing account associated with c.Key.
//
// The url argument is a legacy artifact of the pre-RFC 8555 API
// and is ignored.
func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	return c.getRegRFC(ctx)
}

// UpdateReg updates an existing registration.
// It returns an updated account copy. The provided account is not modified.
//
// The account's URI is ignored and the account URL associated with
// c.Key is used instead.
func (c *Client) UpdateReg(ctx context.Context, acct *Account) (*Account, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	return c.updateRegRFC(ctx, acct)
}

// AccountKeyRollover attempts to transition a client's account key to a new key.
// On success client's Key is updated which is not concurrency safe.
// On failure an error will be returned.
// The new key is already registered with the ACME provider if the following is true:
//   - error is of type acme.Error
//   - StatusCode should be 409 (Conflict)
//   - Location header will have the KID of the associated account
//
// More about account key rollover can be found at
// https://tools.ietf.org/html/rfc8555#section-7.3.5.
func (c *Client) AccountKeyRollover(ctx context.Context, newKey crypto.Signer) error {
	return c.accountKeyRollover(ctx, newKey)
}

// Authorize performs the initial step in the pre-authorization flow,
// as opposed to order-based flow.
// The caller will then need to choose from and perform a set of returned
// challenges using c.Accept in order to successfully complete authorization.
//
// Once complete, the caller can use AuthorizeOrder which the CA
// should provision with the already satisfied authorization.
// For pre-RFC CAs, the caller can proceed directly to requesting a certificate
// using CreateCert method.
//
// If an authorization has been previously granted, the CA may return
// a valid authorization which has its Status field set to StatusValid.
//
// More about pre-authorization can be found at
// https://tools.ietf.org/html/rfc8555#section-7.4.1.
func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) {
	// Pre-authorization with a "dns" identifier type (domain names).
	return c.authorize(ctx, "dns", domain)
}

// AuthorizeIP is the same as Authorize but requests IP address authorization.
// Clients which successfully obtain such authorization may request to issue
// a certificate for IP addresses.
//
// See the ACME spec extension for more details about IP address identifiers:
// https://tools.ietf.org/html/draft-ietf-acme-ip.
func (c *Client) AuthorizeIP(ctx context.Context, ipaddr string) (*Authorization, error) {
	return c.authorize(ctx, "ip", ipaddr)
}

// authorize requests a new authorization for the identifier of the given
// type ("dns" or "ip", see Authorize and AuthorizeIP) with value val.
func (c *Client) authorize(ctx context.Context, typ, val string) (*Authorization, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}

	type authzID struct {
		Type  string `json:"type"`
		Value string `json:"value"`
	}
	req := struct {
		Resource   string  `json:"resource"`
		Identifier authzID `json:"identifier"`
	}{
		Resource:   "new-authz",
		Identifier: authzID{Type: typ, Value: val},
	}
	res, err := c.post(ctx, nil, c.dir.AuthzURL, req, wantStatus(http.StatusCreated))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	var v wireAuthz
	if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
		return nil, fmt.Errorf("acme: invalid response: %v", err)
	}
	if v.Status != StatusPending && v.Status != StatusValid {
		return nil, fmt.Errorf("acme: unexpected status: %s", v.Status)
	}
	// The authorization's URI is reported by the CA in the Location header.
	return v.authorization(res.Header.Get("Location")), nil
}

// GetAuthorization retrieves an authorization identified by the given URL.
//
// If a caller needs to poll an authorization until its status is final,
// see the WaitAuthorization method.
func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}

	res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var v wireAuthz
	if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
		return nil, fmt.Errorf("acme: invalid response: %v", err)
	}
	return v.authorization(url), nil
}

// RevokeAuthorization relinquishes an existing authorization identified
// by the given URL.
// The url argument is an Authorization.URI value.
//
// If successful, the caller will be required to obtain a new authorization
// using the Authorize or AuthorizeOrder methods before being able to request
// a new certificate for the domain associated with the authorization.
//
// It does not revoke existing certificates.
func (c *Client) RevokeAuthorization(ctx context.Context, url string) error {
	if _, err := c.Discover(ctx); err != nil {
		return err
	}

	req := struct {
		Resource string `json:"resource"`
		Status   string `json:"status"`
		Delete   bool   `json:"delete"`
	}{
		Resource: "authz",
		Status:   "deactivated",
		Delete:   true,
	}
	res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK))
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}

// WaitAuthorization polls an authorization at the given URL
// until it is in one of the final states, StatusValid or StatusInvalid,
// the ACME CA responded with a 4xx error code, or the context is done.
//
// It returns a non-nil Authorization only if its Status is StatusValid.
// In all other cases WaitAuthorization returns an error.
// If the Status is StatusInvalid, the returned error is of type *AuthorizationError.
func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}
	for {
		res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
		if err != nil {
			return nil, err
		}

		var raw wireAuthz
		err = json.NewDecoder(res.Body).Decode(&raw)
		// Close the body eagerly; this loop may run many iterations.
		res.Body.Close()
		switch {
		case err != nil:
			// Skip and retry.
		case raw.Status == StatusValid:
			return raw.authorization(url), nil
		case raw.Status == StatusInvalid:
			return nil, raw.error(url)
		}

		// Exponential backoff is implemented in c.get above.
		// This is just to prevent continuously hitting the CA
		// while waiting for a final authorization status.
		d := retryAfter(res.Header.Get("Retry-After"))
		if d == 0 {
			// Given that the fastest challenges TLS-SNI and HTTP-01
			// require a CA to make at least 1 network round trip
			// and most likely persist a challenge state,
			// this default delay seems reasonable.
			d = time.Second
		}
		t := time.NewTimer(d)
		select {
		case <-ctx.Done():
			t.Stop()
			return nil, ctx.Err()
		case <-t.C:
			// Retry.
		}
	}
}

// GetChallenge retrieves the current status of an challenge.
//
// A client typically polls a challenge status using this method.
func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}

	res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
	if err != nil {
		return nil, err
	}

	defer res.Body.Close()
	v := wireChallenge{URI: url}
	if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
		return nil, fmt.Errorf("acme: invalid response: %v", err)
	}
	return v.challenge(), nil
}

// Accept informs the server that the client accepts one of its challenges
// previously obtained with c.Authorize.
//
// The server will then perform the validation asynchronously.
func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) {
	if _, err := c.Discover(ctx); err != nil {
		return nil, err
	}

	res, err := c.post(ctx, nil, chal.URI, json.RawMessage("{}"), wantStatus(
		http.StatusOK,       // according to the spec
		http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md)
	))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	var v wireChallenge
	if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
		return nil, fmt.Errorf("acme: invalid response: %v", err)
	}
	return v.challenge(), nil
}

// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response.
// A TXT record containing the returned value must be provisioned under
// "_acme-challenge" name of the domain being validated.
//
// The token argument is a Challenge.Token value.
func (c *Client) DNS01ChallengeRecord(token string) (string, error) {
	ka, err := keyAuth(c.Key.Public(), token)
	if err != nil {
		return "", err
	}
	// The record value is the unpadded base64url encoding of
	// SHA-256(key authorization).
	b := sha256.Sum256([]byte(ka))
	return base64.RawURLEncoding.EncodeToString(b[:]), nil
}

// HTTP01ChallengeResponse returns the response for an http-01 challenge.
// Servers should respond with the value to HTTP requests at the URL path
// provided by HTTP01ChallengePath to validate the challenge and prove control
// over a domain name.
//
// The token argument is a Challenge.Token value.
func (c *Client) HTTP01ChallengeResponse(token string) (string, error) {
	return keyAuth(c.Key.Public(), token)
}

// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge
// should be provided by the servers.
// The response value can be obtained with HTTP01ChallengeResponse.
//
// The token argument is a Challenge.Token value.
func (c *Client) HTTP01ChallengePath(token string) string {
	return "/.well-known/acme-challenge/" + token
}

// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response.
//
// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec.
func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
	ka, err := keyAuth(c.Key.Public(), token)
	if err != nil {
		return tls.Certificate{}, "", err
	}
	// SAN is derived from the hex SHA-256 of the key authorization,
	// split into two 32-char labels under acme.invalid.
	b := sha256.Sum256([]byte(ka))
	h := hex.EncodeToString(b[:])
	name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:])
	cert, err = tlsChallengeCert([]string{name}, opt)
	if err != nil {
		return tls.Certificate{}, "", err
	}
	return cert, name, nil
}

// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response.
//
// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec.
func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
	// First SAN is derived from the raw token.
	b := sha256.Sum256([]byte(token))
	h := hex.EncodeToString(b[:])
	sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:])

	// Second SAN is derived from the key authorization.
	ka, err := keyAuth(c.Key.Public(), token)
	if err != nil {
		return tls.Certificate{}, "", err
	}
	b = sha256.Sum256([]byte(ka))
	h = hex.EncodeToString(b[:])
	sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:])

	cert, err = tlsChallengeCert([]string{sanA, sanB}, opt)
	if err != nil {
		return tls.Certificate{}, "", err
	}
	return cert, sanA, nil
}

// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response.
// Servers can present the certificate to validate the challenge and prove control
// over a domain name. For more details on TLS-ALPN-01 see
// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3
//
// The token argument is a Challenge.Token value.
// If a WithKey option is provided, its private part signs the returned cert,
// and the public part is used to specify the signee.
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol
// has been specified.
func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) {
	ka, err := keyAuth(c.Key.Public(), token)
	if err != nil {
		return tls.Certificate{}, err
	}
	// The acmeIdentifier extension carries the DER-encoded SHA-256
	// of the key authorization, marked critical.
	shasum := sha256.Sum256([]byte(ka))
	extValue, err := asn1.Marshal(shasum[:])
	if err != nil {
		return tls.Certificate{}, err
	}
	acmeExtension := pkix.Extension{
		Id:       idPeACMEIdentifier,
		Critical: true,
		Value:    extValue,
	}

	tmpl := defaultTLSChallengeCertTemplate()

	var newOpt []CertOption
	for _, o := range opt {
		switch o := o.(type) {
		case *certOptTemplate:
			// A caller-supplied template replaces the default one;
			// copy it so the caller's value is not mutated below.
			t := *(*x509.Certificate)(o) // shallow copy is ok
			tmpl = &t
		default:
			newOpt = append(newOpt, o)
		}
	}
	tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension)
	newOpt = append(newOpt, WithTemplate(tmpl))
	return tlsChallengeCert([]string{domain}, newOpt)
}

// popNonce returns a nonce value previously stored with c.addNonce
// or fetches a fresh one from c.dir.NonceURL.
// If NonceURL is empty, it first tries c.directoryURL() and, failing that,
// the provided url.
func (c *Client) popNonce(ctx context.Context, url string) (string, error) {
	c.noncesMu.Lock()
	defer c.noncesMu.Unlock()
	if len(c.nonces) == 0 {
		if c.dir != nil && c.dir.NonceURL != "" {
			return c.fetchNonce(ctx, c.dir.NonceURL)
		}
		dirURL := c.directoryURL()
		v, err := c.fetchNonce(ctx, dirURL)
		if err != nil && url != dirURL {
			v, err = c.fetchNonce(ctx, url)
		}
		return v, err
	}
	// Pop an arbitrary nonce from the pool. Map iteration order is
	// random, but any stored nonce is equally usable.
	var nonce string
	for nonce = range c.nonces {
		delete(c.nonces, nonce)
		break
	}
	return nonce, nil
}

// clearNonces clears any stored nonces
func (c *Client) clearNonces() {
	c.noncesMu.Lock()
	defer c.noncesMu.Unlock()
	c.nonces = make(map[string]struct{})
}

// addNonce stores a nonce value found in h (if any) for future use.
func (c *Client) addNonce(h http.Header) {
	v := nonceFromHeader(h)
	if v == "" {
		return
	}
	c.noncesMu.Lock()
	defer c.noncesMu.Unlock()
	// Drop the nonce if the pool is already at capacity.
	if len(c.nonces) >= maxNonces {
		return
	}
	if c.nonces == nil {
		c.nonces = make(map[string]struct{})
	}
	c.nonces[v] = struct{}{}
}

// fetchNonce requests a fresh nonce from url with a HEAD request.
func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) {
	r, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return "", err
	}
	resp, err := c.doNoRetry(ctx, r)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	nonce := nonceFromHeader(resp.Header)
	if nonce == "" {
		// Prefer reporting the server's error response over the
		// generic "nonce not found".
		if resp.StatusCode > 299 {
			return "", responseError(resp)
		}
		return "", errors.New("acme: nonce not found")
	}
	return nonce, nil
}

// nonceFromHeader extracts the Replay-Nonce header value, if any.
func nonceFromHeader(h http.Header) string {
	return h.Get("Replay-Nonce")
}

// linkHeader returns URI-Reference values of all Link headers
// with relation-type rel.
// See https://tools.ietf.org/html/rfc5988#section-5 for details.
func linkHeader(h http.Header, rel string) []string {
	var links []string
	for _, v := range h["Link"] {
		parts := strings.Split(v, ";")
		for _, p := range parts {
			p = strings.TrimSpace(p)
			if !strings.HasPrefix(p, "rel=") {
				continue
			}
			if v := strings.Trim(p[4:], `"`); v == rel {
				// parts[0] is the <URI-Reference> part of the header value.
				links = append(links, strings.Trim(parts[0], "<>"))
			}
		}
	}
	return links
}

// keyAuth generates a key authorization string for a given token.
func keyAuth(pub crypto.PublicKey, token string) (string, error) {
	th, err := JWKThumbprint(pub)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s.%s", token, th), nil
}

// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges.
func defaultTLSChallengeCertTemplate() *x509.Certificate {
	return &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
}

// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges
// with the given SANs and auto-generated public/private key pair.
// The Subject Common Name is set to the first SAN to aid debugging.
// To create a cert with a custom key pair, specify WithKey option.
func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
	var key crypto.Signer
	tmpl := defaultTLSChallengeCertTemplate()
	for _, o := range opt {
		switch o := o.(type) {
		case *certOptKey:
			if key != nil {
				return tls.Certificate{}, errors.New("acme: duplicate key option")
			}
			key = o.key
		case *certOptTemplate:
			t := *(*x509.Certificate)(o) // shallow copy is ok
			tmpl = &t
		default:
			// package's fault, if we let this happen:
			panic(fmt.Sprintf("unsupported option type %T", o))
		}
	}
	if key == nil {
		var err error
		if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil {
			return tls.Certificate{}, err
		}
	}
	tmpl.DNSNames = san
	if len(san) > 0 {
		tmpl.Subject.CommonName = san[0]
	}

	// Self-signed: the template is used as both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
	if err != nil {
		return tls.Certificate{}, err
	}
	return tls.Certificate{
		Certificate: [][]byte{der},
		PrivateKey:  key,
	}, nil
}

// encodePEM returns b encoded as PEM with block of type typ.
func encodePEM(typ string, b []byte) []byte {
	pb := &pem.Block{Type: typ, Bytes: b}
	return pem.EncodeToMemory(pb)
}

// timeNow is time.Now, except in tests which can mess with it.
var timeNow = time.Now
diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
new file mode 100644
index 0000000..6b4cdf4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
@@ -0,0 +1,1198 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package autocert provides automatic access to certificates from Let's Encrypt
// and any other ACME-based CA.
//
// This package is a work in progress and makes no API stability promises.
package autocert

import (
	"bytes"
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	mathrand "math/rand"
	"net"
	"net/http"
	"path"
	"strings"
	"sync"
	"time"

	"golang.org/x/crypto/acme"
	"golang.org/x/net/idna"
)

// DefaultACMEDirectory is the default ACME Directory URL used when the Manager's Client is nil.
const DefaultACMEDirectory = "https://acme-v02.api.letsencrypt.org/directory"

// createCertRetryAfter is how much time to wait before removing a failed state
// entry due to an unsuccessful createCert call.
// This is a variable instead of a const for testing.
// TODO: Consider making it configurable or an exp backoff?
var createCertRetryAfter = time.Minute

// pseudoRand is safe for concurrent use.
// Seeded in init below; used for renewal jitter rather than cryptography.
var pseudoRand *lockedMathRand

var errPreRFC = errors.New("autocert: ACME server doesn't support RFC 8555")

func init() {
	src := mathrand.NewSource(time.Now().UnixNano())
	pseudoRand = &lockedMathRand{rnd: mathrand.New(src)}
}

// AcceptTOS is a Manager.Prompt function that always returns true to
// indicate acceptance of the CA's Terms of Service during account
// registration.
func AcceptTOS(tosURL string) bool { return true }

// HostPolicy specifies which host names the Manager is allowed to respond to.
// It returns a non-nil error if the host should be rejected.
// The returned error is accessible via tls.Conn.Handshake and its callers.
// See Manager's HostPolicy field and GetCertificate method docs for more details.
type HostPolicy func(ctx context.Context, host string) error

// HostWhitelist returns a policy where only the specified host names are allowed.
// Only exact matches are currently supported. Subdomains, regexp or wildcard
// will not match.
//
// Note that all hosts will be converted to Punycode via idna.Lookup.ToASCII so that
// Manager.GetCertificate can handle the Unicode IDN and mixedcase hosts correctly.
// Invalid hosts will be silently ignored.
func HostWhitelist(hosts ...string) HostPolicy {
	whitelist := make(map[string]bool, len(hosts))
	for _, h := range hosts {
		// Hosts that fail Punycode conversion are dropped from the whitelist.
		if h, err := idna.Lookup.ToASCII(h); err == nil {
			whitelist[h] = true
		}
	}
	return func(_ context.Context, host string) error {
		if !whitelist[host] {
			return fmt.Errorf("acme/autocert: host %q not configured in HostWhitelist", host)
		}
		return nil
	}
}

// defaultHostPolicy is used when Manager.HostPolicy is not set.
func defaultHostPolicy(context.Context, string) error {
	return nil
}

// Manager is a stateful certificate manager built on top of acme.Client.
// It obtains and refreshes certificates automatically using "tls-alpn-01"
// or "http-01" challenge types, as well as providing them to a TLS server
// via tls.Config.
//
// You must specify a cache implementation, such as DirCache,
// to reuse obtained certificates across program restarts.
// Otherwise your server is very likely to exceed the certificate
// issuer's request rate limits.
type Manager struct {
	// Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS).
	// The registration may require the caller to agree to the CA's TOS.
	// If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report
	// whether the caller agrees to the terms.
	//
	// To always accept the terms, the callers can use AcceptTOS.
	Prompt func(tosURL string) bool

	// Cache optionally stores and retrieves previously-obtained certificates
	// and other state. If nil, certs will only be cached for the lifetime of
	// the Manager. Multiple Managers can share the same Cache.
	//
	// Using a persistent Cache, such as DirCache, is strongly recommended.
	Cache Cache

	// HostPolicy controls which domains the Manager will attempt
	// to retrieve new certificates for. It does not affect cached certs.
	//
	// If non-nil, HostPolicy is called before requesting a new cert.
	// If nil, all hosts are currently allowed. This is not recommended,
	// as it opens a potential attack where clients connect to a server
	// by IP address and pretend to be asking for an incorrect host name.
	// Manager will attempt to obtain a certificate for that host, incorrectly,
	// eventually reaching the CA's rate limit for certificate requests
	// and making it impossible to obtain actual certificates.
	//
	// See GetCertificate for more details.
	HostPolicy HostPolicy

	// RenewBefore optionally specifies how early certificates should
	// be renewed before they expire.
	//
	// If zero, they're renewed 30 days before expiration.
	RenewBefore time.Duration

	// Client is used to perform low-level operations, such as account registration
	// and requesting new certificates.
	//
	// If Client is nil, a zero-value acme.Client is used with DefaultACMEDirectory
	// as the directory endpoint.
	// If the Client.Key is nil, a new ECDSA P-256 key is generated and,
	// if Cache is not nil, stored in cache.
	//
	// Mutating the field after the first call of GetCertificate method will have no effect.
	Client *acme.Client

	// Email optionally specifies a contact email address.
	// This is used by CAs, such as Let's Encrypt, to notify about problems
	// with issued certificates.
	//
	// If the Client's account key is already registered, Email is not used.
	Email string

	// ForceRSA used to make the Manager generate RSA certificates. It is now ignored.
	//
	// Deprecated: the Manager will request the correct type of certificate based
	// on what each client supports.
	ForceRSA bool

	// ExtraExtensions are used when generating a new CSR (Certificate Request),
	// thus allowing customization of the resulting certificate.
	// For instance, TLS Feature Extension (RFC 7633) can be used
	// to prevent an OCSP downgrade attack.
	//
	// The field value is passed to crypto/x509.CreateCertificateRequest
	// in the template's ExtraExtensions field as is.
	ExtraExtensions []pkix.Extension

	// ExternalAccountBinding optionally represents an arbitrary binding to an
	// account of the CA to which the ACME server is tied.
	// See RFC 8555, Section 7.3.4 for more details.
	ExternalAccountBinding *acme.ExternalAccountBinding

	clientMu sync.Mutex
	client   *acme.Client // initialized by acmeClient method

	stateMu sync.Mutex
	state   map[certKey]*certState

	// renewal tracks the set of domains currently running renewal timers.
	renewalMu sync.Mutex
	renewal   map[certKey]*domainRenewal

	// challengeMu guards tryHTTP01, certTokens and httpTokens.
	challengeMu sync.RWMutex
	// tryHTTP01 indicates whether the Manager should try "http-01" challenge type
	// during the authorization flow.
	tryHTTP01 bool
	// httpTokens contains response body values for http-01 challenges
	// and is keyed by the URL path at which a challenge response is expected
	// to be provisioned.
	// The entries are stored for the duration of the authorization flow.
	httpTokens map[string][]byte
	// certTokens contains temporary certificates for tls-alpn-01 challenges
	// and is keyed by the domain name which matches the ClientHello server name.
	// The entries are stored for the duration of the authorization flow.
	certTokens map[string]*tls.Certificate

	// nowFunc, if not nil, returns the current time. This may be set for
	// testing purposes.
	nowFunc func() time.Time
}

// certKey is the key by which certificates are tracked in state, renewal and cache.
type certKey struct {
	domain  string // without trailing dot
	isRSA   bool   // RSA cert for legacy clients (as opposed to default ECDSA)
	isToken bool   // tls-based challenge token cert; key type is undefined regardless of isRSA
}

// String renders the key with "+token"/"+rsa" suffixes so the variants
// occupy distinct cache entries.
func (c certKey) String() string {
	if c.isToken {
		return c.domain + "+token"
	}
	if c.isRSA {
		return c.domain + "+rsa"
	}
	return c.domain
}

// TLSConfig creates a new TLS config suitable for net/http.Server servers,
// supporting HTTP/2 and the tls-alpn-01 ACME challenge type.
func (m *Manager) TLSConfig() *tls.Config {
	return &tls.Config{
		GetCertificate: m.GetCertificate,
		NextProtos: []string{
			"h2", "http/1.1", // enable HTTP/2
			acme.ALPNProto, // enable tls-alpn ACME challenges
		},
	}
}

// GetCertificate implements the tls.Config.GetCertificate hook.
// It provides a TLS certificate for hello.ServerName host, including answering
// tls-alpn-01 challenges.
// All other fields of hello are ignored.
//
// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting
// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation.
// The error is propagated back to the caller of GetCertificate and is user-visible.
// This does not affect cached certs. See HostPolicy field description for more details.
//
// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will
// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler for http-01.
func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	if m.Prompt == nil {
		return nil, errors.New("acme/autocert: Manager.Prompt not set")
	}

	name := hello.ServerName
	if name == "" {
		return nil, errors.New("acme/autocert: missing server name")
	}
	if !strings.Contains(strings.Trim(name, "."), ".") {
		return nil, errors.New("acme/autocert: server name component count invalid")
	}

	// Note that this conversion is necessary because some server names in the handshakes
	// started by some clients (such as cURL) are not converted to Punycode, which will
	// prevent us from obtaining certificates for them. In addition, we should also treat
	// example.com and EXAMPLE.COM as equivalent and return the same certificate for them.
	// Fortunately, this conversion also helped us deal with this kind of mixedcase problems.
	//
	// Due to the "σςΣ" problem (see https://unicode.org/faq/idn.html#22), we can't use
	// idna.Punycode.ToASCII (or just idna.ToASCII) here.
	name, err := idna.Lookup.ToASCII(name)
	if err != nil {
		return nil, errors.New("acme/autocert: server name contains invalid character")
	}

	// In the worst-case scenario, the timeout needs to account for caching, host policy,
	// domain ownership verification and certificate issuance.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Check whether this is a token cert requested for TLS-ALPN challenge.
	if wantsTokenCert(hello) {
		m.challengeMu.RLock()
		defer m.challengeMu.RUnlock()
		if cert := m.certTokens[name]; cert != nil {
			return cert, nil
		}
		if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil {
			return cert, nil
		}
		// TODO: cache error results?
		return nil, fmt.Errorf("acme/autocert: no token cert for %q", name)
	}

	// regular domain
	ck := certKey{
		domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114
		isRSA:  !supportsECDSA(hello),
	}
	cert, err := m.cert(ctx, ck)
	if err == nil {
		return cert, nil
	}
	if err != ErrCacheMiss {
		return nil, err
	}

	// first-time
	if err := m.hostPolicy()(ctx, name); err != nil {
		return nil, err
	}
	cert, err = m.createCert(ctx, ck)
	if err != nil {
		return nil, err
	}
	// A cachePut failure is deliberately non-fatal: the cert is already in
	// m.state and can be served from memory.
	m.cachePut(ctx, ck, cert)
	return cert, nil
}

// wantsTokenCert reports whether a TLS request with SNI is made by a CA server
// for a challenge verification.
func wantsTokenCert(hello *tls.ClientHelloInfo) bool {
	// tls-alpn-01
	if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto {
		return true
	}
	return false
}

// supportsECDSA reports whether the client indicated by hello can accept an
// ECDSA certificate, judged by its signature schemes, curves and cipher suites.
func supportsECDSA(hello *tls.ClientHelloInfo) bool {
	// The "signature_algorithms" extension, if present, limits the key exchange
	// algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1.
	if hello.SignatureSchemes != nil {
		ecdsaOK := false
	schemeLoop:
		for _, scheme := range hello.SignatureSchemes {
			const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10
			switch scheme {
			case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256,
				tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512:
				ecdsaOK = true
				break schemeLoop
			}
		}
		if !ecdsaOK {
			return false
		}
	}
	if hello.SupportedCurves != nil {
		ecdsaOK := false
		for _, curve := range hello.SupportedCurves {
			if curve == tls.CurveP256 {
				ecdsaOK = true
				break
			}
		}
		if !ecdsaOK {
			return false
		}
	}
	for _, suite := range hello.CipherSuites {
		switch suite {
		case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:
			return true
		}
	}
	return false
}

// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses.
// It returns an http.Handler that responds to the challenges and must be
// running on port 80. If it receives a request that is not an ACME challenge,
// it delegates the request to the optional fallback handler.
//
// If fallback is nil, the returned handler redirects all GET and HEAD requests
// to the default TLS port 443 with 302 Found status code, preserving the original
// request path and query. It responds with 400 Bad Request to all other HTTP methods.
// The fallback is not protected by the optional HostPolicy.
//
// Because the fallback handler is run with unencrypted port 80 requests,
// the fallback should not serve TLS-only requests.
//
// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01"
// challenge for domain verification.
func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler {
	m.challengeMu.Lock()
	defer m.challengeMu.Unlock()
	m.tryHTTP01 = true

	if fallback == nil {
		fallback = http.HandlerFunc(handleHTTPRedirect)
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
			fallback.ServeHTTP(w, r)
			return
		}
		// A reasonable context timeout for cache and host policy only,
		// because we don't wait for a new certificate issuance here.
		ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
		defer cancel()
		if err := m.hostPolicy()(ctx, r.Host); err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
			return
		}
		data, err := m.httpToken(ctx, r.URL.Path)
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		w.Write(data)
	})
}

// handleHTTPRedirect is the default http-01 fallback: it redirects GET/HEAD
// to the HTTPS equivalent of the request URL and rejects other methods.
func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" && r.Method != "HEAD" {
		http.Error(w, "Use HTTPS", http.StatusBadRequest)
		return
	}
	target := "https://" + stripPort(r.Host) + r.URL.RequestURI()
	http.Redirect(w, r, target, http.StatusFound)
}

// stripPort returns hostport with any explicit port replaced by the default
// TLS port 443. If hostport contains no port, it is returned unchanged.
func stripPort(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return hostport
	}
	return net.JoinHostPort(host, "443")
}

// cert returns an existing certificate either from m.state or cache.
// If a certificate is found in cache but not in m.state, the latter will be filled
// with the cached value.
func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) {
	m.stateMu.Lock()
	if s, ok := m.state[ck]; ok {
		m.stateMu.Unlock()
		s.RLock()
		defer s.RUnlock()
		return s.tlscert()
	}
	defer m.stateMu.Unlock()
	cert, err := m.cacheGet(ctx, ck)
	if err != nil {
		return nil, err
	}
	signer, ok := cert.PrivateKey.(crypto.Signer)
	if !ok {
		return nil, errors.New("acme/autocert: private key cannot sign")
	}
	if m.state == nil {
		m.state = make(map[certKey]*certState)
	}
	s := &certState{
		key:  signer,
		cert: cert.Certificate,
		leaf: cert.Leaf,
	}
	m.state[ck] = s
	m.startRenew(ck, s.key, s.leaf.NotAfter)
	return cert, nil
}

// cacheGet always returns a valid certificate, or an error otherwise.
// If a cached certificate exists but is not valid, ErrCacheMiss is returned.
func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) {
	if m.Cache == nil {
		return nil, ErrCacheMiss
	}
	data, err := m.Cache.Get(ctx, ck.String())
	if err != nil {
		return nil, err
	}

	// private
	priv, pub := pem.Decode(data)
	if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
		return nil, ErrCacheMiss
	}
	privKey, err := parsePrivateKey(priv.Bytes)
	if err != nil {
		return nil, err
	}

	// public
	var pubDER [][]byte
	for len(pub) > 0 {
		var b *pem.Block
		b, pub = pem.Decode(pub)
		if b == nil {
			break
		}
		pubDER = append(pubDER, b.Bytes)
	}
	if len(pub) > 0 {
		// Leftover content not consumed by pem.Decode. Corrupt. Ignore.
		return nil, ErrCacheMiss
	}

	// verify and create TLS cert
	leaf, err := validCert(ck, pubDER, privKey, m.now())
	if err != nil {
		return nil, ErrCacheMiss
	}
	tlscert := &tls.Certificate{
		Certificate: pubDER,
		PrivateKey:  privKey,
		Leaf:        leaf,
	}
	return tlscert, nil
}

// cachePut stores the private key and certificate chain of tlscert,
// PEM-encoded in a single blob, in m.Cache under ck.String().
// It is a no-op when no Cache is configured.
func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error {
	if m.Cache == nil {
		return nil
	}

	// contains PEM-encoded data
	var buf bytes.Buffer

	// private
	switch key := tlscert.PrivateKey.(type) {
	case *ecdsa.PrivateKey:
		if err := encodeECDSAKey(&buf, key); err != nil {
			return err
		}
	case *rsa.PrivateKey:
		b := x509.MarshalPKCS1PrivateKey(key)
		pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}
		if err := pem.Encode(&buf, pb); err != nil {
			return err
		}
	default:
		return errors.New("acme/autocert: unknown private key type")
	}

	// public
	for _, b := range tlscert.Certificate {
		pb := &pem.Block{Type: "CERTIFICATE", Bytes: b}
		if err := pem.Encode(&buf, pb); err != nil {
			return err
		}
	}

	return m.Cache.Put(ctx, ck.String(), buf.Bytes())
}

// encodeECDSAKey PEM-encodes key as an "EC PRIVATE KEY" block and writes it to w.
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
	b, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return err
	}
	pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
	return pem.Encode(w, pb)
}

// createCert starts the domain ownership verification and returns a certificate
// for that domain upon success.
//
// If the domain is already being verified, it waits for the existing verification to complete.
// Either way, createCert blocks for the duration of the whole process.
func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) {
	// TODO: maybe rewrite this whole piece using sync.Once
	state, err := m.certState(ck)
	if err != nil {
		return nil, err
	}
	// state may exist if another goroutine is already working on it
	// in which case just wait for it to finish
	if !state.locked {
		state.RLock()
		defer state.RUnlock()
		return state.tlscert()
	}

	// We are the first; state is locked.
	// Unblock the readers when domain ownership is verified
	// and we got the cert or the process failed.
	defer state.Unlock()
	state.locked = false

	der, leaf, err := m.authorizedCert(ctx, state.key, ck)
	if err != nil {
		// Remove the failed state after some time,
		// making the manager call createCert again on the following TLS hello.
		didRemove := testDidRemoveState // The lifetime of this timer is untracked, so copy mutable local state to avoid races.
		time.AfterFunc(createCertRetryAfter, func() {
			defer didRemove(ck)
			m.stateMu.Lock()
			defer m.stateMu.Unlock()
			// Verify the state hasn't changed and it's still invalid
			// before deleting.
			s, ok := m.state[ck]
			if !ok {
				return
			}
			if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil {
				return
			}
			delete(m.state, ck)
		})
		return nil, err
	}
	state.cert = der
	state.leaf = leaf
	m.startRenew(ck, state.key, state.leaf.NotAfter)
	return state.tlscert()
}

// certState returns a new or existing certState.
// If a new certState is returned, state.exist is false and the state is locked.
// The returned error is non-nil only in the case where a new state could not be created.
func (m *Manager) certState(ck certKey) (*certState, error) {
	m.stateMu.Lock()
	defer m.stateMu.Unlock()
	if m.state == nil {
		m.state = make(map[certKey]*certState)
	}
	// existing state
	if state, ok := m.state[ck]; ok {
		return state, nil
	}

	// new locked state
	var (
		err error
		key crypto.Signer
	)
	if ck.isRSA {
		key, err = rsa.GenerateKey(rand.Reader, 2048)
	} else {
		key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	}
	if err != nil {
		return nil, err
	}

	state := &certState{
		key:    key,
		locked: true,
	}
	state.Lock() // will be unlocked by m.certState caller
	m.state[ck] = state
	return state, nil
}

// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
// The key argument is the certificate private key.
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) {
	csr, err := certRequest(key, ck.domain, m.ExtraExtensions)
	if err != nil {
		return nil, nil, err
	}

	client, err := m.acmeClient(ctx)
	if err != nil {
		return nil, nil, err
	}
	dir, err := client.Discover(ctx)
	if err != nil {
		return nil, nil, err
	}
	if dir.OrderURL == "" {
		return nil, nil, errPreRFC
	}

	o, err := m.verifyRFC(ctx, client, ck.domain)
	if err != nil {
		return nil, nil, err
	}
	chain, _, err := client.CreateOrderCert(ctx, o.FinalizeURL, csr, true)
	if err != nil {
		return nil, nil, err
	}

	leaf, err = validCert(ck, chain, key, m.now())
	if err != nil {
		return nil, nil, err
	}
	return chain, leaf, nil
}

// verifyRFC runs the identifier (domain) order-based authorization flow for RFC compliant CAs
// using each applicable ACME challenge type.
func (m *Manager) verifyRFC(ctx context.Context, client *acme.Client, domain string) (*acme.Order, error) {
	// Try each supported challenge type starting with a new order each time.
	// The nextTyp index of the next challenge type to try is shared across
	// all order authorizations: if we've tried a challenge type once and it didn't work,
	// it will most likely not work on another order's authorization either.
	challengeTypes := m.supportedChallengeTypes()
	nextTyp := 0 // challengeTypes index
AuthorizeOrderLoop:
	for {
		o, err := client.AuthorizeOrder(ctx, acme.DomainIDs(domain))
		if err != nil {
			return nil, err
		}
		// Remove all hanging authorizations to reduce rate limit quotas
		// after we're done.
		defer func(urls []string) {
			go m.deactivatePendingAuthz(urls)
		}(o.AuthzURLs)

		// Check if there's actually anything we need to do.
		switch o.Status {
		case acme.StatusReady:
			// Already authorized.
			return o, nil
		case acme.StatusPending:
			// Continue normal Order-based flow.
		default:
			return nil, fmt.Errorf("acme/autocert: invalid new order status %q; order URL: %q", o.Status, o.URI)
		}

		// Satisfy all pending authorizations.
		for _, zurl := range o.AuthzURLs {
			z, err := client.GetAuthorization(ctx, zurl)
			if err != nil {
				return nil, err
			}
			if z.Status != acme.StatusPending {
				// We are interested only in pending authorizations.
				continue
			}
			// Pick the next preferred challenge.
			var chal *acme.Challenge
			for chal == nil && nextTyp < len(challengeTypes) {
				chal = pickChallenge(challengeTypes[nextTyp], z.Challenges)
				nextTyp++
			}
			if chal == nil {
				return nil, fmt.Errorf("acme/autocert: unable to satisfy %q for domain %q: no viable challenge type found", z.URI, domain)
			}
			// Respond to the challenge and wait for validation result.
			cleanup, err := m.fulfill(ctx, client, chal, domain)
			if err != nil {
				continue AuthorizeOrderLoop
			}
			defer cleanup()
			if _, err := client.Accept(ctx, chal); err != nil {
				continue AuthorizeOrderLoop
			}
			if _, err := client.WaitAuthorization(ctx, z.URI); err != nil {
				continue AuthorizeOrderLoop
			}
		}

		// All authorizations are satisfied.
		// Wait for the CA to update the order status.
		o, err = client.WaitOrder(ctx, o.URI)
		if err != nil {
			continue AuthorizeOrderLoop
		}
		return o, nil
	}
}

// pickChallenge returns the first element of chal whose Type equals typ,
// or nil if no challenge of that type is offered.
func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge {
	for _, c := range chal {
		if c.Type == typ {
			return c
		}
	}
	return nil
}

// supportedChallengeTypes returns the challenge types this Manager can fulfill,
// in preference order: always "tls-alpn-01", plus "http-01" once HTTPHandler
// has been called (which sets tryHTTP01).
func (m *Manager) supportedChallengeTypes() []string {
	m.challengeMu.RLock()
	defer m.challengeMu.RUnlock()
	typ := []string{"tls-alpn-01"}
	if m.tryHTTP01 {
		typ = append(typ, "http-01")
	}
	return typ
}

// deactivatePendingAuthz relinquishes all authorizations identified by the elements
// of the provided uri slice which are in "pending" state.
// It ignores revocation errors.
//
// deactivatePendingAuthz takes no context argument and instead runs with its own
// "detached" context because deactivations are done in a goroutine separate from
// that of the main issuance or renewal flow.
func (m *Manager) deactivatePendingAuthz(uri []string) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	client, err := m.acmeClient(ctx)
	if err != nil {
		return
	}
	for _, u := range uri {
		z, err := client.GetAuthorization(ctx, u)
		if err == nil && z.Status == acme.StatusPending {
			client.RevokeAuthorization(ctx, u)
		}
	}
}

// fulfill provisions a response to the challenge chal.
// The cleanup is non-nil only if provisioning succeeded.
func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) {
	switch chal.Type {
	case "tls-alpn-01":
		cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain)
		if err != nil {
			return nil, err
		}
		m.putCertToken(ctx, domain, &cert)
		// Token deletion runs in a goroutine so cleanup callers are not
		// blocked on cache I/O.
		return func() { go m.deleteCertToken(domain) }, nil
	case "http-01":
		resp, err := client.HTTP01ChallengeResponse(chal.Token)
		if err != nil {
			return nil, err
		}
		p := client.HTTP01ChallengePath(chal.Token)
		m.putHTTPToken(ctx, p, resp)
		return func() { go m.deleteHTTPToken(p) }, nil
	}
	return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type)
}

// putCertToken stores the token certificate with the specified name
// in both m.certTokens map and m.Cache.
func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) {
	m.challengeMu.Lock()
	defer m.challengeMu.Unlock()
	if m.certTokens == nil {
		m.certTokens = make(map[string]*tls.Certificate)
	}
	m.certTokens[name] = cert
	m.cachePut(ctx, certKey{domain: name, isToken: true}, cert)
}

// deleteCertToken removes the token certificate with the specified name
// from both m.certTokens map and m.Cache.
func (m *Manager) deleteCertToken(name string) {
	m.challengeMu.Lock()
	defer m.challengeMu.Unlock()
	delete(m.certTokens, name)
	if m.Cache != nil {
		ck := certKey{domain: name, isToken: true}
		m.Cache.Delete(context.Background(), ck.String())
	}
}

// httpToken retrieves an existing http-01 token value from an in-memory map
// or the optional cache.
func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) {
	m.challengeMu.RLock()
	defer m.challengeMu.RUnlock()
	if v, ok := m.httpTokens[tokenPath]; ok {
		return v, nil
	}
	if m.Cache == nil {
		return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath)
	}
	return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath))
}

// putHTTPToken stores an http-01 token value using tokenPath as key
// in both in-memory map and the optional Cache.
//
// It ignores any error returned from Cache.Put.
func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) {
	m.challengeMu.Lock()
	defer m.challengeMu.Unlock()
	if m.httpTokens == nil {
		m.httpTokens = make(map[string][]byte)
	}
	b := []byte(val)
	m.httpTokens[tokenPath] = b
	if m.Cache != nil {
		m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b)
	}
}

// deleteHTTPToken removes an http-01 token value from both in-memory map
// and the optional Cache, ignoring any error returned from the latter.
//
// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout.
func (m *Manager) deleteHTTPToken(tokenPath string) {
	m.challengeMu.Lock()
	defer m.challengeMu.Unlock()
	delete(m.httpTokens, tokenPath)
	if m.Cache != nil {
		m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath))
	}
}

// httpTokenCacheKey returns a key at which an http-01 token value may be stored
// in the Manager's optional Cache.
func httpTokenCacheKey(tokenPath string) string {
	return path.Base(tokenPath) + "+http-01"
}

// startRenew starts a cert renewal timer loop, one per domain.
//
// The loop is scheduled in two cases:
// - a cert was fetched from cache for the first time (wasn't in m.state)
// - a new cert was created by m.createCert
//
// The key argument is a certificate private key.
// The exp argument is the cert expiration time (NotAfter).
func (m *Manager) startRenew(ck certKey, key crypto.Signer, exp time.Time) {
	m.renewalMu.Lock()
	defer m.renewalMu.Unlock()
	if m.renewal[ck] != nil {
		// another goroutine is already on it
		return
	}
	if m.renewal == nil {
		m.renewal = make(map[certKey]*domainRenewal)
	}
	dr := &domainRenewal{m: m, ck: ck, key: key}
	m.renewal[ck] = dr
	dr.start(exp)
}

// stopRenew stops all currently running cert renewal timers.
// The timers are not restarted during the lifetime of the Manager.
func (m *Manager) stopRenew() {
	m.renewalMu.Lock()
	defer m.renewalMu.Unlock()
	for name, dr := range m.renewal {
		delete(m.renewal, name)
		dr.stop()
	}
}

// accountKey returns the ACME account private key. It is loaded from m.Cache
// (trying the current key name, then the legacy one); if no key is cached,
// a new ECDSA P-256 key is generated and, when a Cache is configured, persisted.
func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) {
	const keyName = "acme_account+key"

	// Previous versions of autocert stored the value under a different key.
	const legacyKeyName = "acme_account.key"

	genKey := func() (*ecdsa.PrivateKey, error) {
		return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	}

	if m.Cache == nil {
		return genKey()
	}

	data, err := m.Cache.Get(ctx, keyName)
	if err == ErrCacheMiss {
		data, err = m.Cache.Get(ctx, legacyKeyName)
	}
	if err == ErrCacheMiss {
		key, err := genKey()
		if err != nil {
			return nil, err
		}
		var buf bytes.Buffer
		if err := encodeECDSAKey(&buf, key); err != nil {
			return nil, err
		}
		if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil {
			return nil, err
		}
		return key, nil
	}
	if err != nil {
		return nil, err
	}

	priv, _ := pem.Decode(data)
	if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
		return nil, errors.New("acme/autocert: invalid account key found in cache")
	}
	return parsePrivateKey(priv.Bytes)
}

// acmeClient returns the Manager's singleton ACME client, initializing it on
// first use (account key, user agent, account registration). A CA response
// indicating the account already exists is not treated as an error.
func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) {
	m.clientMu.Lock()
	defer m.clientMu.Unlock()
	if m.client != nil {
		return m.client, nil
	}

	client := m.Client
	if client == nil {
		client = &acme.Client{DirectoryURL: DefaultACMEDirectory}
	}
	if client.Key == nil {
		var err error
		client.Key, err = m.accountKey(ctx)
		if err != nil {
			return nil, err
		}
	}
	if client.UserAgent == "" {
		client.UserAgent = "autocert"
	}
	var contact []string
	if m.Email != "" {
		contact = []string{"mailto:" + m.Email}
	}
	a := &acme.Account{Contact: contact, ExternalAccountBinding: m.ExternalAccountBinding}
	_, err := client.Register(ctx, a, m.Prompt)
	if err == nil || isAccountAlreadyExist(err) {
		m.client = client
		err = nil
	}
	return m.client, err
}

// isAccountAlreadyExist reports whether the err, as returned from acme.Client.Register,
// indicates the account has already been registered.
func isAccountAlreadyExist(err error) bool {
	if err == acme.ErrAccountAlreadyExists {
		return true
	}
	ae, ok := err.(*acme.Error)
	return ok && ae.StatusCode == http.StatusConflict
}

// hostPolicy returns m.HostPolicy if set, or the package default policy otherwise.
func (m *Manager) hostPolicy() HostPolicy {
	if m.HostPolicy != nil {
		return m.HostPolicy
	}
	return defaultHostPolicy
}

// renewBefore returns how long before expiry a certificate should be renewed.
// Values of m.RenewBefore not exceeding renewJitter fall back to the 30-day default.
func (m *Manager) renewBefore() time.Duration {
	if m.RenewBefore > renewJitter {
		return m.RenewBefore
	}
	return 720 * time.Hour // 30 days
}

// now returns the current time, using nowFunc when set (for tests).
func (m *Manager) now() time.Time {
	if m.nowFunc != nil {
		return m.nowFunc()
	}
	return time.Now()
}

// certState is ready when its mutex is unlocked for reading.
type certState struct {
	sync.RWMutex
	locked bool              // locked for read/write
	key    crypto.Signer     // private key for cert
	cert   [][]byte          // DER encoding
	leaf   *x509.Certificate // parsed cert[0]; always non-nil if cert != nil
}

// tlscert creates a tls.Certificate from s.key and s.cert.
// Callers should wrap it in s.RLock() and s.RUnlock().
func (s *certState) tlscert() (*tls.Certificate, error) {
	if s.key == nil {
		return nil, errors.New("acme/autocert: missing signer")
	}
	if len(s.cert) == 0 {
		return nil, errors.New("acme/autocert: missing certificate")
	}
	return &tls.Certificate{
		PrivateKey:  s.key,
		Certificate: s.cert,
		Leaf:        s.leaf,
	}, nil
}

// certRequest generates a CSR for the given common name.
func certRequest(key crypto.Signer, name string, ext []pkix.Extension) ([]byte, error) {
	req := &x509.CertificateRequest{
		Subject:         pkix.Name{CommonName: name},
		DNSNames:        []string{name},
		ExtraExtensions: ext,
	}
	return x509.CreateCertificateRequest(rand.Reader, req, key)
}

// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
//
// Inspired by parsePrivateKey in crypto/tls/tls.go.
func parsePrivateKey(der []byte) (crypto.Signer, error) {
	if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
		return key, nil
	}
	if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
		switch key := key.(type) {
		case *rsa.PrivateKey:
			return key, nil
		case *ecdsa.PrivateKey:
			return key, nil
		default:
			return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping")
		}
	}
	if key, err := x509.ParseECPrivateKey(der); err == nil {
		return key, nil
	}

	return nil, errors.New("acme/autocert: failed to parse private key")
}

// validCert parses a cert chain provided as der argument and verifies the leaf and der[0]
// correspond to the private key, the domain and key type match, and expiration dates
// are valid. It doesn't do any revocation checking.
//
// The returned value is the verified leaf cert.
func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) {
	// parse public part(s)
	var n int
	for _, b := range der {
		n += len(b)
	}
	pub := make([]byte, n)
	n = 0
	for _, b := range der {
		n += copy(pub[n:], b)
	}
	x509Cert, err := x509.ParseCertificates(pub)
	if err != nil || len(x509Cert) == 0 {
		return nil, errors.New("acme/autocert: no public key found")
	}
	// verify the leaf is not expired and matches the domain name
	leaf = x509Cert[0]
	if now.Before(leaf.NotBefore) {
		return nil, errors.New("acme/autocert: certificate is not valid yet")
	}
	if now.After(leaf.NotAfter) {
		return nil, errors.New("acme/autocert: expired certificate")
	}
	if err := leaf.VerifyHostname(ck.domain); err != nil {
		return nil, err
	}
	// renew certificates revoked by Let's Encrypt in January 2022
	if isRevokedLetsEncrypt(leaf) {
		return nil, errors.New("acme/autocert: certificate was probably revoked by Let's Encrypt")
	}
	// ensure the leaf corresponds to the private key and matches the certKey type
	switch pub := leaf.PublicKey.(type) {
	case *rsa.PublicKey:
		prv, ok := key.(*rsa.PrivateKey)
		if !ok {
			return nil, errors.New("acme/autocert: private key type does not match public key type")
		}
		if pub.N.Cmp(prv.N) != 0 {
			return nil, errors.New("acme/autocert: private key does not match public key")
		}
		if !ck.isRSA && !ck.isToken {
			return nil, errors.New("acme/autocert: key type does not match expected value")
		}
	case *ecdsa.PublicKey:
		prv, ok := key.(*ecdsa.PrivateKey)
		if !ok {
			return nil, errors.New("acme/autocert: private key type does not match public key type")
		}
		if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 {
			return nil, errors.New("acme/autocert: private key does not match public key")
		}
		if ck.isRSA && !ck.isToken {
			return nil, errors.New("acme/autocert: key type does not match expected value")
		}
	default:
		return nil, errors.New("acme/autocert: unknown public key algorithm")
	}
	return leaf, nil
}

// https://community.letsencrypt.org/t/2022-01-25-issue-with-tls-alpn-01-validation-method/170450
var letsEncryptFixDeployTime = time.Date(2022, time.January, 26, 00, 48, 0, 0, time.UTC)

// isRevokedLetsEncrypt returns whether the certificate is likely to be part of
// a batch of certificates revoked by Let's Encrypt in January 2022. This check
// can be safely removed from May 2022.
func isRevokedLetsEncrypt(cert *x509.Certificate) bool {
	O := cert.Issuer.Organization
	return len(O) == 1 && O[0] == "Let's Encrypt" &&
		cert.NotBefore.Before(letsEncryptFixDeployTime)
}

// lockedMathRand guards a math/rand source with a mutex.
type lockedMathRand struct {
	sync.Mutex
	rnd *mathrand.Rand
}

// int63n returns a pseudo-random number in [0,max) while holding the mutex.
func (r *lockedMathRand) int63n(max int64) int64 {
	r.Lock()
	n := r.rnd.Int63n(max)
	r.Unlock()
	return n
}

// For easier testing.
var (
	// Called when a state is removed.
	testDidRemoveState = func(certKey) {}
)
diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go
new file mode 100644
index 0000000..758ab12
--- /dev/null
+++ b/vendor/golang.org/x/crypto/acme/autocert/cache.go
@@ -0,0 +1,135 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package autocert

import (
	"context"
	"errors"
	"os"
	"path/filepath"
)

// ErrCacheMiss is returned when a certificate is not found in cache.
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")

// Cache is used by Manager to store and retrieve previously obtained certificates
// and other account data as opaque blobs.
//
// Cache implementations should not rely on the key naming pattern. Keys can
// include any printable ASCII characters, except the following: \/:*?"<>|
type Cache interface {
	// Get returns a certificate data for the specified key.
	// If there's no such key, Get returns ErrCacheMiss.
	Get(ctx context.Context, key string) ([]byte, error)

	// Put stores the data in the cache under the specified key.
	// Underlying implementations may use any data storage format,
	// as long as the reverse operation, Get, results in the original data.
	Put(ctx context.Context, key string, data []byte) error

	// Delete removes a certificate data from the cache under the specified key.
	// If there's no such key in the cache, Delete returns nil.
	Delete(ctx context.Context, key string) error
}

// DirCache implements Cache using a directory on the local filesystem.
// If the directory does not exist, it will be created with 0700 permissions.
type DirCache string

// Get reads a certificate data from the specified file name.
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
	name = filepath.Join(string(d), filepath.Clean("/"+name))
	var (
		data []byte
		err  error
		done = make(chan struct{})
	)
	// The read runs in a goroutine so a canceled ctx can return early;
	// the filesystem operation itself is abandoned, not interrupted.
	go func() {
		data, err = os.ReadFile(name)
		close(done)
	}()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-done:
	}
	if os.IsNotExist(err) {
		return nil, ErrCacheMiss
	}
	return data, err
}

// Put writes the certificate data to the specified file name.
// The file will be created with 0600 permissions.
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
	if err := os.MkdirAll(string(d), 0700); err != nil {
		return err
	}

	done := make(chan struct{})
	var err error
	go func() {
		defer close(done)
		var tmp string
		if tmp, err = d.writeTempFile(name, data); err != nil {
			return
		}
		defer os.Remove(tmp)
		select {
		case <-ctx.Done():
			// Don't overwrite the file if the context was canceled.
		default:
			newName := filepath.Join(string(d), filepath.Clean("/"+name))
			err = os.Rename(tmp, newName)
		}
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
	}
	return err
}

// Delete removes the specified file name.
func (d DirCache) Delete(ctx context.Context, name string) error {
	name = filepath.Join(string(d), filepath.Clean("/"+name))
	var (
		err  error
		done = make(chan struct{})
	)
	go func() {
		err = os.Remove(name)
		close(done)
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
	}
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// writeTempFile writes b to a temporary file, closes the file and returns its path.
func (d DirCache) writeTempFile(prefix string, b []byte) (name string, reterr error) {
	// TempFile uses 0600 permissions
	f, err := os.CreateTemp(string(d), prefix)
	if err != nil {
		return "", err
	}
	defer func() {
		if reterr != nil {
			os.Remove(f.Name())
		}
	}()
	if _, err := f.Write(b); err != nil {
		f.Close()
		return "", err
	}
	return f.Name(), f.Close()
}
diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go
new file mode 100644
index 0000000..9d62f8c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go
@@ -0,0 +1,155 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package autocert

import (
	"crypto/tls"
	"log"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"time"
)

// NewListener returns a net.Listener that listens on the standard TLS
// port (443) on all interfaces and returns *tls.Conn connections with
// LetsEncrypt certificates for the provided domain or domains.
//
// It enables one-line HTTPS servers:
//
//	log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
//
// NewListener is a convenience function for a common configuration.
// More complex or custom configurations can use the autocert.Manager
// type instead.
//
// Use of this function implies acceptance of the LetsEncrypt Terms of
// Service. If domains is not empty, the provided domains are passed
// to HostWhitelist. If domains is empty, the listener will do
// LetsEncrypt challenges for any requested domain, which is not
// recommended.
//
// Certificates are cached in a "golang-autocert" directory under an
// operating system-specific cache or temp directory. This may not
// be suitable for servers spanning multiple machines.
//
// The returned listener uses a *tls.Config that enables HTTP/2, and
// should only be used with servers that support HTTP/2.
//
// The returned Listener also enables TCP keep-alives on the accepted
// connections. The returned *tls.Conn are returned before their TLS
// handshake has completed.
func NewListener(domains ...string) net.Listener {
	m := &Manager{
		Prompt: AcceptTOS,
	}
	if len(domains) > 0 {
		m.HostPolicy = HostWhitelist(domains...)
	}
	dir := cacheDir()
	if err := os.MkdirAll(dir, 0700); err != nil {
		log.Printf("warning: autocert.NewListener not using a cache: %v", err)
	} else {
		m.Cache = DirCache(dir)
	}
	return m.Listener()
}

// Listener listens on the standard TLS port (443) on all interfaces
// and returns a net.Listener returning *tls.Conn connections.
//
// The returned listener uses a *tls.Config that enables HTTP/2, and
// should only be used with servers that support HTTP/2.
//
// The returned Listener also enables TCP keep-alives on the accepted
// connections. The returned *tls.Conn are returned before their TLS
// handshake has completed.
//
// Unlike NewListener, it is the caller's responsibility to initialize
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
func (m *Manager) Listener() net.Listener {
	ln := &listener{
		conf: m.TLSConfig(),
	}
	ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
	return ln
}

// listener wraps a TCP listener, deferring any net.Listen error to Accept.
type listener struct {
	conf *tls.Config

	tcpListener  net.Listener
	tcpListenErr error
}

// Accept waits for the next TCP connection, enables keep-alives on it, and
// returns it wrapped in a TLS server connection using the listener's config.
func (ln *listener) Accept() (net.Conn, error) {
	if ln.tcpListenErr != nil {
		return nil, ln.tcpListenErr
	}
	conn, err := ln.tcpListener.Accept()
	if err != nil {
		return nil, err
	}
	tcpConn := conn.(*net.TCPConn)

	// Because Listener is a convenience function, help out with
	// this too. This is not possible for the caller to set once
	// we return a *tcp.Conn wrapping an inaccessible net.Conn.
	// If callers don't want this, they can do things the manual
	// way and tweak as needed. But this is what net/http does
	// itself, so copy that. If net/http changes, we can change
	// here too.
	tcpConn.SetKeepAlive(true)
	tcpConn.SetKeepAlivePeriod(3 * time.Minute)

	return tls.Server(tcpConn, ln.conf), nil
}

func (ln *listener) Addr() net.Addr {
	if ln.tcpListener != nil {
		return ln.tcpListener.Addr()
	}
	// net.Listen failed. Return something non-nil in case callers
	// call Addr before Accept:
	return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
}

func (ln *listener) Close() error {
	if ln.tcpListenErr != nil {
		return ln.tcpListenErr
	}
	return ln.tcpListener.Close()
}

// homeDir returns the user's home directory from environment variables
// (platform-dependent), falling back to "/".
func homeDir() string {
	if runtime.GOOS == "windows" {
		return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
	}
	if h := os.Getenv("HOME"); h != "" {
		return h
	}
	return "/"
}

// cacheDir returns the OS-specific default directory for the
// "golang-autocert" certificate cache.
func cacheDir() string {
	const base = "golang-autocert"
	switch runtime.GOOS {
	case "darwin":
		return filepath.Join(homeDir(), "Library", "Caches", base)
	case "windows":
		for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
			if v := os.Getenv(ev); v != "" {
				return filepath.Join(v, base)
			}
		}
		// Worst case:
		return filepath.Join(homeDir(), base)
	}
	if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
		return filepath.Join(xdg, base)
	}
	return filepath.Join(homeDir(), ".cache", base)
}
diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go
new file mode 100644
index 0000000..0df7da7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go
@@ -0,0 +1,156 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package autocert

import (
	"context"
	"crypto"
	"sync"
	"time"
)

// renewJitter is the maximum deviation from Manager.RenewBefore.
const renewJitter = time.Hour

// domainRenewal tracks the state used by the periodic timers
// renewing a single domain's cert.
+type domainRenewal struct { + m *Manager + ck certKey + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer + timerClose chan struct{} // if non-nil, renew closes this channel (and nils out the timer fields) instead of running +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer and waits for any in-flight calls to renew +// to complete. If the timer is already stopped, calling stop is a noop. +func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + for { + if dr.timer == nil { + return + } + if dr.timer.Stop() { + dr.timer = nil + return + } else { + // dr.timer fired, and we acquired dr.timerMu before the renew callback did. + // (We know this because otherwise the renew callback would have reset dr.timer!) + timerClose := make(chan struct{}) + dr.timerClose = timerClose + dr.timerMu.Unlock() + <-timerClose + dr.timerMu.Lock() + } + } +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timerClose != nil { + close(dr.timerClose) + dr.timer, dr.timerClose = nil, nil + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? + next, err := dr.do(ctx) + if err != nil { + next = renewJitter / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + testDidRenewLoop(next, err) + dr.timer = time.AfterFunc(next, dr.renew) +} + +// updateState locks and replaces the relevant Manager.state item with the given +// state. 
It additionally updates dr.key with the given state's key. +func (dr *domainRenewal) updateState(state *certState) { + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + dr.key = state.key + dr.m.state[dr.ck] = state +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may lock and update the Manager.state if the expiration date of the currently +// cached cert is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+renewJitter { + signer, ok := tlscert.PrivateKey.(crypto.Signer) + if ok { + state := &certState{ + key: signer, + cert: tlscert.Certificate, + leaf: tlscert.Leaf, + } + dr.updateState(state) + return next, nil + } + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { + return 0, err + } + dr.updateState(state) + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(renewJitter)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go 
b/vendor/golang.org/x/crypto/acme/http.go new file mode 100644 index 0000000..58836e5 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/http.go @@ -0,0 +1,325 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "strconv" + "strings" + "time" +) + +// retryTimer encapsulates common logic for retrying unsuccessful requests. +// It is not safe for concurrent use. +type retryTimer struct { + // backoffFn provides backoff delay sequence for retries. + // See Client.RetryBackoff doc comment. + backoffFn func(n int, r *http.Request, res *http.Response) time.Duration + // n is the current retry attempt. + n int +} + +func (t *retryTimer) inc() { + t.n++ +} + +// backoff pauses the current goroutine as described in Client.RetryBackoff. +func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error { + d := t.backoffFn(t.n, r, res) + if d <= 0 { + return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n) + } + wakeup := time.NewTimer(d) + defer wakeup.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-wakeup.C: + return nil + } +} + +func (c *Client) retryTimer() *retryTimer { + f := c.RetryBackoff + if f == nil { + f = defaultBackoff + } + return &retryTimer{backoffFn: f} +} + +// defaultBackoff provides default Client.RetryBackoff implementation +// using a truncated exponential backoff algorithm, +// as described in Client.RetryBackoff. +// +// The n argument is always bounded between 1 and 30. +// The returned value is always greater than 0. 
+func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
+	const max = 10 * time.Second
+	var jitter time.Duration
+	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
+		// Set the minimum to 1ms to avoid a case where
+		// an invalid Retry-After value is parsed into 0 below,
+		// resulting in the 0 returned value which would unintentionally
+		// stop the retries.
+		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
+	}
+	if v, ok := res.Header["Retry-After"]; ok {
+		return retryAfter(v[0]) + jitter
+	}
+
+	if n < 1 {
+		n = 1
+	}
+	if n > 30 {
+		n = 30
+	}
+	d := time.Duration(1<<uint(n-1)) * time.Second
+	if d > max {
+		return max
+	}
+	return d
+}
+
+// retryAfter parses a Retry-After HTTP header value,
+// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
+// It returns zero value if v cannot be parsed.
+func retryAfter(v string) time.Duration {
+	if i, err := strconv.Atoi(v); err == nil {
+		return time.Duration(i) * time.Second
+	}
+	t, err := http.ParseTime(v)
+	if err != nil {
+		return 0
+	}
+	return t.Sub(timeNow())
+}
+
+// resOkay is a function that reports whether the provided response is okay.
+// It is expected to keep the response body unread.
+type resOkay func(*http.Response) bool
+
+// wantStatus returns a function which reports whether the code
+// matches the status code of a response.
+func wantStatus(codes ...int) resOkay {
+	return func(res *http.Response) bool {
+		for _, code := range codes {
+			if code == res.StatusCode {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// get issues an unsigned GET request to the specified URL.
+// It returns a non-error value only when ok reports true.
+//
+// get retries unsuccessful attempts according to c.RetryBackoff
+// until the context is done or a non-retriable error is received.
+func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + res, err := c.doNoRetry(ctx, req) + switch { + case err != nil: + return nil, err + case ok(res): + return res, nil + case isRetriable(res.StatusCode): + retry.inc() + resErr := responseError(res) + res.Body.Close() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if retry.backoff(ctx, req, res) != nil { + return nil, resErr + } + default: + defer res.Body.Close() + return nil, responseError(res) + } + } +} + +// postAsGet is POST-as-GET, a replacement for GET in RFC 8555 +// as described in https://tools.ietf.org/html/rfc8555#section-6.3. +// It makes a POST request in KID form with zero JWS payload. +// See nopayload doc comments in jws.go. +func (c *Client) postAsGet(ctx context.Context, url string, ok resOkay) (*http.Response, error) { + return c.post(ctx, nil, url, noPayload, ok) +} + +// post issues a signed POST request in JWS format using the provided key +// to the specified URL. If key is nil, c.Key is used instead. +// It returns a non-error value only when ok reports true. +// +// post retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +// It uses postNoRetry to make individual requests. +func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + res, req, err := c.postNoRetry(ctx, key, url, body) + if err != nil { + return nil, err + } + if ok(res) { + return res, nil + } + resErr := responseError(res) + res.Body.Close() + switch { + // Check for bad nonce before isRetriable because it may have been returned + // with an unretriable response code such as 400 Bad Request. 
+ case isBadNonce(resErr): + // Consider any previously stored nonce values to be invalid. + c.clearNonces() + case !isRetriable(res.StatusCode): + return nil, resErr + } + retry.inc() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if err := retry.backoff(ctx, req, res); err != nil { + return nil, resErr + } + } +} + +// postNoRetry signs the body with the given key and POSTs it to the provided url. +// It is used by c.post to retry unsuccessful attempts. +// The body argument must be JSON-serializable. +// +// If key argument is nil, c.Key is used to sign the request. +// If key argument is nil and c.accountKID returns a non-zero keyID, +// the request is sent in KID form. Otherwise, JWK form is used. +// +// In practice, when interfacing with RFC-compliant CAs most requests are sent in KID form +// and JWK is used only when KID is unavailable: new account endpoint and certificate +// revocation requests authenticated by a cert key. +// See jwsEncodeJSON for other details. +func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { + kid := noKeyID + if key == nil { + if c.Key == nil { + return nil, nil, errors.New("acme: Client.Key must be populated to make POST requests") + } + key = c.Key + kid = c.accountKID(ctx) + } + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, nil, err + } + b, err := jwsEncodeJSON(body, key, kid, nonce, url) + if err != nil { + return nil, nil, err + } + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/jose+json") + res, err := c.doNoRetry(ctx, req) + if err != nil { + return nil, nil, err + } + c.addNonce(res.Header) + return res, req, nil +} + +// doNoRetry issues a request req, replacing its context (if any) with ctx. 
+func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", c.userAgent()) + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. + // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +// packageVersion is the version of the module that contains this package, for +// sending as part of the User-Agent header. It's set in version_go112.go. +var packageVersion string + +// userAgent returns the User-Agent header value. It includes the package name, +// the module version (if available), and the c.UserAgent value (if set). +func (c *Client) userAgent() string { + ua := "golang.org/x/crypto/acme" + if packageVersion != "" { + ua += "@" + packageVersion + } + if c.UserAgent != "" { + ua = c.UserAgent + " " + ua + } + return ua +} + +// isBadNonce reports whether err is an ACME "badnonce" error. +func isBadNonce(err error) bool { + // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. + // However, ACME servers in the wild return their versions of the error. + // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. + ae, ok := err.(*Error) + return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") +} + +// isRetriable reports whether a request can be retried +// based on the response status code. 
+// +// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. +// Callers should parse the response and check with isBadNonce. +func isRetriable(code int) bool { + return code <= 399 || code >= 500 || code == http.StatusTooManyRequests +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := io.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go new file mode 100644 index 0000000..b38828d --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -0,0 +1,257 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/asn1" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "math/big" +) + +// KeyID is the account key identity provided by a CA during registration. +type KeyID string + +// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID. +// See jwsEncodeJSON for details. +const noKeyID = KeyID("") + +// noPayload indicates jwsEncodeJSON will encode zero-length octet string +// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make +// authenticated GET requests via POSTing with an empty payload. 
+// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details. +const noPayload = "" + +// noNonce indicates that the nonce should be omitted from the protected header. +// See jwsEncodeJSON for details. +const noNonce = "" + +// jsonWebSignature can be easily serialized into a JWS following +// https://tools.ietf.org/html/rfc7515#section-3.2. +type jsonWebSignature struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` +} + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format containing either kid or jwk +// fields based on the provided KeyID value. +// +// The claimset is marshalled using json.Marshal unless it is a string. +// In which case it is inserted directly into the message. +// +// If kid is non-empty, its quoted value is inserted in the protected header +// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted +// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive. +// +// If nonce is non-empty, its quoted value is inserted in the protected header. +// +// See https://tools.ietf.org/html/rfc7515#section-7. 
+func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid KeyID, nonce, url string) ([]byte, error) { + if key == nil { + return nil, errors.New("nil key") + } + alg, sha := jwsHasher(key.Public()) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + headers := struct { + Alg string `json:"alg"` + KID string `json:"kid,omitempty"` + JWK json.RawMessage `json:"jwk,omitempty"` + Nonce string `json:"nonce,omitempty"` + URL string `json:"url"` + }{ + Alg: alg, + Nonce: nonce, + URL: url, + } + switch kid { + case noKeyID: + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + headers.JWK = json.RawMessage(jwk) + default: + headers.KID = string(kid) + } + phJSON, err := json.Marshal(headers) + if err != nil { + return nil, err + } + phead := base64.RawURLEncoding.EncodeToString([]byte(phJSON)) + var payload string + if val, ok := claimset.(string); ok { + payload = val + } else { + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload = base64.RawURLEncoding.EncodeToString(cs) + } + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + enc := jsonWebSignature{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwsWithMAC creates and signs a JWS using the given key and the HS256 +// algorithm. kid and url are included in the protected header. rawPayload +// should not be base64-URL-encoded. +func jwsWithMAC(key []byte, kid, url string, rawPayload []byte) (*jsonWebSignature, error) { + if len(key) == 0 { + return nil, errors.New("acme: cannot sign JWS with an empty MAC key") + } + header := struct { + Algorithm string `json:"alg"` + KID string `json:"kid"` + URL string `json:"url,omitempty"` + }{ + // Only HMAC-SHA256 is supported. 
+ Algorithm: "HS256", + KID: kid, + URL: url, + } + rawProtected, err := json.Marshal(header) + if err != nil { + return nil, err + } + protected := base64.RawURLEncoding.EncodeToString(rawProtected) + payload := base64.RawURLEncoding.EncodeToString(rawPayload) + + h := hmac.New(sha256.New, key) + if _, err := h.Write([]byte(protected + "." + payload)); err != nil { + return nil, err + } + mac := h.Sum(nil) + + return &jsonWebSignature{ + Protected: protected, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(mac), + }, nil +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// The hash is unused for ECDSA keys. 
+func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch pub := key.Public().(type) { + case *rsa.PublicKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PublicKey: + sigASN1, err := key.Sign(rand.Reader, digest, hash) + if err != nil { + return nil, err + } + + var rs struct{ R, S *big.Int } + if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil { + return nil, err + } + + rb, sb := rs.R.Bytes(), rs.S.Bytes() + size := pub.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { + switch pub := pub.(type) { + case *rsa.PublicKey: + return "RS256", crypto.SHA256 + case *ecdsa.PublicKey: + switch pub.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/golang.org/x/crypto/acme/rfc8555.go b/vendor/golang.org/x/crypto/acme/rfc8555.go new file mode 100644 index 0000000..ee24dfd --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/rfc8555.go @@ -0,0 +1,476 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package acme + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "time" +) + +// DeactivateReg permanently disables an existing account associated with c.Key. +// A deactivated account can no longer request certificate issuance or access +// resources related to the account, such as orders or authorizations. +// +// It only works with CAs implementing RFC 8555. +func (c *Client) DeactivateReg(ctx context.Context) error { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return err + } + url := string(c.accountKID(ctx)) + if url == "" { + return ErrNoAccount + } + req := json.RawMessage(`{"status": "deactivated"}`) + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + res.Body.Close() + return nil +} + +// registerRFC is equivalent to c.Register but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. 
+func (c *Client) registerRFC(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) { + c.cacheMu.Lock() // guard c.kid access + defer c.cacheMu.Unlock() + + req := struct { + TermsAgreed bool `json:"termsOfServiceAgreed,omitempty"` + Contact []string `json:"contact,omitempty"` + ExternalAccountBinding *jsonWebSignature `json:"externalAccountBinding,omitempty"` + }{ + Contact: acct.Contact, + } + if c.dir.Terms != "" { + req.TermsAgreed = prompt(c.dir.Terms) + } + + // set 'externalAccountBinding' field if requested + if acct.ExternalAccountBinding != nil { + eabJWS, err := c.encodeExternalAccountBinding(acct.ExternalAccountBinding) + if err != nil { + return nil, fmt.Errorf("acme: failed to encode external account binding: %v", err) + } + req.ExternalAccountBinding = eabJWS + } + + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus( + http.StatusOK, // account with this key already registered + http.StatusCreated, // new account created + )) + if err != nil { + return nil, err + } + + defer res.Body.Close() + a, err := responseAccount(res) + if err != nil { + return nil, err + } + // Cache Account URL even if we return an error to the caller. + // It is by all means a valid and usable "kid" value for future requests. + c.KID = KeyID(a.URI) + if res.StatusCode == http.StatusOK { + return nil, ErrAccountAlreadyExists + } + return a, nil +} + +// encodeExternalAccountBinding will encode an external account binding stanza +// as described in https://tools.ietf.org/html/rfc8555#section-7.3.4. +func (c *Client) encodeExternalAccountBinding(eab *ExternalAccountBinding) (*jsonWebSignature, error) { + jwk, err := jwkEncode(c.Key.Public()) + if err != nil { + return nil, err + } + return jwsWithMAC(eab.Key, eab.KID, c.dir.RegURL, []byte(jwk)) +} + +// updateRegRFC is equivalent to c.UpdateReg but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. 
+func (c *Client) updateRegRFC(ctx context.Context, a *Account) (*Account, error) { + url := string(c.accountKID(ctx)) + if url == "" { + return nil, ErrNoAccount + } + req := struct { + Contact []string `json:"contact,omitempty"` + }{ + Contact: a.Contact, + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseAccount(res) +} + +// getGegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +func (c *Client) getRegRFC(ctx context.Context) (*Account, error) { + req := json.RawMessage(`{"onlyReturnExisting": true}`) + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus(http.StatusOK)) + if e, ok := err.(*Error); ok && e.ProblemType == "urn:ietf:params:acme:error:accountDoesNotExist" { + return nil, ErrNoAccount + } + if err != nil { + return nil, err + } + + defer res.Body.Close() + return responseAccount(res) +} + +func responseAccount(res *http.Response) (*Account, error) { + var v struct { + Status string + Contact []string + Orders string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid account response: %v", err) + } + return &Account{ + URI: res.Header.Get("Location"), + Status: v.Status, + Contact: v.Contact, + OrdersURL: v.Orders, + }, nil +} + +// accountKeyRollover attempts to perform account key rollover. +// On success it will change client.Key to the new key. 
+func (c *Client) accountKeyRollover(ctx context.Context, newKey crypto.Signer) error { + dir, err := c.Discover(ctx) // Also required by c.accountKID + if err != nil { + return err + } + kid := c.accountKID(ctx) + if kid == noKeyID { + return ErrNoAccount + } + oldKey, err := jwkEncode(c.Key.Public()) + if err != nil { + return err + } + payload := struct { + Account string `json:"account"` + OldKey json.RawMessage `json:"oldKey"` + }{ + Account: string(kid), + OldKey: json.RawMessage(oldKey), + } + inner, err := jwsEncodeJSON(payload, newKey, noKeyID, noNonce, dir.KeyChangeURL) + if err != nil { + return err + } + + res, err := c.post(ctx, nil, dir.KeyChangeURL, base64.RawURLEncoding.EncodeToString(inner), wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + c.Key = newKey + return nil +} + +// AuthorizeOrder initiates the order-based application for certificate issuance, +// as opposed to pre-authorization in Authorize. +// It is only supported by CAs implementing RFC 8555. +// +// The caller then needs to fetch each authorization with GetAuthorization, +// identify those with StatusPending status and fulfill a challenge using Accept. +// Once all authorizations are satisfied, the caller will typically want to poll +// order status using WaitOrder until it's in StatusReady state. +// To finalize the order and obtain a certificate, the caller submits a CSR with CreateOrderCert. 
+func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderOption) (*Order, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + req := struct { + Identifiers []wireAuthzID `json:"identifiers"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{} + for _, v := range id { + req.Identifiers = append(req.Identifiers, wireAuthzID{ + Type: v.Type, + Value: v.Value, + }) + } + for _, o := range opt { + switch o := o.(type) { + case orderNotBeforeOpt: + req.NotBefore = time.Time(o).Format(time.RFC3339) + case orderNotAfterOpt: + req.NotAfter = time.Time(o).Format(time.RFC3339) + default: + // Package's fault if we let this happen. + panic(fmt.Sprintf("unsupported order option type %T", o)) + } + } + + res, err := c.post(ctx, nil, dir.OrderURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// GetOrder retrives an order identified by the given URL. +// For orders created with AuthorizeOrder, the url value is Order.URI. +// +// If a caller needs to poll an order until its status is final, +// see the WaitOrder method. +func (c *Client) GetOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// WaitOrder polls an order from the given URL until it is in one of the final states, +// StatusReady, StatusValid or StatusInvalid, the CA responded with a non-retryable error +// or the context is done. +// +// It returns a non-nil Order only if its Status is StatusReady or StatusValid. +// In all other cases WaitOrder returns an error. +// If the Status is StatusInvalid, the returned error is of type *OrderError. 
+func (c *Client) WaitOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + for { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + o, err := responseOrder(res) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case o.Status == StatusInvalid: + return nil, &OrderError{OrderURL: o.URI, Status: o.Status} + case o.Status == StatusReady || o.Status == StatusValid: + return o, nil + } + + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Default retry-after. + // Same reasoning as in WaitAuthorization. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. + } + } +} + +func responseOrder(res *http.Response) (*Order, error) { + var v struct { + Status string + Expires time.Time + Identifiers []wireAuthzID + NotBefore time.Time + NotAfter time.Time + Error *wireError + Authorizations []string + Finalize string + Certificate string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: error reading order: %v", err) + } + o := &Order{ + URI: res.Header.Get("Location"), + Status: v.Status, + Expires: v.Expires, + NotBefore: v.NotBefore, + NotAfter: v.NotAfter, + AuthzURLs: v.Authorizations, + FinalizeURL: v.Finalize, + CertURL: v.Certificate, + } + for _, id := range v.Identifiers { + o.Identifiers = append(o.Identifiers, AuthzID{Type: id.Type, Value: id.Value}) + } + if v.Error != nil { + o.Error = v.Error.error(nil /* headers */) + } + return o, nil +} + +// CreateOrderCert submits the CSR (Certificate Signing Request) to a CA at the specified URL. +// The URL is the FinalizeURL field of an Order created with AuthorizeOrder. +// +// If the bundle argument is true, the returned value also contain the CA (issuer) +// certificate chain. Otherwise, only a leaf certificate is returned. 
+// The returned URL can be used to re-fetch the certificate using FetchCert. +// +// This method is only supported by CAs implementing RFC 8555. See CreateCert for pre-RFC CAs. +// +// CreateOrderCert returns an error if the CA's response is unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateOrderCert(ctx context.Context, url string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return nil, "", err + } + + // RFC describes this as "finalize order" request. + req := struct { + CSR string `json:"csr"` + }{ + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + o, err := responseOrder(res) + if err != nil { + return nil, "", err + } + + // Wait for CA to issue the cert if they haven't. + if o.Status != StatusValid { + o, err = c.WaitOrder(ctx, o.URI) + } + if err != nil { + return nil, "", err + } + // The only acceptable status post finalize and WaitOrder is "valid". + if o.Status != StatusValid { + return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status} + } + crt, err := c.fetchCertRFC(ctx, o.CertURL, bundle) + return crt, o.CertURL, err +} + +// fetchCertRFC downloads issued certificate from the given URL. +// It expects the CA to respond with PEM-encoded certificate chain. +// +// The URL argument is the CertURL field of Order. +func (c *Client) fetchCertRFC(ctx context.Context, url string, bundle bool) ([][]byte, error) { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // Get all the bytes up to a sane maximum. + // Account very roughly for base64 overhead. 
+ const max = maxCertChainSize + maxCertChainSize/33 + b, err := io.ReadAll(io.LimitReader(res.Body, max+1)) + if err != nil { + return nil, fmt.Errorf("acme: fetch cert response stream: %v", err) + } + if len(b) > max { + return nil, errors.New("acme: certificate chain is too big") + } + + // Decode PEM chain. + var chain [][]byte + for { + var p *pem.Block + p, b = pem.Decode(b) + if p == nil { + break + } + if p.Type != "CERTIFICATE" { + return nil, fmt.Errorf("acme: invalid PEM cert type %q", p.Type) + } + + chain = append(chain, p.Bytes) + if !bundle { + return chain, nil + } + if len(chain) > maxChainLen { + return nil, errors.New("acme: certificate chain is too long") + } + } + if len(chain) == 0 { + return nil, errors.New("acme: certificate chain is empty") + } + return chain, nil +} + +// sends a cert revocation request in either JWK form when key is non-nil or KID form otherwise. +func (c *Client) revokeCertRFC(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + req := &struct { + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + res, err := c.post(ctx, key, c.dir.RevokeURL, req, wantStatus(http.StatusOK)) + if err != nil { + if isAlreadyRevoked(err) { + // Assume it is not an error to revoke an already revoked cert. + return nil + } + return err + } + defer res.Body.Close() + return nil +} + +func isAlreadyRevoked(err error) bool { + e, ok := err.(*Error) + return ok && e.ProblemType == "urn:ietf:params:acme:error:alreadyRevoked" +} + +// ListCertAlternates retrieves any alternate certificate chain URLs for the +// given certificate chain URL. These alternate URLs can be passed to FetchCert +// in order to retrieve the alternate certificate chains. +// +// If there are no alternate issuer certificate chains, a nil slice will be +// returned. 
+func (c *Client) ListCertAlternates(ctx context.Context, url string) ([]string, error) { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // We don't need the body but we need to discard it so we don't end up + // preventing keep-alive + if _, err := io.Copy(io.Discard, res.Body); err != nil { + return nil, fmt.Errorf("acme: cert alternates response stream: %v", err) + } + alts := linkHeader(res.Header, "alternate") + return alts, nil +} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go new file mode 100644 index 0000000..4888726 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -0,0 +1,614 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME status values of Account, Order, Authorization and Challenge objects. +// See https://tools.ietf.org/html/rfc8555#section-7.1.6 for details. +const ( + StatusDeactivated = "deactivated" + StatusExpired = "expired" + StatusInvalid = "invalid" + StatusPending = "pending" + StatusProcessing = "processing" + StatusReady = "ready" + StatusRevoked = "revoked" + StatusUnknown = "unknown" + StatusValid = "valid" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. 
+const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +var ( + // ErrUnsupportedKey is returned when an unsupported key type is encountered. + ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + + // ErrAccountAlreadyExists indicates that the Client's key has already been registered + // with the CA. It is returned by Register method. + ErrAccountAlreadyExists = errors.New("acme: account already exists") + + // ErrNoAccount indicates that the Client's key has not been registered with the CA. + ErrNoAccount = errors.New("acme: account does not exist") +) + +// A Subproblem describes an ACME subproblem as reported in an Error. +type Subproblem struct { + // Type is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + Type string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Instance indicates a URL that the client should direct a human user to visit + // in order for instructions on how to agree to the updated Terms of Service. + // In such an event CA sets StatusCode to 403, Type to + // "urn:ietf:params:acme:error:userActionRequired", and adds a Link header with relation + // "terms-of-service" containing the latest TOS URL. + Instance string + // Identifier may contain the ACME identifier that the error is for. 
+ Identifier *AuthzID +} + +func (sp Subproblem) String() string { + str := fmt.Sprintf("%s: ", sp.Type) + if sp.Identifier != nil { + str += fmt.Sprintf("[%s: %s] ", sp.Identifier.Type, sp.Identifier.Value) + } + str += sp.Detail + return str +} + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Instance indicates a URL that the client should direct a human user to visit + // in order for instructions on how to agree to the updated Terms of Service. + // In such an event CA sets StatusCode to 403, ProblemType to + // "urn:ietf:params:acme:error:userActionRequired" and a Link header with relation + // "terms-of-service" containing the latest TOS URL. + Instance string + // Header is the original server error response headers. + // It may be nil. + Header http.Header + // Subproblems may contain more detailed information about the individual problems + // that caused the error. This field is only sent by RFC 8555 compatible ACME + // servers. Defined in RFC 8555 Section 6.7.1. + Subproblems []Subproblem +} + +func (e *Error) Error() string { + str := fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) + if len(e.Subproblems) > 0 { + str += fmt.Sprintf("; subproblems:") + for _, sp := range e.Subproblems { + str += fmt.Sprintf("\n\t%s", sp) + } + } + return str +} + +// AuthorizationError indicates that an authorization for an identifier +// did not succeed. +// It contains all errors from Challenge items of the failed Authorization. 
+type AuthorizationError struct { + // URI uniquely identifies the failed Authorization. + URI string + + // Identifier is an AuthzID.Value of the failed Authorization. + Identifier string + + // Errors is a collection of non-nil error values of Challenge items + // of the failed Authorization. + Errors []error +} + +func (a *AuthorizationError) Error() string { + e := make([]string, len(a.Errors)) + for i, err := range a.Errors { + e[i] = err.Error() + } + + if a.Identifier != "" { + return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) + } + + return fmt.Sprintf("acme: authorization error: %s", strings.Join(e, "; ")) +} + +// OrderError is returned from Client's order related methods. +// It indicates the order is unusable and the clients should start over with +// AuthorizeOrder. +// +// The clients can still fetch the order object from CA using GetOrder +// to inspect its state. +type OrderError struct { + OrderURL string + Status string +} + +func (oe *OrderError) Error() string { + return fmt.Sprintf("acme: order %s status: %s", oe.OrderURL, oe.Status) +} + +// RateLimit reports whether err represents a rate limit error and +// any Retry-After duration returned by the server. +// +// See the following for more details on rate limiting: +// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 +func RateLimit(err error) (time.Duration, bool) { + e, ok := err.(*Error) + if !ok { + return 0, false + } + // Some CA implementations may return incorrect values. + // Use case-insensitive comparison. + if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { + return 0, false + } + if e.Header == nil { + return 0, true + } + return retryAfter(e.Header.Get("Retry-After")), true +} + +// Account is a user account. It is associated with a private key. +// Non-RFC 8555 fields are empty when interfacing with a compliant CA. 
+type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + // When interfacing with RFC 8555-compliant CAs, URI is the "kid" field + // value in JWS signed requests. + URI string + + // Contact is a slice of contact info used during registration. + // See https://tools.ietf.org/html/rfc8555#section-7.3 for supported + // formats. + Contact []string + + // Status indicates current account status as returned by the CA. + // Possible values are StatusValid, StatusDeactivated, and StatusRevoked. + Status string + + // OrdersURL is a URL from which a list of orders submitted by this account + // can be fetched. + OrdersURL string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + // + // It is non-RFC 8555 compliant. Package users can store the ToS they agree to + // during Client's Register call in the prompt callback function. + AgreedTerms string + + // Actual terms of a CA. + // + // It is non-RFC 8555 compliant. Use Directory's Terms field. + // When a CA updates their terms and requires an account agreement, + // a URL at which instructions to do so is available in Error's Instance field. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + // + // It is non-RFC 8555 compliant. Use Directory's AuthzURL or OrderURL. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. + // + // It is non-RFC 8555 compliant and is obsoleted by OrdersURL. + Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + // + // It is non-RFC 8555 compliant and is obsoleted by OrdersURL. 
+	Certificates string
+
+	// ExternalAccountBinding represents an arbitrary binding to an account of
+	// the CA which the ACME server is tied to.
+	// See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details.
+	ExternalAccountBinding *ExternalAccountBinding
+}
+
+// ExternalAccountBinding contains the data needed to form a request with
+// an external account binding.
+// See https://tools.ietf.org/html/rfc8555#section-7.3.4 for more details.
+type ExternalAccountBinding struct {
+	// KID is the Key ID of the symmetric MAC key that the CA provides to
+	// identify an external account from ACME.
+	KID string
+
+	// Key is the bytes of the symmetric key that the CA provides to identify
+	// the account. Key must correspond to the KID.
+	Key []byte
+}
+
+func (e *ExternalAccountBinding) String() string {
+	return fmt.Sprintf("&{KID: %q, Key: redacted}", e.KID)
+}
+
+// Directory is ACME server discovery data.
+// See https://tools.ietf.org/html/rfc8555#section-7.1.1 for more details.
+type Directory struct {
+	// NonceURL indicates an endpoint where to fetch fresh nonce values from.
+	NonceURL string
+
+	// RegURL is an account endpoint URL, allowing for creating new accounts.
+	// Pre-RFC 8555 CAs also allow modifying existing accounts at this URL.
+	RegURL string
+
+	// OrderURL is used to initiate the certificate issuance flow
+	// as described in RFC 8555.
+	OrderURL string
+
+	// AuthzURL is used to initiate identifier pre-authorization flow.
+	// Empty string indicates the flow is unsupported by the CA.
+	AuthzURL string
+
+	// CertURL is a new certificate issuance endpoint URL.
+	// It is non-RFC 8555 compliant and is obsoleted by OrderURL.
+	CertURL string
+
+	// RevokeURL is used to initiate a certificate revocation flow.
+	RevokeURL string
+
+	// KeyChangeURL allows to perform account key rollover flow.
+	KeyChangeURL string
+
+	// Terms is a URI identifying the current terms of service.
+ Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. + Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC 6844. + CAA []string + + // ExternalAccountRequired indicates that the CA requires for all account-related + // requests to include external account binding information. + ExternalAccountRequired bool +} + +// Order represents a client's request for a certificate. +// It tracks the request flow progress through to issuance. +type Order struct { + // URI uniquely identifies an order. + URI string + + // Status represents the current status of the order. + // It indicates which action the client should take. + // + // Possible values are StatusPending, StatusReady, StatusProcessing, StatusValid and StatusInvalid. + // Pending means the CA does not believe that the client has fulfilled the requirements. + // Ready indicates that the client has fulfilled all the requirements and can submit a CSR + // to obtain a certificate. This is done with Client's CreateOrderCert. + // Processing means the certificate is being issued. + // Valid indicates the CA has issued the certificate. It can be downloaded + // from the Order's CertURL. This is done with Client's FetchCert. + // Invalid means the certificate will not be issued. Users should consider this order + // abandoned. + Status string + + // Expires is the timestamp after which CA considers this order invalid. + Expires time.Time + + // Identifiers contains all identifier objects which the order pertains to. + Identifiers []AuthzID + + // NotBefore is the requested value of the notBefore field in the certificate. + NotBefore time.Time + + // NotAfter is the requested value of the notAfter field in the certificate. 
+ NotAfter time.Time + + // AuthzURLs represents authorizations to complete before a certificate + // for identifiers specified in the order can be issued. + // It also contains unexpired authorizations that the client has completed + // in the past. + // + // Authorization objects can be fetched using Client's GetAuthorization method. + // + // The required authorizations are dictated by CA policies. + // There may not be a 1:1 relationship between the identifiers and required authorizations. + // Required authorizations can be identified by their StatusPending status. + // + // For orders in the StatusValid or StatusInvalid state these are the authorizations + // which were completed. + AuthzURLs []string + + // FinalizeURL is the endpoint at which a CSR is submitted to obtain a certificate + // once all the authorizations are satisfied. + FinalizeURL string + + // CertURL points to the certificate that has been issued in response to this order. + CertURL string + + // The error that occurred while processing the order as received from a CA, if any. + Error *Error +} + +// OrderOption allows customizing Client.AuthorizeOrder call. +type OrderOption interface { + privateOrderOpt() +} + +// WithOrderNotBefore sets order's NotBefore field. +func WithOrderNotBefore(t time.Time) OrderOption { + return orderNotBeforeOpt(t) +} + +// WithOrderNotAfter sets order's NotAfter field. +func WithOrderNotAfter(t time.Time) OrderOption { + return orderNotAfterOpt(t) +} + +type orderNotBeforeOpt time.Time + +func (orderNotBeforeOpt) privateOrderOpt() {} + +type orderNotAfterOpt time.Time + +func (orderNotAfterOpt) privateOrderOpt() {} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status is the current status of an authorization. + // Possible values are StatusPending, StatusValid, StatusInvalid, StatusDeactivated, + // StatusExpired and StatusRevoked. 
+	Status string
+
+	// Identifier is what the account is authorized to represent.
+	Identifier AuthzID
+
+	// The timestamp after which the CA considers the authorization invalid.
+	Expires time.Time
+
+	// Wildcard is true for authorizations of a wildcard domain name.
+	Wildcard bool
+
+	// Challenges that the client needs to fulfill in order to prove possession
+	// of the identifier (for pending authorizations).
+	// For valid authorizations, the challenge that was validated.
+	// For invalid authorizations, the challenge that was attempted and failed.
+	//
+	// RFC 8555 compatible CAs require users to fulfill only one of the challenges.
+	Challenges []*Challenge
+
+	// A collection of sets of challenges, each of which would be sufficient
+	// to prove possession of the identifier.
+	// Clients must complete a set of challenges that covers at least one set.
+	// Challenges are identified by their indices in the challenges array.
+	// If this field is empty, the client needs to complete all challenges.
+	//
+	// This field is unused in RFC 8555.
+	Combinations [][]int
+}
+
+// AuthzID is an identifier that an account is authorized to represent.
+type AuthzID struct {
+	Type  string // The type of identifier, "dns" or "ip".
+	Value string // The identifier itself, e.g. "example.org".
+}
+
+// DomainIDs creates a slice of AuthzID with "dns" identifier type.
+func DomainIDs(names ...string) []AuthzID {
+	a := make([]AuthzID, len(names))
+	for i, v := range names {
+		a[i] = AuthzID{Type: "dns", Value: v}
+	}
+	return a
+}
+
+// IPIDs creates a slice of AuthzID with "ip" identifier type.
+// Each element of addr is textual form of an address as defined
+// in RFC 1123 Section 2.1 for IPv4 and in RFC 5952 Section 4 for IPv6.
+func IPIDs(addr ...string) []AuthzID {
+	a := make([]AuthzID, len(addr))
+	for i, v := range addr {
+		a[i] = AuthzID{Type: "ip", Value: v}
+	}
+	return a
+}
+
+// wireAuthzID is ACME JSON representation of authorization identifier objects.
+type wireAuthzID struct { + Type string `json:"type"` + Value string `json:"value"` +} + +// wireAuthz is ACME JSON representation of Authorization objects. +type wireAuthz struct { + Identifier wireAuthzID + Status string + Expires time.Time + Wildcard bool + Challenges []wireChallenge + Combinations [][]int + Error *wireError +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Expires: z.Expires, + Wildcard: z.Wildcard, + Challenges: make([]*Challenge, len(z.Challenges)), + Combinations: z.Combinations, // shallow copy + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + + if z.Error != nil { + err.Errors = append(err.Errors, z.Error.error(nil)) + } + + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + + return err +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-alpn-01", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + // In RFC 8555, possible values are StatusPending, StatusProcessing, StatusValid, + // and StatusInvalid. + Status string + + // Validated is the time at which the CA validated this challenge. + // Always zero value in pre-RFC 8555. + Validated time.Time + + // Error indicates the reason for an authorization failure + // when this challenge was used. 
+ // The type of a non-nil value is *Error. + Error error +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URL string `json:"url"` // RFC + URI string `json:"uri"` // pre-RFC + Type string + Token string + Status string + Validated time.Time + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URL, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.URI == "" { + v.URI = c.URI // c.URL was empty; use legacy + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string + Instance string + Subproblems []Subproblem +} + +func (e *wireError) error(h http.Header) *Error { + err := &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Instance: e.Instance, + Header: h, + Subproblems: e.Subproblems, + } + return err +} + +// CertOption is an optional argument type for the TLS ChallengeCert methods for +// customizing a temporary certificate for TLS-based challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLS ChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. 
+func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/crypto/acme/version_go112.go b/vendor/golang.org/x/crypto/acme/version_go112.go new file mode 100644 index 0000000..b9efdb5 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/version_go112.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.12 +// +build go1.12 + +package acme + +import "runtime/debug" + +func init() { + // Set packageVersion if the binary was built in modules mode and x/crypto + // was not replaced with a different module. + info, ok := debug.ReadBuildInfo() + if !ok { + return + } + for _, m := range info.Deps { + if m.Path != "golang.org/x/crypto" { + continue + } + if m.Replace == nil { + packageVersion = m.Version + } + break + } +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..5577c0f --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,304 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. 
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// ErrPasswordTooLong is returned when the password passed to +// GenerateFromPassword is too long (i.e. > 72 bytes). 
+var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +// GenerateFromPassword does not accept passwords longer than 72 bytes, which +// is the longest password bcrypt will operate on. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + if len(password) > 72 { + return nil, ErrPasswordTooLong + } + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. 
+func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. 
+ hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. 
+func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000..d2e98d4 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. 
+ Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. 
+// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) 
+ for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000..56bfaaa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000..4b9daa1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,745 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; 
BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE 
$0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + 
VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define 
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ 
blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ 
R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, 
c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + 
VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define 
LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + 
VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, 
X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000..5fa1b32 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.7 && amd64 && gc && !purego +// +build !go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000..ae75eb9 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,279 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ 
+ PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 
0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, 
X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000..3168a8a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = 
bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 0000000..b0137cd --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 0000000..52c414d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. 
+// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + 
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000..9d86339 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + 
c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ 
c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..213bf20 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. 
+func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. 
+func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..d040775 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. 
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 
0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 
0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 
0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..904b57e --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. 
To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 0000000..2681af3 --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// compiled the first time it is needed. 
+type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go new file mode 100644 index 0000000..524f930 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file printer. + +package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +// Format returns a go.mod file as a byte slice, formatted in standard style. 
+func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + return pr.Bytes() +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. +func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. 
+func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + p.tokens(x.Token) + + case *LineBlock: + p.tokens(x.Token) + p.printf(" ") + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. + p.comment = append(p.comment, x.Comment().Suffix...) 
+} + +func (p *printer) tokens(tokens []string) { + sep := "" + for _, t := range tokens { + if t == "," || t == ")" || t == "]" || t == "}" { + sep = "" + } + p.printf("%s%s", sep, t) + sep = " " + if t == "(" || t == "[" || t == "{" { + sep = "" + } + } +} diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go new file mode 100644 index 0000000..70947ee --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -0,0 +1,958 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes an arbitrary source position in a file, including the +// file, line, column, and byte offset. +type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. 
+type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a Comments struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. +type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +// addLine adds a line containing the given tokens to the file. +// +// If the first token of the hint matches the first token of the +// line, the new line is added at the end of the block containing hint, +// extracting hint into a new block if it is not yet in one. +// +// If the hint is non-nil buts its first token does not match, +// the new line is added after the block containing hint +// (or hint itself, if not in a block). +// +// If no hint is provided, addLine appends the line to the end of +// the last block with a matching first token, +// or to the end of the file if no such block exists. +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type. 
+ Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + newLineAfter := func(i int) *Line { + new := &Line{Token: tokens} + if i == len(x.Stmt) { + x.Stmt = append(x.Stmt, new) + } else { + x.Stmt = append(x.Stmt, nil) + copy(x.Stmt[i+2:], x.Stmt[i+1:]) + x.Stmt[i+1] = new + } + return new + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + if stmt.Token == nil || stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + + case *LineBlock: + if stmt == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + + for j, line := range stmt.Line { + if line == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Add new line after hint within the block. + stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +// markRemoved modifies line so that it (and its end-of-line comment, if any) +// will be dropped by (*FileSyntax).Cleanup. 
+func (line *Line) markRemoved() { + line.Token = nil + line.Comments.Suffix = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) +} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. +type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. 
+type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + tokenStart []byte // token being scanned to end of input + token token // next token to be returned by lex, peek + pos Position // current input position + comments []Comment // accumulated comments + + // Parser state. + file *FileSyntax // returned top-level syntax tree + parseErrors ErrorList // errors encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. 
+func parse(file string, data []byte) (f *FileSyntax, err error) { + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + in := newInput(file, data) + defer func() { + if e := recover(); e != nil && e != &in.parseErrors { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: fmt.Errorf("internal error: %v", e), + }) + } + if err == nil && len(in.parseErrors) > 0 { + err = in.parseErrors + } + }() + + // Prime the lexer by reading in the first token. It will be available + // in the next peek() or lex() call. + in.readToken() + + // Invoke the parser. + in.parseFile() + if len(in.parseErrors) > 0 { + return nil, in.parseErrors + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// Error does not return: it panics. +func (in *input) Error(s string) { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: errors.New(s), + }) + panic(&in.parseErrors) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. +func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. +func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. 
+ for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type token struct { + kind tokenKind + pos Position + endPos Position + text string +} + +type tokenKind int + +const ( + _EOF tokenKind = -(iota + 1) + _EOLCOMMENT + _IDENT + _STRING + _COMMENT + + // newlines and punctuation tokens are allowed as ASCII codes. +) + +func (k tokenKind) isComment() bool { + return k == _COMMENT || k == _EOLCOMMENT +} + +// isEOL returns whether a token terminates a line. +func (k tokenKind) isEOL() bool { + return k == _EOF || k == _EOLCOMMENT || k == '\n' +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token's text has +// been consumed using readRune. +func (in *input) startToken() { + in.tokenStart = in.remaining + in.token.text = "" + in.token.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in tok.text. +// A single trailing newline (LF or CRLF) will be removed from comment tokens. +func (in *input) endToken(kind tokenKind) { + in.token.kind = kind + text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) + if kind.isComment() { + if strings.HasSuffix(text, "\r\n") { + text = text[:len(text)-2] + } else { + text = strings.TrimSuffix(text, "\n") + } + } + in.token.text = text + in.token.endPos = in.pos +} + +// peek returns the kind of the the next token returned by lex. 
+func (in *input) peek() tokenKind { + return in.token.kind +} + +// lex is called from the parser to obtain the next input token. +func (in *input) lex() token { + tok := in.token + in.readToken() + return tok +} + +// readToken lexes the next token from the text and stores it in in.token. +func (in *input) readToken() { + // Skip past spaces, stopping at non-space or EOF. + for !in.eof() { + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken() + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. + i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + in.endToken(_COMMENT) + return + } + + // Otherwise, save comment for later attachment to syntax tree. + in.endToken(_EOLCOMMENT) + in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix}) + return + } + + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. + in.startToken() + + // End of file. + if in.eof() { + in.endToken(_EOF) + return + } + + // Punctuation tokens. 
+ switch c := in.peekRune(); c { + case '\n', '(', ')', '[', ']', '{', '}', ',': + in.readRune() + in.endToken(tokenKind(c)) + return + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(_STRING) + return + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + in.readRune() + } + in.endToken(_IDENT) +} + +// isIdent reports whether c is an identifier rune. +// We treat most printable runes as identifier runes, except for a handful of +// ASCII punctuation characters. +func isIdent(c int) bool { + switch r := rune(c); r { + case ' ', '(', ')', '[', ']', '{', '}', ',': + return false + default: + return !unicode.IsSpace(r) && unicode.IsPrint(r) + } +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. +// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. 
+func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. +func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. + for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. + in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. 
+ for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. + for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. 
+func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var cb *CommentBlock + for { + switch in.peek() { + case '\n': + in.lex() + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + tok := in.lex() + if cb == nil { + cb = &CommentBlock{Start: tok.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt() + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt() { + tok := in.lex() + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + switch { + case tok.kind.isEOL(): + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: tokens, + End: end, + }) + return + + case tok.kind == '(': + if next := in.peek(); next.isEOL() { + // Start of block: no more tokens on this line. + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok)) + return + } else if next == ')' { + rparen := in.lex() + if in.peek().isEOL() { + // Empty block. + in.lex() + in.file.Stmt = append(in.file.Stmt, &LineBlock{ + Start: start, + Token: tokens, + LParen: LParen{Pos: tok.pos}, + RParen: RParen{Pos: rparen.pos}, + }) + return + } + // '( )' in the middle of the line, not a block. + tokens = append(tokens, tok.text, rparen.text) + } else { + // '(' in the middle of the line, not a block. 
+ tokens = append(tokens, tok.text) + } + + default: + tokens = append(tokens, tok.text) + end = tok.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: lparen.pos}, + } + var comments []Comment + for { + switch in.peek() { + case _EOLCOMMENT: + // Suffix comment, will be attached later by assignComments. + in.lex() + case '\n': + // Blank line. Add an empty comment to preserve it. + in.lex() + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + tok := in.lex() + comments = append(comments, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': + rparen := in.lex() + x.RParen.Before = comments + x.RParen.Pos = rparen.pos + if !in.peek().isEOL() { + in.Error("syntax error (expected newline after closing paren)") + } + in.lex() + return x + default: + l := in.parseLine() + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine() *Line { + tok := in.lex() + if tok.kind.isEOL() { + in.Error("internal parse error: parseLine at end of line") + } + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + if tok.kind.isEOL() { + return &Line{ + Start: start, + Token: tokens, + End: end, + InBlock: true, + } + } + tokens = append(tokens, tok.text) + end = tok.endPos + } +} + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. 
+func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go new file mode 100644 index 0000000..ed2f31a --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -0,0 +1,1556 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements a parser and formatter for go.mod files. +// +// The go.mod syntax is described in +// https://golang.org/cmd/go/#hdr-The_go_mod_file. +// +// The Parse and ParseLax functions both parse a go.mod file and return an +// abstract syntax tree. ParseLax ignores unknown statements and may be used to +// parse go.mod files that may have been developed with newer versions of Go. +// +// The File struct returned by Parse and ParseLax represent an abstract +// go.mod file. File has several methods like AddNewRequire and DropReplace +// that can be used to programmatically edit a file. +// +// The Format function formats a File back to a byte slice which can be +// written to a file. 
+package modfile + +import ( + "errors" + "fmt" + "path/filepath" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Deprecated string + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. +type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +// A Retract is a single retract statement. +type Retract struct { + VersionInterval + Rationale string + Syntax *Line +} + +// A VersionInterval represents a range of versions with upper and lower bounds. +// Intervals are closed: both bounds are included. When Low is equal to High, +// the interval may refer to a single version ('v1.2.3') or an interval +// ('[v1.2.3, v1.2.3]'); both have the same representation. +type VersionInterval struct { + Low, High string +} + +// A Require is a single require statement. +type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +func (r *Require) markRemoved() { + r.Syntax.markRemoved() + *r = Require{} +} + +func (r *Require) setVersion(v string) { + r.Mod.Version = v + + if line := r.Syntax; len(line.Token) > 0 { + if line.InBlock { + // If the line is preceded by an empty line, remove it; see + // https://golang.org/issue/33779. 
+ if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 { + line.Comments.Before = line.Comments.Before[:0] + } + if len(line.Token) >= 2 { // example.com v1.2.3 + line.Token[1] = v + } + } else { + if len(line.Token) >= 3 { // require example.com v1.2.3 + line.Token[2] = v + } + } + } +} + +// setIndirect sets line to have (or not have) a "// indirect" comment. +func (r *Require) setIndirect(indirect bool) { + r.Indirect = indirect + line := r.Syntax + if isIndirect(line) == indirect { + return + } + if indirect { + // Adding comment. + if len(line.Suffix) == 0 { + // New comment. + line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} + return + } + + com := &line.Suffix[0] + text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash))) + if text == "" { + // Empty comment. + com.Token = "// indirect" + return + } + + // Insert at beginning of existing comment. + com.Token = "// indirect; " + text + return + } + + // Removing comment. + f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + if f == "indirect" { + // Remove whole comment. + line.Suffix = nil + return + } + + // Remove comment prefix. + com := &line.Suffix[0] + i := strings.Index(com.Token, "indirect;") + com.Token = "//" + com.Token[i+len("indirect;"):] +} + +// isIndirect reports whether line has a "// indirect" comment, +// meaning it is in go.mod only for its effect on indirect dependencies, +// so that it can be dropped entirely once the effective version of the +// indirect dependency reaches the given minimum version. 
+func isIndirect(line *Line) bool { + if len(line.Suffix) == 0 { + return false + } + f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;") +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// errDontFix is returned by a VersionFixer to indicate the version should be +// left alone, even if it's not canonical. +var dontFixRetract VersionFixer = func(_, vers string) (string, error) { + return vers, nil +} + +// Parse parses and returns a go.mod file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// must return the same string). +func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. 
+// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. +func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + var errs ErrorList + + // fix versions in retract directives after the file is parsed. + // We need the module path to fix versions, and it might be at the end. + defer func() { + oldLen := len(errs) + f.fixRetract(fix, &errs) + if len(errs) > oldLen { + parsed, err = nil, errs + } + }() + + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + } + switch x.Token[0] { + default: + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + case "module", "require", "exclude", "replace", "retract": + for _, l := range x.Line { + f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if len(errs) > 0 { + return nil, errs + } + return f, nil +} + +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) +var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) + +func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args 
[]string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. + if !strict { + switch verb { + case "go", "module", "retract", "require": + // want these even for dependency go.mods + default: + return + } + } + + wrapModPathError := func(modPath string, err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + }) + } + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + fixed := false + if !strict { + if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil { + args[0] = m[1] + fixed = true + } + } + if !fixed { + errorf("invalid go version '%s': must match format 1.23", args[0]) + return + } + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "module": + if f.Module != nil { + errorf("repeated module statement") + return + } + deprecated := parseDeprecation(block, line) + f.Module = &Module{ + Syntax: line, + Deprecated: deprecated, + } + if len(args) != 1 { + errorf("usage: module module/path") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Module.Mod = module.Version{Path: s} + + case 
"require", "exclude": + if len(args) != 2 { + errorf("usage: %s module/path v1.2.3", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + v, err := parseVersion(verb, s, &args[1], fix) + if err != nil { + wrapError(err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + wrapError(err) + return + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + wrapModPathError(s, err) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + + case "retract": + rationale := parseDirectiveComment(block, line) + vi, err := parseVersionInterval(verb, "", &args, dontFixRetract) + if err != nil { + if strict { + wrapError(err) + return + } else { + // Only report errors parsing intervals in the main module. We may + // support additional syntax in the future, such as open and half-open + // intervals. Those can't be supported now, because they break the + // go.mod parser, even in lax mode. + return + } + } + if len(args) > 0 && strict { + // In the future, there may be additional information after the version. 
+ errorf("unexpected token after version: %q", args[0]) + return + } + retract := &Retract{ + VersionInterval: vi, + Rationale: rationale, + Syntax: line, + } + f.Retract = append(f.Retract, retract) + } +} + +func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) { + wrapModPathError := func(modPath string, err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + } + } + wrapError := func(err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + Err: err, + } + } + errorf := func(format string, args ...interface{}) *Error { + return wrapError(fmt.Errorf(format, args...)) + } + + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb) + } + s, err := parseString(&args[0]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + pathMajor, err := modulePathMajor(s) + if err != nil { + return nil, wrapModPathError(s, err) + + } + var v string + if arrow == 2 { + v, err = parseVersion(verb, s, &args[1], fix) + if err != nil { + return nil, wrapError(err) + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + return nil, wrapModPathError(s, err) + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + nv := "" + if len(args) == arrow+2 { + if !IsDirectoryPath(ns) { + return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + } + if filepath.Separator == '/' && strings.Contains(ns, `\`) { + return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") + } + } + if len(args) == arrow+3 { + nv, err = 
parseVersion(verb, ns, &args[arrow+2], fix) + if err != nil { + return nil, wrapError(err) + } + if IsDirectoryPath(ns) { + return nil, errorf("replacement module directory path %q cannot have version", ns) + + } + } + return &Replace{ + Old: module.Version{Path: s, Version: v}, + New: module.Version{Path: ns, Version: nv}, + Syntax: line, + }, nil +} + +// fixRetract applies fix to each retract directive in f, appending any errors +// to errs. +// +// Most versions are fixed as we parse the file, but for retract directives, +// the relevant module path is the one specified with the module directive, +// and that might appear at the end of the file (or not at all). +func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) { + if fix == nil { + return + } + path := "" + if f.Module != nil { + path = f.Module.Mod.Path + } + var r *Retract + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: r.Syntax.Start, + Err: err, + }) + } + + for _, r = range f.Retract { + if path == "" { + wrapError(errors.New("no module directive found, so retract cannot be used")) + return // only print the first one of these + } + + args := r.Syntax.Token + if args[0] == "retract" { + args = args[1:] + } + vi, err := parseVersionInterval("retract", path, &args, fix) + if err != nil { + wrapError(err) + } + r.VersionInterval = vi + } +} + +func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) { + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + 
errorf("invalid go version '%s': must match format 1.23", args[0]) + return + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "use": + if len(args) != 1 { + errorf("usage: %s local/dir", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Use = append(f.Use, &Use{ + Path: s, + Syntax: line, + }) + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + } +} + +// IsDirectoryPath reports whether the given path should be interpreted +// as a directory path. Just like on the go command line, relative paths +// and rooted paths are directory paths; the rest are module paths. +func IsDirectoryPath(ns string) bool { + // Because go.mod files can move from one system to another, + // we check all known path syntaxes, both Unix and Windows. + return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || + strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' +} + +// MustQuote reports whether s must be quoted in order to appear as +// a single token in a go.mod line. +func MustQuote(s string) bool { + for _, r := range s { + switch r { + case ' ', '"', '\'', '`': + return true + + case '(', ')', '[', ']', '{', '}', ',': + if len(s) > 1 { + return true + } + + default: + if !unicode.IsPrint(r) { + return true + } + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. 
+func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) { + toks := *args + if len(toks) == 0 || toks[0] == "(" { + return VersionInterval{}, fmt.Errorf("expected '[' or version") + } + if toks[0] != "[" { + v, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + *args = toks[1:] + return VersionInterval{Low: v, High: v}, nil + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after '['") + } + low, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "," { + return VersionInterval{}, fmt.Errorf("expected ',' after version") + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after ','") + } + high, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "]" { + return VersionInterval{}, fmt.Errorf("expected ']' after version") + } + toks = toks[1:] + + *args = toks + return VersionInterval{Low: low, High: high}, nil +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. 
+ return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`) + +// parseDeprecation extracts the text of comments on a "module" directive and +// extracts a deprecation message from that. +// +// A deprecation message is contained in a paragraph within a block of comments +// that starts with "Deprecated:" (case sensitive). The message runs until the +// end of the paragraph and does not include the "Deprecated:" prefix. If the +// comment block has multiple paragraphs that start with "Deprecated:", +// parseDeprecation returns the message from the first. +func parseDeprecation(block *LineBlock, line *Line) string { + text := parseDirectiveComment(block, line) + m := deprecatedRE.FindStringSubmatch(text) + if m == nil { + return "" + } + return m[1] +} + +// parseDirectiveComment extracts the text of comments on a directive. +// If the directive's line does not have comments and is part of a block that +// does have comments, the block's comments are used. 
+func parseDirectiveComment(block *LineBlock, line *Line) string { + comments := line.Comment() + if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 { + comments = block.Comment() + } + groups := [][]Comment{comments.Before, comments.Suffix} + var lines []string + for _, g := range groups { + for _, c := range g { + if !strings.HasPrefix(c.Token, "//") { + continue // blank line + } + lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//"))) + } + } + return strings.Join(lines, "\n") +} + +type ErrorList []Error + +func (e ErrorList) Error() string { + errStrs := make([]string, len(e)) + for i, err := range e { + errStrs[i] = err.Error() + } + return strings.Join(errStrs, "\n") +} + +type Error struct { + Filename string + Pos Position + Verb string + ModPath string + Err error +} + +func (e *Error) Error() string { + var pos string + if e.Pos.LineRune > 1 { + // Don't print LineRune if it's 1 (beginning of line). + // It's always 1 except in scanner errors, which are rare. 
+ pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune) + } else if e.Pos.Line > 0 { + pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line) + } else if e.Filename != "" { + pos = fmt.Sprintf("%s: ", e.Filename) + } + + var directive string + if e.ModPath != "" { + directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath) + } else if e.Verb != "" { + directive = fmt.Sprintf("%s: ", e.Verb) + } + + return pos + directive + e.Err.Error() +} + +func (e *Error) Unwrap() error { return e.Err } + +func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: *s, + Err: err, + }, + } + } + if fix != nil { + fixed, err := fix(path, t) + if err != nil { + if err, ok := err.(*module.ModuleError); ok { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: err.Err, + } + } + return "", err + } + t = fixed + } else { + cv := module.CanonicalVersion(t) + if cv == "" { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: t, + Err: errors.New("must be of the form v1.2.3"), + }, + } + } + t = cv + } + *s = t + return *s, nil +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
+func (f *File) Cleanup() {
+	// In-place compaction: keep only entries that were not cleared.
+	// A cleared entry holds its zero value (empty module path).
+	w := 0
+	for _, r := range f.Require {
+		if r.Mod.Path != "" {
+			f.Require[w] = r
+			w++
+		}
+	}
+	f.Require = f.Require[:w]
+
+	w = 0
+	for _, x := range f.Exclude {
+		if x.Mod.Path != "" {
+			f.Exclude[w] = x
+			w++
+		}
+	}
+	f.Exclude = f.Exclude[:w]
+
+	w = 0
+	for _, r := range f.Replace {
+		if r.Old.Path != "" {
+			f.Replace[w] = r
+			w++
+		}
+	}
+	f.Replace = f.Replace[:w]
+
+	w = 0
+	for _, r := range f.Retract {
+		// A retract directive is cleared by zeroing its whole interval.
+		if r.Low != "" || r.High != "" {
+			f.Retract[w] = r
+			w++
+		}
+	}
+	f.Retract = f.Retract[:w]
+
+	// Drop the corresponding removed lines from the syntax tree as well.
+	f.Syntax.Cleanup()
+}
+
+// AddGoStmt sets the go directive to version, adding a new line placed near
+// the module directive (if present) when none exists yet. It returns an
+// error if version does not match GoVersionRE.
+func (f *File) AddGoStmt(version string) error {
+	if !GoVersionRE.MatchString(version) {
+		return fmt.Errorf("invalid language version string %q", version)
+	}
+	if f.Go == nil {
+		// Use the module directive's line as a placement hint so the go
+		// line lands next to it.
+		var hint Expr
+		if f.Module != nil && f.Module.Syntax != nil {
+			hint = f.Module.Syntax
+		}
+		f.Go = &Go{
+			Version: version,
+			Syntax:  f.Syntax.addLine(hint, "go", version),
+		}
+	} else {
+		// A go line already exists: update it in place.
+		f.Go.Version = version
+		f.Syntax.updateLine(f.Go.Syntax, "go", version)
+	}
+	return nil
+}
+
+// AddRequire sets the first require line for path to version vers,
+// preserving any existing comments for that line and removing all
+// other lines for path.
+//
+// If no line currently exists for path, AddRequire adds a new line
+// at the end of the last require block.
+func (f *File) AddRequire(path, vers string) error {
+	need := true
+	for _, r := range f.Require {
+		if r.Mod.Path == path {
+			if need {
+				// First matching line: update it in place.
+				r.Mod.Version = vers
+				f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
+				need = false
+			} else {
+				// Duplicate line: clear it; Cleanup removes it later.
+				r.Syntax.markRemoved()
+				*r = Require{}
+			}
+		}
+	}
+
+	if need {
+		f.AddNewRequire(path, vers, false)
+	}
+	return nil
+}
+
+// AddNewRequire adds a new require line for path at version vers at the end of
+// the last require block, regardless of any existing require lines for path.
+func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + r := &Require{ + Mod: module.Version{Path: path, Version: vers}, + Syntax: line, + } + r.setIndirect(indirect) + f.Require = append(f.Require, r) +} + +// SetRequire updates the requirements of f to contain exactly req, preserving +// the existing block structure and line comment contents (except for 'indirect' +// markings) for the first requirement on each named module path. +// +// The Syntax field is ignored for the requirements in req. +// +// Any requirements not already present in the file are added to the block +// containing the last require line. +// +// The requirements in req must specify at most one distinct version for each +// module path. +// +// If any existing requirements may be removed, the caller should call Cleanup +// after all edits are complete. +func (f *File) SetRequire(req []*Require) { + type elem struct { + version string + indirect bool + } + need := make(map[string]elem) + for _, r := range req { + if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version { + panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version)) + } + need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect} + } + + // Update or delete the existing Require entries to preserve + // only the first for each module path in req. + for _, r := range f.Require { + e, ok := need[r.Mod.Path] + if ok { + r.setVersion(e.version) + r.setIndirect(e.indirect) + } else { + r.markRemoved() + } + delete(need, r.Mod.Path) + } + + // Add new entries in the last block of the file for any paths that weren't + // already present. + // + // This step is nondeterministic, but the final result will be deterministic + // because we will sort the block. 
+ for path, e := range need { + f.AddNewRequire(path, e.version, e.indirect) + } + + f.SortBlocks() +} + +// SetRequireSeparateIndirect updates the requirements of f to contain the given +// requirements. Comment contents (except for 'indirect' markings) are retained +// from the first existing requirement for each module path. Like SetRequire, +// SetRequireSeparateIndirect adds requirements for new paths in req, +// updates the version and "// indirect" comment on existing requirements, +// and deletes requirements on paths not in req. Existing duplicate requirements +// are deleted. +// +// As its name suggests, SetRequireSeparateIndirect puts direct and indirect +// requirements into two separate blocks, one containing only direct +// requirements, and the other containing only indirect requirements. +// SetRequireSeparateIndirect may move requirements between these two blocks +// when their indirect markings change. However, SetRequireSeparateIndirect +// won't move requirements from other blocks, especially blocks with comments. +// +// If the file initially has one uncommented block of requirements, +// SetRequireSeparateIndirect will split it into a direct-only and indirect-only +// block. This aids in the transition to separate blocks. +func (f *File) SetRequireSeparateIndirect(req []*Require) { + // hasComments returns whether a line or block has comments + // other than "indirect". + hasComments := func(c Comments) bool { + return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 || + (len(c.Suffix) == 1 && + strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect") + } + + // moveReq adds r to block. If r was in another block, moveReq deletes + // it from that block and transfers its comments. 
+ moveReq := func(r *Require, block *LineBlock) { + var line *Line + if r.Syntax == nil { + line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}} + r.Syntax = line + if r.Indirect { + r.setIndirect(true) + } + } else { + line = new(Line) + *line = *r.Syntax + if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" { + line.Token = line.Token[1:] + } + r.Syntax.Token = nil // Cleanup will delete the old line. + r.Syntax = line + } + line.InBlock = true + block.Line = append(block.Line, line) + } + + // Examine existing require lines and blocks. + var ( + // We may insert new requirements into the last uncommented + // direct-only and indirect-only blocks. We may also move requirements + // to the opposite block if their indirect markings change. + lastDirectIndex = -1 + lastIndirectIndex = -1 + + // If there are no direct-only or indirect-only blocks, a new block may + // be inserted after the last require line or block. + lastRequireIndex = -1 + + // If there's only one require line or block, and it's uncommented, + // we'll move its requirements to the direct-only or indirect-only blocks. + requireLineOrBlockCount = 0 + + // Track the block each requirement belongs to (if any) so we can + // move them later. 
+ lineToBlock = make(map[*Line]*LineBlock) + ) + for i, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + if !hasComments(stmt.Comments) { + if isIndirect(stmt) { + lastIndirectIndex = i + } else { + lastDirectIndex = i + } + } + + case *LineBlock: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + for _, line := range stmt.Line { + lineToBlock[line] = stmt + if hasComments(line.Comments) { + allDirect = false + allIndirect = false + } else if isIndirect(line) { + allDirect = false + } else { + allIndirect = false + } + } + if allDirect { + lastDirectIndex = i + } + if allIndirect { + lastIndirectIndex = i + } + } + } + + oneFlatUncommentedBlock := requireLineOrBlockCount == 1 && + !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment()) + + // Create direct and indirect blocks if needed. Convert lines into blocks + // if needed. If we end up with an empty block or a one-line block, + // Cleanup will delete it or convert it to a line later. 
+ insertBlock := func(i int) *LineBlock { + block := &LineBlock{Token: []string{"require"}} + f.Syntax.Stmt = append(f.Syntax.Stmt, nil) + copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:]) + f.Syntax.Stmt[i] = block + return block + } + + ensureBlock := func(i int) *LineBlock { + switch stmt := f.Syntax.Stmt[i].(type) { + case *LineBlock: + return stmt + case *Line: + block := &LineBlock{ + Token: []string{"require"}, + Line: []*Line{stmt}, + } + stmt.Token = stmt.Token[1:] // remove "require" + stmt.InBlock = true + f.Syntax.Stmt[i] = block + return block + default: + panic(fmt.Sprintf("unexpected statement: %v", stmt)) + } + } + + var lastDirectBlock *LineBlock + if lastDirectIndex < 0 { + if lastIndirectIndex >= 0 { + lastDirectIndex = lastIndirectIndex + lastIndirectIndex++ + } else if lastRequireIndex >= 0 { + lastDirectIndex = lastRequireIndex + 1 + } else { + lastDirectIndex = len(f.Syntax.Stmt) + } + lastDirectBlock = insertBlock(lastDirectIndex) + } else { + lastDirectBlock = ensureBlock(lastDirectIndex) + } + + var lastIndirectBlock *LineBlock + if lastIndirectIndex < 0 { + lastIndirectIndex = lastDirectIndex + 1 + lastIndirectBlock = insertBlock(lastIndirectIndex) + } else { + lastIndirectBlock = ensureBlock(lastIndirectIndex) + } + + // Delete requirements we don't want anymore. + // Update versions and indirect comments on requirements we want to keep. + // If a requirement is in last{Direct,Indirect}Block with the wrong + // indirect marking after this, or if the requirement is in an single + // uncommented mixed block (oneFlatUncommentedBlock), move it to the + // correct block. + // + // Some blocks may be empty after this. Cleanup will remove them. + need := make(map[string]*Require) + for _, r := range req { + need[r.Mod.Path] = r + } + have := make(map[string]*Require) + for _, r := range f.Require { + path := r.Mod.Path + if need[path] == nil || have[path] != nil { + // Requirement not needed, or duplicate requirement. Delete. 
+ r.markRemoved() + continue + } + have[r.Mod.Path] = r + r.setVersion(need[path].Mod.Version) + r.setIndirect(need[path].Indirect) + if need[path].Indirect && + (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) { + moveReq(r, lastIndirectBlock) + } else if !need[path].Indirect && + (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) { + moveReq(r, lastDirectBlock) + } + } + + // Add new requirements. + for path, r := range need { + if have[path] == nil { + if r.Indirect { + moveReq(r, lastIndirectBlock) + } else { + moveReq(r, lastDirectBlock) + } + f.Require = append(f.Require, r) + } + } + + f.SortBlocks() +} + +func (f *File) DropRequire(path string) error { + for _, r := range f.Require { + if r.Mod.Path == path { + r.Syntax.markRemoved() + *r = Require{} + } + } + return nil +} + +// AddExclude adds a exclude statement to the mod file. Errors if the provided +// version is not a canonical version string +func (f *File) AddExclude(path, vers string) error { + if err := checkCanonicalVersion(path, vers); err != nil { + return err + } + + var hint *Line + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + return nil + } + if x.Mod.Path == path { + hint = x.Syntax + } + } + + f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) + return nil +} + +func (f *File) DropExclude(path, vers string) error { + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + x.Syntax.markRemoved() + *x = Exclude{} + } + } + return nil +} + +func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { + return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) +} + +func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error { + need := true + old := module.Version{Path: oldPath, Version: oldVers} + new := 
module.Version{Path: newPath, Version: newVers} + tokens := []string{"replace", AutoQuote(oldPath)} + if oldVers != "" { + tokens = append(tokens, oldVers) + } + tokens = append(tokens, "=>", AutoQuote(newPath)) + if newVers != "" { + tokens = append(tokens, newVers) + } + + var hint *Line + for _, r := range *replace { + if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { + if need { + // Found replacement for old; update to use new. + r.New = new + syntax.updateLine(r.Syntax, tokens...) + need = false + continue + } + // Already added; delete other replacements for same. + r.Syntax.markRemoved() + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + *replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +// AddRetract adds a retract statement to the mod file. 
Errors if the provided +// version interval does not consist of canonical version strings +func (f *File) AddRetract(vi VersionInterval, rationale string) error { + var path string + if f.Module != nil { + path = f.Module.Mod.Path + } + if err := checkCanonicalVersion(path, vi.High); err != nil { + return err + } + if err := checkCanonicalVersion(path, vi.Low); err != nil { + return err + } + + r := &Retract{ + VersionInterval: vi, + } + if vi.Low == vi.High { + r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low)) + } else { + r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") + } + if rationale != "" { + for _, line := range strings.Split(rationale, "\n") { + com := Comment{Token: "// " + line} + r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) + } + } + return nil +} + +func (f *File) DropRetract(vi VersionInterval) error { + for _, r := range f.Retract { + if r.VersionInterval == vi { + r.Syntax.markRemoved() + *r = Retract{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + less := lineLess + if block.Token[0] == "retract" { + less = lineRetractLess + } + sort.SliceStable(block.Line, func(i, j int) bool { + return less(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate exclude and replace directives. +// +// Earlier exclude directives take priority. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. 
+func (f *File) removeDups() { + removeDups(f.Syntax, &f.Exclude, &f.Replace) +} + +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { + kill := make(map[*Line]bool) + + // Remove duplicate excludes. + if exclude != nil { + haveExclude := make(map[module.Version]bool) + for _, x := range *exclude { + if haveExclude[x.Mod] { + kill[x.Syntax] = true + continue + } + haveExclude[x.Mod] = true + } + var excl []*Exclude + for _, x := range *exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + *exclude = excl + } + + // Remove duplicate replacements. + // Later replacements take priority over earlier ones. + haveReplace := make(map[module.Version]bool) + for i := len(*replace) - 1; i >= 0; i-- { + x := (*replace)[i] + if haveReplace[x.Old] { + kill[x.Syntax] = true + continue + } + haveReplace[x.Old] = true + } + var repl []*Replace + for _, x := range *replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + *replace = repl + + // Duplicate require and retract directives are not removed. + + // Drop killed statements from the syntax tree. + var stmts []Expr + for _, stmt := range syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + syntax.Stmt = stmts +} + +// lineLess returns whether li should be sorted before lj. It sorts +// lexicographically without assigning any special meaning to tokens. +func lineLess(li, lj *Line) bool { + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) +} + +// lineRetractLess returns whether li should be sorted before lj for lines in +// a "retract" block. It treats each line as a version interval. 
Single versions +// are compared as if they were intervals with the same low and high version. +// Intervals are sorted in descending order, first by low version, then by +// high version, using semver.Compare. +func lineRetractLess(li, lj *Line) bool { + interval := func(l *Line) VersionInterval { + if len(l.Token) == 1 { + return VersionInterval{Low: l.Token[0], High: l.Token[0]} + } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { + return VersionInterval{Low: l.Token[1], High: l.Token[3]} + } else { + // Line in unknown format. Treat as an invalid version. + return VersionInterval{} + } + } + vii := interval(li) + vij := interval(lj) + if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { + return cmp > 0 + } + return semver.Compare(vii.High, vij.High) > 0 +} + +// checkCanonicalVersion returns a non-nil error if vers is not a canonical +// version string or does not match the major version of path. +// +// If path is non-empty, the error text suggests a format with a major version +// corresponding to the path. +func checkCanonicalVersion(path, vers string) error { + _, pathMajor, pathMajorOk := module.SplitPathVersion(path) + + if vers == "" || vers != module.CanonicalVersion(vers) { + if pathMajor == "" { + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form v1.2.3"), + } + } + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)), + } + } + + if pathMajorOk { + if err := module.CheckPathMajor(vers, pathMajor); err != nil { + if pathMajor == "" { + // In this context, the user probably wrote "v2.3.4" when they meant + // "v2.3.4+incompatible". Suggest that instead of "v0 or v1". 
+ return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)), + } + } + return err + } + } + + return nil +} diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go new file mode 100644 index 0000000..0c0e521 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -0,0 +1,234 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "fmt" + "sort" + "strings" +) + +// A WorkFile is the parsed, interpreted form of a go.work file. +type WorkFile struct { + Go *Go + Use []*Use + Replace []*Replace + + Syntax *FileSyntax +} + +// A Use is a single directory statement. +type Use struct { + Path string // Use path of module. + ModulePath string // Module path in the comment. + Syntax *Line +} + +// ParseWork parses and returns a go.work file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// must return the same string). 
+func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
+	fs, err := parse(file, data)
+	if err != nil {
+		return nil, err
+	}
+	f := &WorkFile{
+		Syntax: fs,
+	}
+	var errs ErrorList
+
+	for _, x := range fs.Stmt {
+		switch x := x.(type) {
+		case *Line:
+			// A top-level directive, e.g. "go 1.20" or "use ./dir".
+			f.add(&errs, x, x.Token[0], x.Token[1:], fix)
+
+		case *LineBlock:
+			// A factored block, e.g. "use ( ... )". Only a single
+			// introducer token is valid for a block.
+			if len(x.Token) > 1 {
+				errs = append(errs, Error{
+					Filename: file,
+					Pos:      x.Start,
+					Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+				})
+				continue
+			}
+			switch x.Token[0] {
+			default:
+				errs = append(errs, Error{
+					Filename: file,
+					Pos:      x.Start,
+					Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+				})
+				continue
+			case "use", "replace":
+				// Each line inside the block is handled as if it were
+				// a standalone directive of the block's verb.
+				for _, l := range x.Line {
+					f.add(&errs, l, x.Token[0], l.Token, fix)
+				}
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, errs
+	}
+	return f, nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropUse
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *WorkFile) Cleanup() {
+	// In-place compaction: keep only entries that were not cleared.
+	w := 0
+	for _, r := range f.Use {
+		if r.Path != "" {
+			f.Use[w] = r
+			w++
+		}
+	}
+	f.Use = f.Use[:w]
+
+	w = 0
+	for _, r := range f.Replace {
+		if r.Old.Path != "" {
+			f.Replace[w] = r
+			w++
+		}
+	}
+	f.Replace = f.Replace[:w]
+
+	// Drop the corresponding removed lines from the syntax tree as well.
+	f.Syntax.Cleanup()
+}
+
+// AddGoStmt sets the go directive to version, inserting a new line after any
+// leading comment-only blocks when none exists yet. It returns an error if
+// version does not match GoVersionRE.
+func (f *WorkFile) AddGoStmt(version string) error {
+	if !GoVersionRE.MatchString(version) {
+		return fmt.Errorf("invalid language version string %q", version)
+	}
+	if f.Go == nil {
+		stmt := &Line{Token: []string{"go", version}}
+		f.Go = &Go{
+			Version: version,
+			Syntax:  stmt,
+		}
+		// Find the first non-comment-only block and add the go statement
+		// before it. That will keep file comments at the top.
+ i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + +func (f *WorkFile) AddUse(diskPath, modulePath string) error { + need := true + for _, d := range f.Use { + if d.Path == diskPath { + if need { + d.ModulePath = modulePath + f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath)) + need = false + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + } + + if need { + f.AddNewUse(diskPath, modulePath) + } + return nil +} + +func (f *WorkFile) AddNewUse(diskPath, modulePath string) { + line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath)) + f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line}) +} + +func (f *WorkFile) SetUse(dirs []*Use) { + need := make(map[string]string) + for _, d := range dirs { + need[d.Path] = d.ModulePath + } + + for _, d := range f.Use { + if modulePath, ok := need[d.Path]; ok { + d.ModulePath = modulePath + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + + // TODO(#45713): Add module path to comment. 
+ + for diskPath, modulePath := range need { + f.AddNewUse(diskPath, modulePath) + } + f.SortBlocks() +} + +func (f *WorkFile) DropUse(path string) error { + for _, d := range f.Use { + if d.Path == path { + d.Syntax.markRemoved() + *d = Use{} + } + } + return nil +} + +func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error { + return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) +} + +func (f *WorkFile) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +func (f *WorkFile) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.SliceStable(block.Line, func(i, j int) bool { + return lineLess(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate replace directives. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. +func (f *WorkFile) removeDups() { + removeDups(f.Syntax, nil, &f.Replace) +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 0000000..c26d1d2 --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,841 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. 
+// +// The module.Version type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably Check, verify that +// a particular path, version pair is valid. +// +// # Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. 
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. 
+package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "fmt" + "path" + "sort" + "strings" + "unicode" + "unicode/utf8" + "errors" + + "golang.org/x/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. + Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a ModuleError derived from a Version and error, +// or err itself if it is already such an error. 
+func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See CheckPath, CheckImportPath, +// and CheckFilePath for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. 
+// In addition to the path being a valid module path
+// and the version being a valid semantic version,
+// the two must correspond.
+// For example, the path "yaml/v2" only corresponds to
+// semantic versions beginning with "v2.".
+func Check(path, version string) error {
+	if err := CheckPath(path); err != nil {
+		return err
+	}
+	if !semver.IsValid(version) {
+		return &ModuleError{
+			Path: path,
+			Err:  &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
+		}
+	}
+	_, pathMajor, _ := SplitPathVersion(path)
+	if err := CheckPathMajor(version, pathMajor); err != nil {
+		return &ModuleError{Path: path, Err: err}
+	}
+	return nil
+}
+
+// firstPathOK reports whether r can appear in the first element of a module path.
+// The first element of the path must be an LDH domain name, at least for now.
+// To avoid case ambiguity, the domain name must be entirely lower case.
+func firstPathOK(r rune) bool {
+	return r == '-' || r == '.' ||
+		'0' <= r && r <= '9' ||
+		'a' <= r && r <= 'z'
+}
+
+// modPathOK reports whether r can appear in a module path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
+//
+// This matches what "go get" has historically recognized in import paths,
+// and avoids confusing sequences like '%20' or '+' that would change meaning
+// if used in a URL.
+//
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see "escaped paths" above).
+func modPathOK(r rune) bool {
+	if r < utf8.RuneSelf {
+		return r == '-' || r == '.' || r == '_' || r == '~' ||
+			'0' <= r && r <= '9' ||
+			'A' <= r && r <= 'Z' ||
+			'a' <= r && r <= 'z'
+	}
+	return false
+}
+
+// importPathOK reports whether r can appear in a package import path element.
+// +// Import paths are intermediate between module paths and file paths: we allow +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "escaped paths" above. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + return strings.ContainsRune(allowed, r) + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by CheckImportPath, +// with three additional constraints. 
+// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. +func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + + if err := checkPath(path, modulePath); err != nil { + return err + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("leading slash") + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("missing dot in first path element") + } + if path[0] == '-' { + return fmt.Errorf("leading dash in first path element") + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("invalid char %q in first path element", r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("invalid version") + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// It must not end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). 
The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} + } + return nil +} + +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with InvalidPathError. +func checkPath(path string, kind pathKind) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' && kind != filePath { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], kind); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], kind); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. 
+func checkElem(elem string, kind pathKind) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && kind == modulePath { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + for _, r := range elem { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + + if kind == filePath { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. 
+// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, filePath); err != nil { + return &InvalidPathError{Kind: "file", Path: path, Err: err} + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' 
{ + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. 
+	// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+		return nil
+	}
+	m := semver.Major(v)
+	if pathMajor == "" {
+		if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
+			return nil
+		}
+		pathMajor = "v0 or v1"
+	} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
+		if m == pathMajor[1:] {
+			return nil
+		}
+		pathMajor = pathMajor[1:]
+	}
+	return &InvalidVersionError{
+		Version: v,
+		Err:     fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
+	}
+}
+
+// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
+// An empty PathMajorPrefix allows either v0 or v1.
+//
+// Note that MatchPathMajor may accept some versions that do not actually begin
+// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
+// pathMajor, even though that pathMajor implies 'v1' tagging.
+func PathMajorPrefix(pathMajor string) string {
+	if pathMajor == "" {
+		return ""
+	}
+	if pathMajor[0] != '/' && pathMajor[0] != '.' {
+		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
+	}
+	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+	}
+	m := pathMajor[1:]
+	if m != semver.Major(m) {
+		// Include a space before "passed" so the panic message reads correctly.
+		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix is not a valid major version")
+	}
+	return m
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
+func CanonicalVersion(v string) string {
+	cv := semver.Canonical(v)
+	if semver.Build(v) == "+incompatible" {
+		cv += "+incompatible"
+	}
+	return cv
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Version fields.
+// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. 
+ return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, filePath); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' 
{ + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by path.Match) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +// Trailing slashes on patterns are ignored. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. + var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + glob = strings.TrimSuffix(glob, "/") + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 0000000..f04ad37 --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. +// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. +// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. 
+ +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). +func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. 
+ digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. +func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. +func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by ZeroPseudoVersion. +func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. 
+func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. +func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. 
If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 0000000..a30a22b --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. 
+// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. 
+func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. 
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use Compare instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements sort.Interface for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using ByVersion. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 0000000..cd0a8ac --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. 
+func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 0000000..2a93886 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 
0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + 
Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + 
Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom 
= 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 
0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, // col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, 
// charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 
0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, // wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 
0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 
0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + 
"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 0000000..ff7acf2 --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. 
+ "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 0000000..822ed42 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. 
The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... 
+ } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 0000000..c484e5a --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. 
+ if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. 
+var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 0000000..b628880 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + 
"DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": 
'\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": 
'\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + 
"Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": 
'\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": 
'\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": 
'\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": 
'\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": 
'\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": 
'\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', 
+ "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": 
'\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', 
+ "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": 
'\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": 
'\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + 
"isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + 
"leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + 
"lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + 
"natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": 
'\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', 
+ "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + 
"puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + 
"rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', 
+ "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + 
"succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + 
"triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": 
'\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": 
'\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": 
'\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', 
'\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 0000000..d856139 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. 
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. 
+ b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. + } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a': + esc = ">" + case '"': + // """ is shorter than """. 
+ esc = """ + case '\r': + esc = " " + default: + panic("unrecognized escape character") + } + s = s[i+1:] + if _, err := w.WriteString(esc); err != nil { + return err + } + i = strings.IndexAny(s, escapedChars) + } + _, err := w.WriteString(s) + return err +} + +// EscapeString escapes special characters like "<" to become "<". It +// escapes only five such characters: <, >, &, ' and ". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func EscapeString(s string) string { + if strings.IndexAny(s, escapedChars) == -1 { + return s + } + var buf bytes.Buffer + escape(&buf, s) + return buf.String() +} + +// UnescapeString unescapes entities like "<" to become "<". It unescapes a +// larger range of entities than EscapeString escapes. For example, "á" +// unescapes to "á", as does "á" and "&xE1;". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 0000000..9da9e9d --- /dev/null +++ b/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,222 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.6.5. 
+var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. +var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var 
mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": 
"xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 0000000..1350eef --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,225 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + // RawNode nodes are not returned by the parser, but can be part of the + // Node tree passed to func Render to insert raw HTML (without escaping). + // If so, this package makes no guarantee that the rendered HTML is secure + // (from e.g. Cross Site Scripting attacks) or well-formed. + RawNode + scopeMarkerNode +) + +// Section 12.2.4.3 says "The markers are inserted when entering applet, +// object, marquee, template, td, th, and caption elements, and are used +// to prevent formatting from "leaking" into applet, object, marquee, +// template, td, th, and caption elements". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// contains returns whether a is within s. 
+func (s *nodeStack) contains(a atom.Atom) bool { + for _, n := range *s { + if n.DataAtom == a && n.Namespace == "" { + return true + } + } + return false +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} + +type insertionModeStack []insertionMode + +func (s *insertionModeStack) pop() (im insertionMode) { + i := len(*s) + im = (*s)[i-1] + *s = (*s)[:i-1] + return im +} + +func (s *insertionModeStack) top() insertionMode { + if i := len(*s); i > 0 { + return (*s)[i-1] + } + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 0000000..291c919 --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2460 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.4.2) and active formatting + // elements (section 12.2.4.3). + oe, afe nodeStack + // Element pointers (section 12.2.4.4). + head, form *Node + // Other parsing state flags (section 12.2.4.5). + scripting, framesetOK bool + // The stack of template insertion modes + templateStack insertionModeStack + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.6.1). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.4.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.4.2). It returns whether or not there was +// such an element. 
If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// parseGenericRawTextElements implements the generic raw text element parsing +// algorithm defined in 12.2.6.2. +// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text +// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part +// officially, need to make tokenizer consider both states. 
+func (p *parser) parseGenericRawTextElement() { + p.addElement() + p.originalIM = p.im + p.im = textIM +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type != ElementNode { + break + } + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev, template *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + var j int + for j = len(p.oe) - 1; j >= 0; j-- { + if p.oe[j].DataAtom == a.Template { + template = p.oe[j] + break + } + } + + if template != nil && (table == nil || j > i) { + template.AppendChild(n) + return + } + + if table == nil { + // The foster parent is the html element. 
+ parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. +func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. 
+ continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + last := i == 0 + if last && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + if !last { + for ancestor, first := n, p.oe[0]; ancestor != first; { + ancestor = p.oe[p.oe.index(ancestor)-1] + switch ancestor.DataAtom { + case a.Template: + p.im = inSelectIM + return + case a.Table: + p.im = inSelectInTableIM + return + } + } + } + p.im = inSelectIM + case a.Td, a.Th: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + if n.Namespace != "" { + continue + } + p.im = p.templateStack.top() + case a.Head: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inHeadIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + if p.head == nil { + p.im = beforeHeadIM + } else { + p.im = afterHeadIM + } + default: + if last { + p.im = inBodyIM + return + } + continue + } + return + } +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. 
+ return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. 
+ return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Noscript: + if p.scripting { + p.parseGenericRawTextElement() + return true + } + p.addElement() + p.im = inHeadNoscriptIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. + p.tokenizer.NextIsNotRawText() + return true + case a.Script, a.Title: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Noframes, a.Style: + p.parseGenericRawTextElement() + return true + case a.Head: + // Ignore the token. + return true + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + // + // We don't handle all of the corner cases when mixing foreign + // content (i.e. or ) with tag. + case a.Template: + return inHeadIM(p) + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. 
+func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
 block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form != nil && !p.oe.contains(a.Template) {
+				// Ignore the token
+				return true
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			if !p.oe.contains(a.Template) {
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A, "a")
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr, "nobr")
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.parseGenericRawTextElement()
+		case a.Iframe:
+			p.framesetOK = false
+			p.parseGenericRawTextElement()
+		case a.Noembed:
+			p.parseGenericRawTextElement()
+		case a.Noscript:
+			if p.scripting {
+				p.parseGenericRawTextElement()
+				return true
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			// Don't let the tokenizer go into raw text mode when scripting is disabled.
+			p.tokenizer.NextIsNotRawText()
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rb, a.Rtc:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags("rtc")
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			if p.oe.contains(a.Template) {
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if i == -1 {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				if p.oe[i].DataAtom != a.Form {
+					// Ignore the token.
+					return true
+				}
+				p.popUntil(defaultScope, a.Form)
+			} else {
+				node := p.form
+				p.form = nil
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if node == nil || i == -1 || p.oe[i] != node {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				p.oe.remove(node)
+			}
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		case a.Template:
+			return inHeadIM(p)
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case ErrorToken:
+		// TODO: remove this divergence from the HTML5 spec.
+		if len(p.templateStack) > 0 {
+			p.im = inTemplateIM
+			return false
+		}
+		for _, e := range p.oe {
+			switch e.DataAtom {
+			case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
+				a.Thead, a.Tr, a.Body, a.Html:
+			default:
+				return true
+			}
+		}
+	}
+
+	return true
+}
+
+// inBodyEndTagFormatting handles an end tag for a formatting element
+// (e.g. </a>, </b>, </nobr>) while in the "in body" insertion mode. It runs
+// the "adoption agency" algorithm, which repairs mis-nested formatting by
+// reparenting nodes and updating both the stack of open elements (p.oe) and
+// the list of active formatting elements (p.afe) in lock-step.
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-2: if the current node matches by tag name and is not in the
+	// list of active formatting elements, just pop it and stop.
+	if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 {
+		p.oe.pop()
+		return
+	}
+
+	// Steps 3-5. The outer loop. The spec caps it at 8 iterations.
+	for i := 0; i < 8; i++ {
+		// Step 6. Find the formatting element: the most recent entry in the
+		// active formatting elements list (above the last scope marker) with
+		// the requested tag atom.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			// No such formatting element: fall back to "any other end tag".
+			p.inBodyEndTagOther(tagAtom, tagName)
+			return
+		}
+
+		// Step 7. Ignore the tag if formatting element is not in the stack of open elements.
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			// Stale entry: drop it from the active formatting elements too.
+			p.afe.remove(formattingElement)
+			return
+		}
+		// Step 8. Ignore the tag if formatting element is not in the scope.
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Step 9. This step is omitted because it's just a parse error but no need to return.
+
+		// Steps 10-11. Find the furthest block: the topmost "special" element
+		// above the formatting element on the stack of open elements.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			// No furthest block: pop everything up to and including the
+			// formatting element, remove it from the active list, and stop.
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 12-13. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 14. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Step 14.1.
+		j := 0
+		for {
+			// Step 14.2.
+			j++
+			// Step 14.3.
+			x--
+			node = p.oe[x]
+			// Step 14.4. Go to the next step if node is formatting element.
+			if node == formattingElement {
+				break
+			}
+			// Step 14.5. Remove node from the list of active formatting elements if
+			// inner loop counter is greater than three and node is in the list of
+			// active formatting elements.
+			if ni := p.afe.index(node); j > 3 && ni > -1 {
+				p.afe.remove(node)
+				// If any element of the list of active formatting elements is removed,
+				// we need to take care whether bookmark should be decremented or not.
+				// This is because the value of bookmark may exceed the size of the
+				// list by removing elements from the list.
+				if ni <= bookmark {
+					bookmark--
+				}
+				continue
+			}
+			// Step 14.6. Continue the next inner loop if node is not in the list of
+			// active formatting elements.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 14.7. Replace node with a clone in both lists, so the
+			// original can be reparented without corrupting either stack.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 14.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 14.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 14.10.
+			lastNode = node
+		}
+
+		// Step 15. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 16-18. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 19. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 20. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
+	// Walk the stack of open elements from the innermost node outward.
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		node := p.oe[i]
+		// Two elements have the same tag when their Data strings are equal.
+		// Common HTML tags carry a unique, non-zero DataAtom, so the integer
+		// comparison alone decides them; only uncommon (custom) tags, which
+		// have a zero DataAtom, need the slower string comparison. The
+		// combined check below is equivalent to (node.Data == tagName).
+		matched := node.DataAtom == tagAtom
+		if matched && tagAtom == 0 {
+			matched = node.Data == tagName
+		}
+		if matched {
+			// Pop this element and everything opened after it, then stop.
+			p.oe = p.oe[:i]
+			break
+		}
+		// A special element blocks the search; the end tag is ignored.
+		if isSpecialElement(node) {
+			break
+		}
+	}
+}
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a