// NOTE: GENERATED by github.com/mjl-/sherpats, DO NOT MODIFY
namespace api {
// CheckResult is the analysis of a domain, its actual configuration (DNS, TLS,
// connectivity) and the mox configuration. It includes configuration instructions
// (e.g. DNS records), and warnings and errors encountered.
export interface CheckResult {
	Domain: string
	DNSSEC: DNSSECResult
	IPRev: IPRevCheckResult
	MX: MXCheckResult
	TLS: TLSCheckResult
	DANE: DANECheckResult
	SPF: SPFCheckResult
	DKIM: DKIMCheckResult
	DMARC: DMARCCheckResult
	HostTLSRPT: TLSRPTCheckResult
	DomainTLSRPT: TLSRPTCheckResult
	MTASTS: MTASTSCheckResult
	SRVConf: SRVConfCheckResult
	Autoconf: AutoconfCheckResult
	Autodiscover: AutodiscoverCheckResult
}
export interface DNSSECResult {
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface IPRevCheckResult {
	Hostname: Domain // The hostname; IPs must resolve back to this name.
	IPNames?: { [key: string]: string[] | null } // IP to names.
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
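// Example (illustrative): IPNames maps each checked IP to the hostnames it
// reverse-resolves to, e.g.:
//
//	IPNames: {
//		"192.0.2.10": ["mail.example.com"],
//		"2001:db8::10": null, // no PTR record found
//	}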
// Domain is a domain name, with one or more labels, with at least an ASCII
// representation, and for IDNA non-ASCII domains a unicode representation.
// The ASCII string must be used for DNS lookups. The strings do not have a
// trailing dot. When using with StrictResolver, add the trailing dot.
export interface Domain {
	ASCII: string // A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved letters/digits/hyphens) labels. Always in lower case. No trailing dot.
	Unicode: string // Name as U-labels, in Unicode NFC. Empty if this is an ASCII-only domain. No trailing dot.
}
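// Example (illustrative): a parsed IDNA domain. The hypothetical name
// "café.example" would be represented as:
//
//	const d: Domain = {
//		ASCII: "xn--caf-dma.example", // always lower case, no trailing dot
//		Unicode: "café.example",      // empty for ASCII-only domains
//	}
//
// DNS lookups must use d.ASCII; with StrictResolver, use d.ASCII + ".".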
export interface MXCheckResult {
	Records?: MX[] | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface MX {
	Host: string
	Pref: number
	IPs?: string[] | null
}
export interface TLSCheckResult {
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface DANECheckResult {
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface SPFCheckResult {
	DomainTXT: string
	DomainRecord?: SPFRecord | null
	HostTXT: string
	HostRecord?: SPFRecord | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface SPFRecord {
	Version: string // Must be "spf1".
	Directives?: Directive[] | null // An IP is evaluated against each directive until a match is found.
	Redirect: string // Modifier that redirects SPF checks to another domain when no directive matched. Optional. For "redirect=".
	Explanation: string // Modifier for creating a user-friendly error message when an IP results in status "fail".
	Other?: Modifier[] | null // Other modifiers.
}
// Directive consists of a mechanism that describes how to check if an IP matches,
// an (optional) qualifier indicating the policy for a match, and optional
// parameters specific to the mechanism.
export interface Directive {
	Qualifier: string // Sets the result if this directive matches. "" and "+" are "pass", "-" is "fail", "?" is "neutral", "~" is "softfail".
	Mechanism: string // "all", "include", "a", "mx", "ptr", "ip4", "ip6", "exists".
	DomainSpec: string // For include, a, mx, ptr, exists. Always in lower-case when parsed using ParseRecord.
	IPstr: string // Original string for IP, always with /subnet.
	IP4CIDRLen?: number | null // For a, mx, ip4.
	IP6CIDRLen?: number | null // For a, mx, ip6.
}
// Modifier provides additional information for a policy.
// "redirect" and "exp" are not represented as a Modifier but explicitly in a Record.
export interface Modifier {
	Key: string // Key is case-insensitive.
	Value: string
}
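// Example (illustrative): a hypothetical TXT record
// "v=spf1 mx ip4:192.0.2.0/24 ~all" would parse into roughly:
//
//	const spf: SPFRecord = {
//		Version: "spf1",
//		Directives: [
//			{Qualifier: "", Mechanism: "mx", DomainSpec: "", IPstr: "", IP4CIDRLen: null, IP6CIDRLen: null},
//			{Qualifier: "", Mechanism: "ip4", DomainSpec: "", IPstr: "192.0.2.0/24", IP4CIDRLen: 24, IP6CIDRLen: null},
//			{Qualifier: "~", Mechanism: "all", DomainSpec: "", IPstr: "", IP4CIDRLen: null, IP6CIDRLen: null},
//		],
//		Redirect: "",
//		Explanation: "",
//		Other: null,
//	}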
export interface DKIMCheckResult {
	Records?: DKIMRecord[] | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface DKIMRecord {
	Selector: string
	TXT: string
	Record?: Record | null
}
// Record is a DKIM DNS record, served on <selector>._domainkey.<domain> for a
// given selector and domain (s= and d= in the DKIM-Signature).
//
// The record is a semicolon-separated list of "="-separated field value pairs.
// Strings should be compared case-insensitively, e.g. k=ed25519 is equivalent to k=ED25519.
//
// Example:
//
// v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504=
export interface Record {
	Version: string // Version, fixed "DKIM1" (case sensitive). Field "v".
	Hashes?: string[] | null // Acceptable hash algorithms, e.g. "sha1", "sha256". Optional, defaults to all algorithms. Field "h".
	Key: string // Key type, "rsa" or "ed25519". Optional, default "rsa". Field "k".
	Notes: string // Debug notes. Field "n".
	Pubkey?: string | null // Public key, as base64 in record. If empty, the key has been revoked. Field "p".
	Services?: string[] | null // Service types. Optional, default "*" for all services. Other values: "email". Field "s".
	Flags?: string[] | null // Flags, colon-separated. Optional, default is no flags. Other values: "y" for testing DKIM, "s" for "i=" must have the same domain as "d" in signatures. Field "t".
}
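// Example (illustrative): the example TXT above would parse into:
//
//	const dkim: Record = {
//		Version: "DKIM1",
//		Hashes: ["sha256"],
//		Key: "ed25519",
//		Notes: "",
//		Pubkey: "ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504=",
//		Services: null, // default "*"
//		Flags: null,    // default none
//	}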
export interface DMARCCheckResult {
	Domain: string
	TXT: string
	Record?: DMARCRecord | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface DMARCRecord {
	Version: string // "v=DMARC1", fixed.
	Policy: DMARCPolicy // Required, for "p=".
	SubdomainPolicy: DMARCPolicy // Like policy but for subdomains. Optional, for "sp=".
	AggregateReportAddresses?: URI[] | null // Optional, for "rua=". Destination addresses for aggregate reports.
	FailureReportAddresses?: URI[] | null // Optional, for "ruf=". Destination addresses for failure reports.
	ADKIM: Align // Alignment: "r" (default) for relaxed or "s" for strict. For "adkim=".
	ASPF: Align // Alignment: "r" (default) for relaxed or "s" for strict. For "aspf=".
	AggregateReportingInterval: number // In seconds, default 86400. For "ri=".
	FailureReportingOptions?: string[] | null // "0" (default), "1", "d", "s". For "fo=".
	ReportingFormat?: string[] | null // "afrf" (default). For "rf=".
	Percentage: number // Between 0 and 100, default 100. For "pct=". Policy applies randomly to this percentage of messages.
}
// URI is a destination address for reporting.
export interface URI {
	Address: string // Should start with "mailto:".
	MaxSize: number // Optional maximum message size, subject to Unit.
	Unit: string // "" (b), "k", "m", "g", "t" (case insensitive), unit size, where k is 2^10, etc.
}
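// Example (illustrative): a DMARC "rua=" destination with a size limit, such as
// "mailto:dmarc-reports@example.com!10m", would be represented as:
//
//	const rua: URI = {
//		Address: "mailto:dmarc-reports@example.com",
//		MaxSize: 10,
//		Unit: "m", // 10 * 2^20 bytes
//	}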
export interface TLSRPTCheckResult {
	TXT: string
	Record?: TLSRPTRecord | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface TLSRPTRecord {
	Version: string // "TLSRPTv1", for "v=".
	RUAs?: (RUA[] | null)[] | null // Aggregate reporting URIs, for "rua=". "rua=" can occur multiple times, each can be a list.
	Extensions?: Extension[] | null
}
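// Example (illustrative, assuming RUA is a string-like URI type defined elsewhere
// in this file): for a record with two "rua=" occurrences, e.g.
// "v=TLSRPTv1; rua=mailto:a@rep.example,https://rep.example/tlsrpt; rua=mailto:b@rep.example",
// RUAs holds one inner list per occurrence:
//
//	RUAs: [
//		["mailto:a@rep.example", "https://rep.example/tlsrpt"],
//		["mailto:b@rep.example"],
//	]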
// Extension is an additional key/value pair for a TLSRPT record.
export interface Extension {
	Key: string
	Value: string
}
export interface MTASTSCheckResult {
	TXT: string
	Record?: MTASTSRecord | null
	PolicyText: string
	Policy?: Policy | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface MTASTSRecord {
	Version: string // "STSv1", for "v=". Required.
	ID: string // Record version, for "id=". Required.
	Extensions?: Pair[] | null // Optional extensions.
}
// Pair is an extension key/value pair in a MTA-STS DNS record or policy.
export interface Pair {
	Key: string
	Value: string
}
// Policy is an MTA-STS policy as served at "https://mta-sts.<domain>/.well-known/mta-sts.txt".
export interface Policy {
	Version: string // "STSv1"
	Mode: Mode
	MX?: STSMX[] | null
	MaxAgeSeconds: number // How long this policy can be cached. Suggested values are in weeks or more.
	Extensions?: Pair[] | null
}
// STSMX is an allowlisted MX host name/pattern.
export interface STSMX {
	Wildcard: boolean // "*." wildcard, e.g. if a subdomain matches. A wildcard must match exactly one label: *.example.com matches mail.example.com, but not example.com, and not foo.bar.example.com.
	Domain: Domain
}
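// Example (illustrative; Mode is defined elsewhere in this file): a policy text of
//
//	version: STSv1
//	mode: enforce
//	mx: mail.example.com
//	mx: *.example.com
//	max_age: 1209600
//
// corresponds to:
//
//	const policy: Policy = {
//		Version: "STSv1",
//		Mode: "enforce" as Mode,
//		MX: [
//			{Wildcard: false, Domain: {ASCII: "mail.example.com", Unicode: ""}},
//			{Wildcard: true, Domain: {ASCII: "example.com", Unicode: ""}},
//		],
//		MaxAgeSeconds: 1209600,
//		Extensions: null,
//	}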
export interface SRVConfCheckResult {
	SRVs?: { [key: string]: SRV[] | null } // Service (e.g. "_imaps") to records.
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
// An SRV represents a single DNS SRV record.
export interface SRV {
	Target: string
	Port: number
	Priority: number
	Weight: number
}
export interface AutoconfCheckResult {
	ClientSettingsDomainIPs?: string[] | null
	IPs?: string[] | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface AutodiscoverCheckResult {
	Records?: AutodiscoverSRV[] | null
	Errors?: string[] | null
	Warnings?: string[] | null
	Instructions?: string[] | null
}
export interface AutodiscoverSRV {
	Target: string
	Port: number
	Priority: number
	Weight: number
	IPs?: string[] | null
}
export interface ConfigDomain {
	Description: string
	ClientSettingsDomain: string
	LocalpartCatchallSeparator: string
	LocalpartCaseSensitive: boolean
	DKIM: DKIM
	DMARC?: DMARC | null
	MTASTS?: MTASTS | null
	TLSRPT?: TLSRPT | null
	Routes?: Route[] | null
	Aliases?: { [key: string]: Alias }
	Domain: Domain
}
export interface DKIM {
	Selectors?: { [key: string]: Selector }
	Sign?: string[] | null
}
export interface Selector {
	Hash: string
	HashEffective: string
	Canonicalization: Canonicalization
	Headers?: string[] | null
	HeadersEffective?: string[] | null // Used when signing. Based on Headers from config, or the reasonable default.
	DontSealHeaders: boolean
	Expiration: string
	PrivateKeyFile: string
	Algorithm: string // "ed25519", "rsa-*", based on private key.
}
export interface Canonicalization {
	HeaderRelaxed: boolean
	BodyRelaxed: boolean
}
export interface DMARC {
	Localpart: string
	Domain: string
	Account: string
	Mailbox: string
	ParsedLocalpart: Localpart
	DNSDomain: Domain // Effective domain, always set based on Domain field or Domain where this is configured.
}
export interface MTASTS {
	PolicyID: string
	Mode: Mode
	MaxAge: number
	MX?: string[] | null
}
export interface TLSRPT {
	Localpart: string
	Domain: string
	Account: string
	Mailbox: string
	ParsedLocalpart: Localpart
	DNSDomain: Domain // Effective domain, always set based on Domain field or Domain where this is configured.
}
export interface Route {
	FromDomain?: string[] | null
	ToDomain?: string[] | null
	MinimumAttempts: number
	Transport: string
	FromDomainASCII?: string[] | null
	ToDomainASCII?: string[] | null
}
export interface Alias {
	Addresses?: string[] | null
	PostPublic: boolean
	ListMembers: boolean
	AllowMsgFrom: boolean
	LocalpartStr: string // In encoded form.
	Domain: Domain
	ParsedAddresses?: AliasAddress[] | null // Matches addresses.
}
export interface AliasAddress {
	Address: Address // Parsed address.
	AccountName: string // Looked up.
	Destination: Destination // Belonging to address.
}
// Address is a parsed email address.
export interface Address {
	Localpart: Localpart
	Domain: Domain // todo: shouldn't we accept an ip address here too? and merge this type into smtp.Path.
}
export interface Destination {
	Mailbox: string
	Rulesets?: Ruleset[] | null
	FullName: string
}
export interface Ruleset {
	SMTPMailFromRegexp: string
	MsgFromRegexp: string
	VerifiedDomain: string
	HeadersRegexp?: { [key: string]: string }
	IsForward: boolean // todo: once we implement ARC, we can use dkim domains that we cannot verify but that the arc-verified forwarding mail server was able to verify.
	ListAllowDomain: string
	AcceptRejectsToMailbox: string
	Mailbox: string
	Comment: string
	VerifiedDNSDomain: Domain
	ListAllowDNSDomain: Domain
}
export interface Account {
	OutgoingWebhook?: OutgoingWebhook | null
	IncomingWebhook?: IncomingWebhook | null
	FromIDLoginAddresses?: string[] | null
	KeepRetiredMessagePeriod: number
	KeepRetiredWebhookPeriod: number
	Domain: string
	Description: string
	FullName: string
	Destinations?: { [key: string]: Destination }
	SubjectPass: SubjectPass
	QuotaMessageSize: number
	RejectsMailbox: string
	KeepRejects: boolean
	AutomaticJunkFlags: AutomaticJunkFlags
	JunkFilter?: JunkFilter | null // todo: sane defaults for junkfilter
	MaxOutgoingMessagesPerDay: number
	MaxFirstTimeRecipientsPerDay: number
	NoFirstTimeSenderDelay: boolean
	Routes?: Route[] | null
	DNSDomain: Domain // Parsed form of Domain.
	Aliases?: AddressAlias[] | null
}
export interface OutgoingWebhook {
	URL: string
	Authorization: string
	Events?: string[] | null
}
export interface IncomingWebhook {
	URL: string
	Authorization: string
}
export interface SubjectPass {
	Period: number // todo: have a reasonable default for this?
}
export interface AutomaticJunkFlags {
	Enabled: boolean
	JunkMailboxRegexp: string
	NeutralMailboxRegexp: string
	NotJunkMailboxRegexp: string
}
export interface JunkFilter {
	Threshold: number
	Onegrams: boolean
	Twograms: boolean
	Threegrams: boolean
	MaxPower: number
	TopWords: number
	IgnoreWords: number
	RareWords: number
}
export interface AddressAlias {
	SubscriptionAddress: string
	Alias: Alias // Without members.
	MemberAddresses?: string[] | null // Only if allowed to see.
}
// PolicyRecord is a cached policy or absence of a policy.
export interface PolicyRecord {
	Domain: string // Domain name, with unicode characters.
	Inserted: Date
	ValidEnd: Date
	LastUpdate: Date // Policies are refreshed on use and periodically.
	LastUse: Date
	Backoff: boolean
	RecordID: string // As retrieved from DNS.
	Version: string // "STSv1"
	Mode: Mode
	MX?: STSMX[] | null
	MaxAgeSeconds: number // How long this policy can be cached. Suggested values are in weeks or more.
	Extensions?: Pair[] | null
	PolicyText: string // Text that makes up the policy, as retrieved. We didn't store this in the past. If empty, the policy can be reconstructed from the Policy field. Needed by TLSRPT.
}
// TLSReportRecord is a TLS report as a database record, including information
// about the sender.
export interface TLSReportRecord {
	ID: number
	Domain: string // Policy domain to which the TLS report applies. Unicode.
	FromDomain: string
	MailFrom: string
	HostReport: boolean // Report for host TLSRPT record, as opposed to domain TLSRPT record.
	Report: Report
}
// Report is a TLSRPT report.
export interface Report {
	OrganizationName: string
	DateRange: TLSRPTDateRange
	ContactInfo: string
	ReportID: string
	Policies?: Result[] | null
}
// note: with TLSRPT prefix to prevent clash in sherpadoc types.
export interface TLSRPTDateRange {
	Start: Date
	End: Date
}
export interface Result {
	Policy: ResultPolicy
	Summary: Summary
	FailureDetails?: FailureDetails[] | null
}
export interface ResultPolicy {
	Type: string
	String?: string[] | null
	Domain: string // ASCII/A-labels, ../rfc/8460:704
	MXHost?: string[] | null
}
export interface Summary {
	TotalSuccessfulSessionCount: number
	TotalFailureSessionCount: number
}
export interface FailureDetails {
	ResultType: string
	SendingMTAIP: string
	ReceivingMXHostname: string
	ReceivingMXHelo: string
	ReceivingIP: string
	FailedSessionCount: number
	AdditionalInformation: string
	FailureReasonCode: string
}
// TLSRPTSummary presents TLS reporting statistics for a single domain
// over a period.
export interface TLSRPTSummary {
	PolicyDomain: Domain
	Success: number
	Failure: number
	ResultTypeCounts?: { [key: string]: number }
}
// DomainFeedback is a single report stored in the database.
export interface DomainFeedback {
	ID: number
	Domain: string // Domain where DMARC DNS record was found, could be organizational domain.
	FromDomain: string // Domain in From-header.
	Version: string
	ReportMetadata: ReportMetadata
	PolicyPublished: PolicyPublished
	Records?: ReportRecord[] | null
}
export interface ReportMetadata {
	OrgName: string
	Email: string
	ExtraContactInfo: string
	ReportID: string
	DateRange: DateRange
	Errors?: string[] | null
}
export interface DateRange {
	Begin: number
	End: number
}
// PolicyPublished is the policy as found in DNS for the domain.
export interface PolicyPublished {
	Domain: string // Domain is where DMARC record was found, not necessarily message From. Reports we generate use unicode names, incoming reports may have either ASCII-only or Unicode domains.
	ADKIM: string
	ASPF: string
	Policy: string
	SubdomainPolicy: string
	Percentage: number
	ReportingOptions: string
}
export interface ReportRecord {
	Row: Row
	Identifiers: Identifiers
	AuthResults: AuthResults
}
export interface Row {
	SourceIP: string // SourceIP must match the pattern ((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5]).){3}(1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])|([A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}
	Count: number
	PolicyEvaluated: PolicyEvaluated
}
export interface PolicyEvaluated {
	Disposition: string
	DKIM: string
	SPF: string
	Reasons?: PolicyOverrideReason[] | null
}
export interface PolicyOverrideReason {
	Type: string
	Comment: string
}
export interface Identifiers {
	EnvelopeTo: string
	EnvelopeFrom: string
	HeaderFrom: string
}
export interface AuthResults {
	DKIM?: DKIMAuthResult[] | null
	SPF?: SPFAuthResult[] | null
}
export interface DKIMAuthResult {
	Domain: string
	Selector: string
	Result: string
	HumanResult: string
}
export interface SPFAuthResult {
	Domain: string
	Scope: string
	Result: string
}
// DMARCSummary presents DMARC aggregate reporting statistics for a single domain
// over a period.
export interface DMARCSummary {
	Domain: string
	Total: number
	DispositionNone: number
	DispositionQuarantine: number
	DispositionReject: number
	DKIMFail: number
	SPFFail: number
	PolicyOverrides?: { [key: string]: number }
}
// Reverse is the result of a reverse lookup.
export interface Reverse {
	Hostnames?: string[] | null
}
// ClientConfigs holds the client configuration for IMAP/Submission for a
// domain.
export interface ClientConfigs {
	Entries?: ClientConfigsEntry[] | null
}
export interface ClientConfigsEntry {
	Protocol: string
	Host: Domain
	Port: number
	Listener: string
	Note: string
}
// HoldRule is a set of conditions that cause a matching message to be marked as on
// hold when it is queued. An all-empty rule matches all messages, effectively
// pausing the entire queue.
export interface HoldRule {
	ID: number
	Account: string
	SenderDomain: Domain
	RecipientDomain: Domain
	SenderDomainStr: string // Unicode.
	RecipientDomainStr: string // Unicode.
}
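// Example (illustrative): a hold rule that only sets Account pauses all messages
// queued for that account; a rule with all conditions empty pauses the whole queue:
//
//	const pauseAll: HoldRule = {
//		ID: 0,
//		Account: "",
//		SenderDomain: {ASCII: "", Unicode: ""},
//		RecipientDomain: {ASCII: "", Unicode: ""},
//		SenderDomainStr: "",
//		RecipientDomainStr: "",
//	}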
// Filter filters messages to list or operate on. Used by admin web interface
// and cli.
//
// Only non-empty/non-zero values are applied to the filter. Leaving all fields
// empty/zero matches all messages.
export interface Filter {
	Max: number
	IDs?: number[] | null
	Account: string
	From: string
	To: string
	Hold?: boolean | null
	Submitted: string // Whether submitted before/after a time relative to now. ">$duration" or "<$duration", also with "now" for duration.
	NextAttempt: string // ">$duration" or "<$duration", also with "now" for duration.
	Transport?: string | null
}
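// Example (illustrative, with a hypothetical account name): list up to 10 held
// messages for one account, filtered on submission time relative to now:
//
//	const f: Filter = {
//		Max: 10,
//		IDs: null,
//		Account: "exampleaccount",
//		From: "",
//		To: "",
//		Hold: true,
//		Submitted: ">1h", // relative to now; see the Submitted field comment for the before/after semantics
//		NextAttempt: "",
//		Transport: null,
//	}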
export interface Sort {
	Field: string // "Queued" or "NextAttempt"/"".
	LastID: number // If > 0, we return objects beyond this, less/greater depending on Asc.
	Last: any // Value of Field for the last object. Must be set iff LastID is set.
	Asc: boolean // Ascending if set, else descending.
}
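// Example (illustrative): keyset pagination over the queue, oldest first. For the
// next page, set LastID and Last to the ID and Queued value of the last message
// returned by the previous call:
//
//	const sort: Sort = {
//		Field: "Queued",
//		LastID: 123, // hypothetical ID of the last message seen
//		Last: "2024-04-15T10:00:00Z", // its Queued value
//		Asc: true,
//	}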
// Msg is a message in the queue.
//
// Use MakeMsg to make a message with fields that Add needs. Add will further set
// queueing related fields.
export interface Msg {
	ID: number
	BaseID: number // A message for multiple recipients will get a BaseID that is identical to the first Msg.ID queued. The message contents will be identical for each recipient, including MsgPrefix. If other properties are identical too, including recipient domain, multiple Msgs may be delivered in a single SMTP transaction. For messages with a single recipient, this field will be 0.
	Queued: Date
	Hold: boolean // If set, delivery won't be attempted.
	SenderAccount: string // Failures are delivered back to this local account. Also used for routing.
	SenderLocalpart: Localpart // Should be a local user and domain.
	SenderDomain: IPDomain
	SenderDomainStr: string // For filtering, unicode.
	FromID: string // For transactional messages, used to match later DSNs.
	RecipientLocalpart: Localpart // Typically a remote user and domain.
	RecipientDomain: IPDomain
	RecipientDomainStr: string // For filtering, unicode domain. Can also contain an IP enclosed in [].
	Attempts: number // Next attempt is based on last attempt and exponential backoff based on attempts.
	MaxAttempts: number // Max number of attempts before giving up. If 0, the default of 8 attempts is used instead.
	DialedIPs?: { [key: string]: IP[] | null } // For each host, the IPs that were dialed. Used for IP selection for later attempts.
	NextAttempt: Date // For scheduling.
	LastAttempt?: Date | null
	Results?: MsgResult[] | null
	Has8bit: boolean // Whether message contains bytes with high bit set, determines whether 8BITMIME SMTP extension is needed.
	SMTPUTF8: boolean // Whether message requires use of SMTPUTF8.
	IsDMARCReport: boolean // Delivery failures for DMARC reports are handled differently.
	IsTLSReport: boolean // Delivery failures for TLS reports are handled differently.
	Size: number // Full size of message, combined MsgPrefix with contents of message file.
	MessageID: string // Message-ID header, including <>. Used when composing a DSN, in its References header.
	MsgPrefix?: string | null // Data to send before the contents from the file, typically with headers like DKIM-Signature.
	Subject: string // For context about delivery.
	DSNUTF8?: string | null // If set, this message is a DSN and this is a version using utf-8, for the case the remote MTA supports smtputf8. In this case, Size and MsgPrefix are not relevant.
	Transport: string // If non-empty, the transport to use for this message. Can be set through cli or admin interface. If empty (the default for a submitted message), regular routing rules apply.
	RequireTLS?: boolean | null // RequireTLS influences TLS verification during delivery. If nil, the recipient domain policy is followed (MTA-STS and/or DANE), falling back to optional opportunistic non-verified STARTTLS. If RequireTLS is true (through SMTP REQUIRETLS extension or webmail submit), MTA-STS or DANE is required, as well as REQUIRETLS support by the next hop server. If RequireTLS is false (through message header "TLS-Required: No"), the recipient domain's policy is ignored if it does not lead to a successful TLS connection, i.e. falling back to SMTP delivery with unverified STARTTLS or plain text.
	FutureReleaseRequest: string // For DSNs, where the original FUTURERELEASE value must be included as per-message field. This field should be of the form "for;" plus interval, or "until;" plus utc date-time.
Extra ? : { [ key : string ] : string } // Extra information, for transactional email.
2023-12-31 13:55:22 +03:00
}
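// a hedged illustration (not generated output) of how the queue message
// fields above can be populated when submitting; all values are invented
// for the example.
const exampleQueueMsgFields = {
	MessageID: "<a1b2c3@mail.example.org>",	// including <>, used in a DSN References header
	Subject: "monthly invoice",	// context about the delivery
	Transport: "",	// empty: regular routing rules apply
	RequireTLS: null,	// null: follow the recipient domain policy (MTA-STS and/or DANE)
	FutureReleaseRequest: "until;2030-01-01T09:00:00Z",	// "for;" + interval or "until;" + utc date-time
	Extra: { invoiceID: "2024-0042" },	// carried through the queue and webhooks
}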
// IPDomain is an ip address, a domain, or empty.
export interface IPDomain {
IP : IP
Domain : Domain
}
2024-04-15 22:49:02 +03:00
// MsgResult is the result (or work in progress) of a delivery attempt.
export interface MsgResult {
Start : Date
Duration : number
Success : boolean
Code : number
Secode : string
Error : string
}
// RetiredFilter filters messages to list or operate on. Used by admin web interface
// and cli.
//
// Only non-empty/non-zero values are applied to the filter. Leaving all fields
// empty/zero matches all messages.
export interface RetiredFilter {
Max : number
IDs? : number [ ] | null
Account : string
From : string
To : string
Submitted : string // Whether submitted before/after a time relative to now. ">$duration" or "<$duration", also with "now" for duration.
LastActivity : string // ">$duration" or "<$duration", also with "now" for duration.
Transport? : string | null
Success? : boolean | null
}
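// a hedged example of a RetiredFilter as the admin web interface or cli
// might construct it: only non-empty/non-zero fields apply, so this matches
// at most 50 retired messages for one account. the account name and
// duration are invented; the "<"/">" forms follow the field docs above.
const exampleRetiredFilter: RetiredFilter = {
	Max: 50,
	Account: "catchall",	// invented account name
	From: "",	// zero value: not filtered on
	To: "",
	Submitted: "<24h",	// "<$duration" form from the Submitted field docs
	LastActivity: "",	// zero value: not filtered on
}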
export interface RetiredSort {
Field : string // "Queued" or "LastActivity"; the empty string means "LastActivity".
LastID : number // If > 0, we return objects beyond this, less/greater depending on Asc.
Last : any // Value of Field for last object. Must be set iff LastID is set.
Asc : boolean // Ascending, or descending.
}
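// a sketch of paging with RetiredSort per the field docs: after each page,
// the last object's ID and sort-field value are passed back so the server
// can return objects beyond them. queueRetiredList is a hypothetical list
// call standing in for the actual api method.
declare function queueRetiredList(f: RetiredFilter, s: RetiredSort): Promise<MsgRetired[]>

async function allRetired(filter: RetiredFilter): Promise<MsgRetired[]> {
	const all: MsgRetired[] = []
	let sort: RetiredSort = { Field: "LastActivity", LastID: 0, Last: null, Asc: false }
	for (;;) {
		const page = await queueRetiredList(filter, sort)
		all.push(...page)
		if (filter.Max === 0 || page.length < filter.Max) {
			return all	// no limit, or a short page: nothing further
		}
		const last = page[page.length - 1]
		sort = { ...sort, LastID: last.ID, Last: last.LastActivity }	// Last set iff LastID is set
	}
}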
// MsgRetired is a message for which delivery completed, either successful,
// failed/canceled. Retired messages are only stored if so configured, and will be
// cleaned up after the configured period.
export interface MsgRetired {
ID : number // Same ID as it was as Msg.ID.
BaseID : number
Queued : Date
SenderAccount : string // Failures are delivered back to this local account. Also used for routing.
SenderLocalpart : Localpart // Should be a local user and domain.
SenderDomainStr : string // For filtering, unicode.
FromID : string // Used to match DSNs.
RecipientLocalpart : Localpart // Typically a remote user and domain.
RecipientDomain : IPDomain
RecipientDomainStr : string // For filtering, unicode.
Attempts : number // Next attempt is based on the last attempt and exponential backoff based on the number of attempts.
MaxAttempts : number // Max number of attempts before giving up. If 0, then the default of 8 attempts is used instead.
DialedIPs ? : { [ key : string ] : IP [ ] | null } // For each host, the IPs that were dialed. Used for IP selection for later attempts.
LastAttempt? : Date | null
Results? : MsgResult [ ] | null
Has8bit : boolean // Whether message contains bytes with high bit set, determines whether 8BITMIME SMTP extension is needed.
SMTPUTF8 : boolean // Whether message requires use of SMTPUTF8.
IsDMARCReport : boolean // Delivery failures for DMARC reports are handled differently.
IsTLSReport : boolean // Delivery failures for TLS reports are handled differently.
Size : number // Full size of message, combined MsgPrefix with contents of message file.
MessageID : string // Used when composing a DSN, in its References header.
Subject : string // For context about delivery.
Transport : string
RequireTLS? : boolean | null
FutureReleaseRequest : string
Extra ? : { [ key : string ] : string } // Extra information, for transactional email.
LastActivity : Date
RecipientAddress : string
Success : boolean // Whether delivery to next hop succeeded.
KeepUntil : Date
}
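// a hedged sketch of relating a DSN back to a MsgRetired via FromID: with a
// localpart catchall separator such as "+", the unique "mail from" is the
// sender localpart plus the separator plus the random fromid, so splitting
// the DSN's recipient localpart recovers the fromid to look up. the
// separator and addresses here are illustrative.
function fromIDFromLocalpart(localpart: Localpart, separator = "+"): string {
	const i = localpart.lastIndexOf(separator)
	return i < 0 ? "" : localpart.substring(i + separator.length)
}

// e.g. a DSN for "newsletter+a1b2c3@example.org" yields "a1b2c3", matched
// against MsgRetired.FromID.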
// HookFilter filters messages to list or operate on. Used by admin web interface
// and cli.
//
// Only non-empty/non-zero values are applied to the filter. Leaving all fields
// empty/zero matches all hooks.
export interface HookFilter {
Max : number
IDs? : number [ ] | null
Account : string
Submitted : string // Whether submitted before/after a time relative to now. ">$duration" or "<$duration", also with "now" for duration.
NextAttempt : string // ">$duration" or "<$duration", also with "now" for duration.
Event : string // Including "incoming".
}
export interface HookSort {
Field : string // "Queued" or "NextAttempt"; the empty string means "NextAttempt".
LastID : number // If > 0, we return objects beyond this, less/greater depending on Asc.
Last : any // Value of Field for last object. Must be set iff LastID is set.
Asc : boolean // Ascending, or descending.
}
// Hook is a webhook call about a delivery. We'll try delivering with backoff until we succeed or fail.
export interface Hook {
ID : number
QueueMsgID : number // Original queue Msg/MsgRetired ID. Zero for hooks for incoming messages.
FromID : string // As generated by us and returned in webapi call. Can be empty, for incoming messages to our base address.
MessageID : string // Of outgoing or incoming messages. Includes <>.
Subject : string // Subject of original outgoing message, or of incoming message.
Extra ? : { [ key : string ] : string } // From submitted message.
Account : string
URL : string // Taken from config when webhook is scheduled.
Authorization : string // Optional value for authorization header to include in HTTP request.
IsIncoming : boolean
OutgoingEvent : string // Empty string if not outgoing.
Payload : string // JSON data to be submitted.
Submitted : Date
Attempts : number
NextAttempt : Date // Index for fast scheduling.
Results? : HookResult [ ] | null
}
// HookResult is the result of a single attempt to deliver a webhook.
export interface HookResult {
Start : Date
Duration : number
URL : string
Success : boolean
Code : number // eg 200, 404, 500. 2xx implies success.
Error : string
Response : string // Max 512 bytes of HTTP response body.
}
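// a minimal receiver sketch for the webhook calls described by Hook and
// HookResult: mox posts the Payload as a json body, including the
// configured Authorization header value if any. the port and the expected
// header value below are invented for the example.
import * as http from "node:http"

const expectedAuth = "Basic d2ViaG9vazpzZWNyZXQ="	// hypothetical configured value

http.createServer((req, res) => {
	if (req.headers.authorization !== expectedAuth) {
		res.writeHead(401).end()
		return
	}
	let body = ""
	req.on("data", (chunk) => { body += chunk })
	req.on("end", () => {
		const event = JSON.parse(body)	// the json Payload queued with the Hook
		console.log("webhook event", event)
		res.writeHead(200).end()	// 2xx implies success, see HookResult.Code
	})
}).listen(8888)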
// HookRetiredFilter filters messages to list or operate on. Used by admin web interface
// and cli.
//
// Only non-empty/non-zero values are applied to the filter. Leaving all fields
// empty/zero matches all hooks.
export interface HookRetiredFilter {
Max : number
IDs? : number [ ] | null
Account : string
Submitted : string // Whether submitted before/after a time relative to now. ">$duration" or "<$duration", also with "now" for duration.
LastActivity : string // ">$duration" or "<$duration", also with "now" for duration.
Event : string // Including "incoming".
}
export interface HookRetiredSort {
Field : string // "Queued" or "LastActivity"; the empty string means "LastActivity".
LastID : number // If > 0, we return objects beyond this, less/greater depending on Asc.
Last : any // Value of Field for last object. Must be set iff LastID is set.
Asc : boolean // Ascending, or descending.
}
// HookRetired is a Hook that was delivered/failed/canceled and kept according
// to the configuration.
export interface HookRetired {
ID : number // Same as original Hook.ID.
QueueMsgID : number // Original queue Msg or MsgRetired ID. Zero for hooks for incoming messages.
FromID : string // As generated by us and returned in webapi call. Can be empty, for incoming messages to our base address.
MessageID : string // Of outgoing or incoming messages. Includes <>.
Subject : string // Subject of original outgoing message, or of incoming message.
Extra ? : { [ key : string ] : string } // From submitted message.
Account : string
URL : string // Taken from config at start of each attempt.
Authorization : boolean // Whether the request had an authorization header; the value itself is not kept.
IsIncoming : boolean
OutgoingEvent : string
Payload : string // JSON data submitted.
Submitted : Date
SupersededByID : number // If not 0, a Hook.ID that superseded this one and Done will be true.
Attempts : number
Results? : HookResult [ ] | null
Success : boolean
LastActivity : Date
KeepUntil : Date
}
2023-12-31 13:55:22 +03:00
// WebserverConfig is the combination of WebDomainRedirects and WebHandlers
// from the domains.conf configuration file.
export interface WebserverConfig {
WebDNSDomainRedirects ? : ( Domain [ ] | null ) [ ] | null // From server to frontend.
WebDomainRedirects ? : ( string [ ] | null ) [ ] | null // From frontend to server, it's not convenient to create dns.Domain in the frontend.
WebHandlers? : WebHandler [ ] | null
}
export interface WebHandler {
LogName : string
Domain : string
PathRegexp : string
DontRedirectPlainHTTP : boolean
Compress : boolean
WebStatic? : WebStatic | null
WebRedirect? : WebRedirect | null
WebForward? : WebForward | null
improve http request handling for internal services and multiple domains
per listener, you could enable the admin/account/webmail/webapi handlers. but
that would serve those services on their configured paths (/admin/, /,
/webmail/, /webapi/) on all domains mox would be webserving, including any
non-mail domains. so your www.example.org/admin/ would be serving the admin web
interface, with no way to disable that.
with this change, the admin interface is only served on requests to (based on
Host header):
- ip addresses
- the listener host name (explicitly configured in the listener, with fallback
to global hostname)
- "localhost" (for ssh tunnel/forwarding scenario's)
the account/webmail/webapi interfaces are served on the same domains as the
admin interface, and additionally:
- the client settings domains, as optionally configured in each Domain in
domains.conf. typically "mail.<yourdomain>".
this means the internal services are no longer served on other domains
configured in the webserver, e.g. www.example.org/admin/ will not be handled
specially.
the order of evaluation of routes/services is also changed:
before this change, the internal handlers would always be evaluated first.
with this change, only the system handlers for
MTA-STS/autoconfig/ACME-validation will be evaluated first. then the webserver
handlers. and finally the internal services (admin/account/webmail/webapi).
this allows an admin to configure overrides for some of the domains (per
hostname-matching rules explained above) that would normally serve these
services.
webserver handlers can now be configured that pass the request to an internal
service: in addition to the existing static/redirect/forward config options,
there is now an "internal" config option, naming the service
(admin/account/webmail/webapi) for handling the request. this allows enabling
the internal services on custom domains.
for issue #160 by TragicLifeHu, thanks for reporting!
2024-05-11 12:13:14 +03:00
WebInternal? : WebInternal | null
2023-12-31 13:55:22 +03:00
Name : string // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics.
DNSDomain : Domain
}
export interface WebStatic {
StripPrefix : string
Root : string
ListFiles : boolean
ContinueNotFound : boolean
ResponseHeaders ? : { [ key : string ] : string }
}
export interface WebRedirect {
BaseURL : string
OrigPathRegexp : string
ReplacePath : string
StatusCode : number
}
export interface WebForward {
StripPath : boolean
URL : string
ResponseHeaders ? : { [ key : string ] : string }
}
2024-05-11 12:13:14 +03:00
export interface WebInternal {
BasePath : string
Service : string
}
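// a hedged example of the new "internal" option described above: a
// WebInternal that serves the webmail on whatever domain its WebHandler
// matches. the path and service name are used illustratively.
const exampleWebInternal: WebInternal = {
	BasePath: "/webmail/",
	Service: "webmail",	// one of admin/account/webmail/webapi
}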
2023-12-31 13:55:22 +03:00
// Transport is a method to deliver a message. At most one of the fields can
// be non-nil. The non-nil field represents the type of transport. For a
// transport with all fields nil, regular email delivery is done.
export interface Transport {
Submissions? : TransportSMTP | null
Submission? : TransportSMTP | null
SMTP? : TransportSMTP | null
Socks? : TransportSocks | null
2024-04-08 22:50:30 +03:00
Direct? : TransportDirect | null
2023-12-31 13:55:22 +03:00
}
// TransportSMTP delivers messages by "submission" (SMTP, typically
// authenticated) to the queue of a remote host (smarthost), or by relaying
// (SMTP, typically unauthenticated).
export interface TransportSMTP {
Host : string
Port : number
STARTTLSInsecureSkipVerify : boolean
NoSTARTTLS : boolean
Auth? : SMTPAuth | null
}
// SMTPAuth holds authentication credentials used when delivering messages
// through a smarthost.
export interface SMTPAuth {
Username : string
Password : string
Mechanisms? : string [ ] | null
}
export interface TransportSocks {
Address : string
RemoteIPs? : string [ ] | null
RemoteHostname : string
}
2024-04-08 22:50:30 +03:00
export interface TransportDirect {
DisableIPv4 : boolean
DisableIPv6 : boolean
}
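// a hedged example of a Transport delivering through an authenticated
// smarthost with implicit tls ("submissions"); host, port and credentials
// are invented. per the Transport doc comment, at most one variant field
// is set.
const exampleTransport: Transport = {
	Submissions: {
		Host: "smtp.relay.example",
		Port: 465,
		STARTTLSInsecureSkipVerify: false,
		NoSTARTTLS: false,
		Auth: {
			Username: "relayuser",
			Password: "relaypassword",
			Mechanisms: ["SCRAM-SHA-256"],	// restrict to a single sasl mechanism
		},
	},
}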
2023-12-31 13:55:22 +03:00
// EvaluationStat summarizes stored evaluations, for inclusion in an upcoming
// aggregate report, for a domain.
export interface EvaluationStat {
Domain : Domain
Dispositions? : string [ ] | null
Count : number
SendReport : boolean
}
// Evaluation is the result of an evaluation of a DMARC policy, to be included
// in a DMARC report.
export interface Evaluation {
ID : number
PolicyDomain : string // Domain where DMARC policy was found, could be the organizational domain while evaluation was for a subdomain. Unicode. Same as domain found in PolicyPublished. A separate field for its index.
Evaluated : Date // Time of evaluation, determines which report (covering whole hours) this evaluation will be included in.
Optional : boolean // If optional, this evaluation is not a reason to send a DMARC report, but it will be included when a report is sent due to other non-optional evaluations. Set for evaluations of incoming DMARC reports. We don't want such deliveries causing us to send a report, or we would keep exchanging reporting messages forever. Also set for when evaluation is a DMARC reject for domains we haven't positively interacted with, to prevent being used to flood an unsuspecting domain with reports.
IntervalHours : number // Effective aggregate reporting interval in hours. Between 1 and 24: rounded up from the policy's interval in seconds to whole hours, then up to the first number of hours that divides 24.
Addresses? : string [ ] | null // "rua" in DMARC record, we only store evaluations for records with aggregate reporting addresses, so always non-empty.
PolicyPublished : PolicyPublished // Policy used for evaluation. We don't store the "fo" field for failure reporting options, since we don't send failure reports for individual messages.
SourceIP : string // For "row" in a report record.
2024-05-09 16:58:14 +03:00
Disposition : string
2023-12-31 13:55:22 +03:00
AlignedDKIMPass : boolean
AlignedSPFPass : boolean
OverrideReasons? : PolicyOverrideReason [ ] | null
EnvelopeTo : string // For "identifiers" in a report record.
EnvelopeFrom : string
HeaderFrom : string
DKIMResults? : DKIMAuthResult [ ] | null // For "auth_results" in a report record.
SPFResults? : SPFAuthResult [ ] | null
}
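// a small sketch of the IntervalHours rounding described above: the
// policy's reporting interval in seconds is rounded up to whole hours,
// then up to the first number of hours that divides 24, so report periods
// tile a day. this is an interpretation of the field docs, not code from
// mox itself.
function effectiveIntervalHours(policySeconds: number): number {
	const hours = Math.max(1, Math.ceil(policySeconds / 3600))
	for (let h = hours; h < 24; h++) {
		if (24 % h === 0) {
			return h
		}
	}
	return 24
}

// e.g. 3600s -> 1, 7200s -> 2, 18000s (5h) -> 6, 90000s (25h) -> 24.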
// SuppressAddress is a reporting address for which outgoing DMARC reports
// will be suppressed for a period.
export interface SuppressAddress {
ID : number
Inserted : Date
ReportingAddress : string
Until : Date
Comment : string
}
// TLSResult is stored in the database to track TLS results per policy domain, day
// and recipient domain. These records will be included in TLS reports.
export interface TLSResult {
ID : number
PolicyDomain : string // Domain potentially with TLSRPT DNS record, with addresses that will receive reports. Either a recipient domain (for MTA-STS policies) or an (MX) host (for DANE policies). Unicode.
DayUTC : string // DayUTC is of the form yyyymmdd.
RecipientDomain : string // Reports are sent per recipient domain and per MX host. For reports to a recipient domain, we typically send a result for MTA-STS and one or more MX host (DANE) results. Unicode.
Created : Date
Updated : Date
IsHost : boolean // Result is for MX host (DANE), not recipient domain (MTA-STS).
SendReport : boolean // Whether to send a report. TLS results for delivering messages with TLS reports will be recorded, but will not cause a report to be sent.
SentToRecipientDomain : boolean // Set after sending to recipient domain, before sending results to policy domain (after which the record is removed).
RecipientDomainReportingAddresses? : string [ ] | null // Reporting addresses from the recipient domain TLSRPT record, not necessarily those we sent to (e.g. due to failure). Used to leave out results for MX target (DANE) policy domains that were already sent in the report to the recipient domain, so we don't report twice.
SentToPolicyDomain : boolean // Set after sending report to policy domain.
Results? : Result [ ] | null // Results is updated for each TLS attempt.
}
2024-04-19 11:51:24 +03:00
// TLSRPTSuppressAddress is a reporting address for which outgoing TLS reports
2023-12-31 13:55:22 +03:00
// will be suppressed for a period.
export interface TLSRPTSuppressAddress {
ID : number
Inserted : Date
ReportingAddress : string
Until : Date
Comment : string
}
2024-04-18 12:14:24 +03:00
// Dynamic is the parsed form of domains.conf, and is automatically reloaded when changed.
export interface Dynamic {
Domains ? : { [ key : string ] : ConfigDomain }
Accounts ? : { [ key : string ] : Account }
WebDomainRedirects ? : { [ key : string ] : string }
WebHandlers? : WebHandler [ ] | null
Routes? : Route [ ] | null
MonitorDNSBLs? : string [ ] | null
MonitorDNSBLZones? : Domain [ ] | null
}
implement tls client certificate authentication
the imap & smtp servers now allow logging in with tls client authentication and
the "external" sasl authentication mechanism. email clients like thunderbird,
fairemail, k9, macos mail implement it. this seems to be the most secure among
the authentication mechanisms commonly implemented by clients. a useful property
is that an account can have a separate tls public key for each device/email
client. with tls client cert auth, authentication is also bound to the tls
connection. a mitm cannot pass the credentials on to another tls connection,
similar to scram-*-plus. though part of scram-*-plus is that clients verify
that the server knows the client credentials.
for tls client auth with imap, we send a "preauth" untagged message by default.
that puts the connection in authenticated state. given the imap connection
state machine, further authentication commands are not allowed. some clients
don't recognize the preauth message, and try to authenticate anyway, which
fails. a tls public key has a config option to disable preauth, keeping new
connections in unauthenticated state, to work with such email clients.
for smtp (submission), we don't require an explicit auth command.
both for imap and smtp, we allow a client to authenticate with another
mechanism than "external". in that case, credentials are verified, and have to
be for the same account as the tls client auth, but the address can be another
one than the login address configured with the tls public key.
only the public key is used to identify the account that is authenticating. we
ignore the rest of the certificate. expiration dates, names, constraints, etc
are not verified. no certificate authorities are involved.
users can upload their own (minimal) certificate. the account web interface
shows openssl commands you can run to generate a private key, minimal cert, and
a p12 file (the format that email clients seem to like...) containing both
private key and certificate.
the imapclient & smtpclient packages can now also use tls client auth. and so
does "mox sendmail", either with a pem file with private key and certificate,
or with just an ed25519 private key.
there are new subcommands "mox config tlspubkey ..." for
adding/removing/listing tls public keys from the cli, by the admin.
2024-12-06 00:41:49 +03:00
// TLSPublicKey is a public key for use with TLS client authentication based on the
// public key of the certificate.
export interface TLSPublicKey {
Fingerprint : string // Raw-url-base64-encoded Subject Public Key Info of certificate.
Created : Date
Type : string // E.g. "rsa-2048", "ecdsa-p256", "ed25519"
Name : string // Descriptive name to identify the key, e.g. the device where key is used.
NoIMAPPreauth : boolean // If set, new connections authenticated with this TLS public key are not moved to "authenticated" state, for clients that don't understand the preauth message and try an authenticate command anyway.
CertDER? : string | null
Account : string // Key authenticates this account.
LoginAddress : string // Must belong to account.
}
replace http basic auth for web interfaces with session cookie & csrf-based auth
the http basic auth we had was very simple to reason about, and to implement.
but it has a major downside:
there is no way to logout, browsers keep sending credentials. ideally, browsers
themselves would show a button to stop sending credentials.
a related downside: the http auth mechanism doesn't indicate to which server
paths the credentials apply.
another downside: the original password is sent to the server with each
request. though sending original passwords to web servers seems to be
considered normal.
our new approach uses session cookies, along with csrf values when we can. the
sessions are server-side managed, automatically extended on each use. this
makes it easy to invalidate sessions and keeps the frontend simpler (than with
long- vs short-term sessions and refreshing). the cookies are httponly,
samesite=strict, scoped to the path of the web interface. cookies are set
"secure" when set over https. the cookie is set by a successful call to Login.
a call to Logout invalidates a session. changing a password invalidates all
sessions for a user, but keeps the session with which the password was changed
alive. the csrf value is also random, and associated with the session cookie.
the csrf must be sent as header for api calls, or as parameter for direct form
posts (where we cannot set a custom header). rest-like calls made directly by
the browser, e.g. for images, don't have a csrf protection. the csrf value is
returned by the Login api call and stored in localstorage.
api calls without credentials return code "user:noAuth", and with bad
credentials return "user:badAuth". the api client recognizes this and triggers
a login. after a login, all auth-failed api calls are automatically retried.
only for "user:badAuth" is an error message displayed in the login form (e.g.
session expired).
in an ideal world, browsers would take care of most session management. a
server would indicate authentication is needed (like http basic auth), and the
browser uses trusted ui to request credentials for the server & path. the
browser could use safer mechanism than sending original passwords to the
server, such as scram, along with a standard way to create sessions. for now,
web developers have to do authentication themselves: from showing the login
prompt to ensuring the right session/csrf cookies/localstorage/headers/etc are
sent with each request.
webauthn is a newer way to do authentication, perhaps we'll implement it in the
future. though hardware tokens aren't an attractive option for many users, and
it may be overkill as long as we still do old-fashioned authentication in smtp
& imap where passwords can be sent to the server.
for issue #58
2024-01-04 15:10:48 +03:00
export type CSRFToken = string
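// a hedged sketch of an api call under the session/csrf scheme described
// above: the session cookie is sent automatically by the browser, and the
// csrf value returned by Login is stored in localstorage and sent as a
// header. the header name and storage key here are assumptions for
// illustration, not necessarily what mox uses.
async function apiCall(path: string, params: unknown[]): Promise<unknown> {
	const csrf: CSRFToken = window.localStorage.getItem("csrftoken") || ""	// assumed storage key
	const resp = await fetch(path, {
		method: "POST",
		headers: { "x-csrf": csrf },	// assumed header name
		body: JSON.stringify({ params: params }),
	})
	if (!resp.ok) {
		// e.g. "user:noAuth"/"user:badAuth" in the response: trigger a login and retry.
		throw new Error("api call failed: " + resp.status)
	}
	return await resp.json()
}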
2023-12-31 13:55:22 +03:00
// Policy as used in DMARC DNS record for "p=" or "sp=".
export enum DMARCPolicy {
PolicyEmpty = "" , // Only for the optional Record.SubdomainPolicy.
PolicyNone = "none" ,
PolicyQuarantine = "quarantine" ,
PolicyReject = "reject" ,
}
// Align specifies the required alignment of a domain name.
export enum Align {
AlignStrict = "s" , // Strict requires an exact domain name match.
AlignRelaxed = "r" , // Relaxed requires either an exact or subdomain name match.
}
// RUA is a reporting address with scheme and special characters ",", "!" and
// ";" not encoded.
export type RUA = string
// Mode indicates how the policy should be interpreted.
export enum Mode {
ModeEnforce = "enforce" , // Policy must be followed, i.e. deliveries must fail if a TLS connection cannot be made.
ModeTesting = "testing" , // In case TLS cannot be negotiated, plain SMTP can be used, but failures must be reported, e.g. with TLSRPT.
ModeNone = "none" , // In case MTA-STS is not or no longer implemented.
}
2024-04-18 12:14:24 +03:00
// Localpart is a decoded local part of an email address, before the "@".
// For quoted strings, values do not hold the double quote or escaping backslashes.
// An empty string can be a valid localpart.
// Localparts are in Unicode NFC.
export type Localpart = string
2023-12-31 13:55:22 +03:00
// An IP is a single IP address, a slice of bytes.
// Functions in this package accept either 4-byte (IPv4)
// or 16-byte (IPv6) slices as input.
//
// Note that in this documentation, referring to an
// IP address as an IPv4 address or an IPv6 address
// is a semantic property of the address, not just the
// length of the byte slice: a 16-byte slice can still
// be an IPv4 address.
export type IP = string
2024-12-06 00:41:49 +03:00
export const structTypes : { [ typename : string ] : boolean } = { "Account" : true , "Address" : true , "AddressAlias" : true , "Alias" : true , "AliasAddress" : true , "AuthResults" : true , "AutoconfCheckResult" : true , "AutodiscoverCheckResult" : true , "AutodiscoverSRV" : true , "AutomaticJunkFlags" : true , "Canonicalization" : true , "CheckResult" : true , "ClientConfigs" : true , "ClientConfigsEntry" : true , "ConfigDomain" : true , "DANECheckResult" : true , "DKIM" : true , "DKIMAuthResult" : true , "DKIMCheckResult" : true , "DKIMRecord" : true , "DMARC" : true , "DMARCCheckResult" : true , "DMARCRecord" : true , "DMARCSummary" : true , "DNSSECResult" : true , "DateRange" : true , "Destination" : true , "Directive" : true , "Domain" : true , "DomainFeedback" : true , "Dynamic" : true , "Evaluation" : true , "EvaluationStat" : true , "Extension" : true , "FailureDetails" : true , "Filter" : true , "HoldRule" : true , "Hook" : true , "HookFilter" : true , "HookResult" : true , "HookRetired" : true , "HookRetiredFilter" : true , "HookRetiredSort" : true , "HookSort" : true , "IPDomain" : true , "IPRevCheckResult" : true , "Identifiers" : true , "IncomingWebhook" : true , "JunkFilter" : true , "MTASTS" : true , "MTASTSCheckResult" : true , "MTASTSRecord" : true , "MX" : true , "MXCheckResult" : true , "Modifier" : true , "Msg" : true , "MsgResult" : true , "MsgRetired" : true , "OutgoingWebhook" : true , "Pair" : true , "Policy" : true , "PolicyEvaluated" : true , "PolicyOverrideReason" : true , "PolicyPublished" : true , "PolicyRecord" : true , "Record" : true , "Report" : true , "ReportMetadata" : true , "ReportRecord" : true , "Result" : true , "ResultPolicy" : true , "RetiredFilter" : true , "RetiredSort" : true , "Reverse" : true , "Route" : true , "Row" : true , "Ruleset" : true , "SMTPAuth" : true , "SPFAuthResult" : true , "SPFCheckResult" : true , "SPFRecord" : true , "SRV" : true , "SRVConfCheckResult" : true , "STSMX" : true , "Selector" : true , "Sort" : true , "SubjectPass" : true , "Summary" : true , "SuppressAddress" : true , "TLSCheckResult" : true , "TLSPublicKey" : true , "TLSRPT" : true , "TLSRPTCheckResult" : true , "TLSRPTDateRange" : true , "TLSRPTRecord" : true , "TLSRPTSummary" : true , "TLSRPTSuppressAddress" : true , "TLSReportRecord" : true , "TLSResult" : true , "Transport" : true , "TransportDirect" : true , "TransportSMTP" : true , "TransportSocks" : true , "URI" : true , "WebForward" : true , "WebHandler" : true , "WebInternal" : true , "WebRedirect" : true , "WebStatic" : true , "WebserverConfig" : true }
2024-05-09 16:58:14 +03:00
export const stringsTypes : { [ typename : string ] : boolean } = { "Align" : true , "CSRFToken" : true , "DMARCPolicy" : true , "IP" : true , "Localpart" : true , "Mode" : true , "RUA" : true }
2023-12-31 13:55:22 +03:00
export const intsTypes : { [ typename : string ] : boolean } = { }
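// a hedged helper showing how the Typewords arrays in the map below can be
// read: the prefixes "nullable", "[]" and "{}" wrap the type that follows,
// and the final word is a scalar ("string", "int32", ...) or a typename
// from structTypes/stringsTypes/intsTypes. this decoder is an illustration
// of the encoding, not the generated client's own verifier.
function typewordsToString(words: string[]): string {
	if (words.length === 0) {
		throw new Error("empty typewords")
	}
	const [w, ...rest] = words
	switch (w) {
	case "nullable":
		return typewordsToString(rest) + " | null"
	case "[]":
		return typewordsToString(rest) + "[]"
	case "{}":
		return "{ [key: string]: " + typewordsToString(rest) + " }"
	default:
		return w	// e.g. "string", "int32", or a typename like "Domain"
	}
}

// e.g. typewordsToString(["{}", "[]", "string"]) === "{ [key: string]: string[] }"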
export const types : TypenameMap = {
"CheckResult" : { "Name" : "CheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DNSSEC" , "Docs" : "" , "Typewords" : [ "DNSSECResult" ] } , { "Name" : "IPRev" , "Docs" : "" , "Typewords" : [ "IPRevCheckResult" ] } , { "Name" : "MX" , "Docs" : "" , "Typewords" : [ "MXCheckResult" ] } , { "Name" : "TLS" , "Docs" : "" , "Typewords" : [ "TLSCheckResult" ] } , { "Name" : "DANE" , "Docs" : "" , "Typewords" : [ "DANECheckResult" ] } , { "Name" : "SPF" , "Docs" : "" , "Typewords" : [ "SPFCheckResult" ] } , { "Name" : "DKIM" , "Docs" : "" , "Typewords" : [ "DKIMCheckResult" ] } , { "Name" : "DMARC" , "Docs" : "" , "Typewords" : [ "DMARCCheckResult" ] } , { "Name" : "HostTLSRPT" , "Docs" : "" , "Typewords" : [ "TLSRPTCheckResult" ] } , { "Name" : "DomainTLSRPT" , "Docs" : "" , "Typewords" : [ "TLSRPTCheckResult" ] } , { "Name" : "MTASTS" , "Docs" : "" , "Typewords" : [ "MTASTSCheckResult" ] } , { "Name" : "SRVConf" , "Docs" : "" , "Typewords" : [ "SRVConfCheckResult" ] } , { "Name" : "Autoconf" , "Docs" : "" , "Typewords" : [ "AutoconfCheckResult" ] } , { "Name" : "Autodiscover" , "Docs" : "" , "Typewords" : [ "AutodiscoverCheckResult" ] } ] } ,
"DNSSECResult" : { "Name" : "DNSSECResult" , "Docs" : "" , "Fields" : [ { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"IPRevCheckResult" : { "Name" : "IPRevCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Hostname" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "IPNames" , "Docs" : "" , "Typewords" : [ "{}" , "[]" , "string" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"Domain" : { "Name" : "Domain" , "Docs" : "" , "Fields" : [ { "Name" : "ASCII" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Unicode" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"MXCheckResult" : { "Name" : "MXCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Records" , "Docs" : "" , "Typewords" : [ "[]" , "MX" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"MX" : { "Name" : "MX" , "Docs" : "" , "Fields" : [ { "Name" : "Host" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Pref" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IPs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"TLSCheckResult" : { "Name" : "TLSCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"DANECheckResult" : { "Name" : "DANECheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"SPFCheckResult" : { "Name" : "SPFCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "DomainTXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DomainRecord" , "Docs" : "" , "Typewords" : [ "nullable" , "SPFRecord" ] } , { "Name" : "HostTXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HostRecord" , "Docs" : "" , "Typewords" : [ "nullable" , "SPFRecord" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"SPFRecord" : { "Name" : "SPFRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Directives" , "Docs" : "" , "Typewords" : [ "[]" , "Directive" ] } , { "Name" : "Redirect" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Explanation" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Other" , "Docs" : "" , "Typewords" : [ "[]" , "Modifier" ] } ] } ,
"Directive" : { "Name" : "Directive" , "Docs" : "" , "Fields" : [ { "Name" : "Qualifier" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mechanism" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DomainSpec" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "IPstr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "IP4CIDRLen" , "Docs" : "" , "Typewords" : [ "nullable" , "int32" ] } , { "Name" : "IP6CIDRLen" , "Docs" : "" , "Typewords" : [ "nullable" , "int32" ] } ] } ,
"Modifier" : { "Name" : "Modifier" , "Docs" : "" , "Fields" : [ { "Name" : "Key" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Value" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"DKIMCheckResult" : { "Name" : "DKIMCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Records" , "Docs" : "" , "Typewords" : [ "[]" , "DKIMRecord" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"DKIMRecord" : { "Name" : "DKIMRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Selector" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "TXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Record" , "Docs" : "" , "Typewords" : [ "nullable" , "Record" ] } ] } ,
"Record" : { "Name" : "Record" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Hashes" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Key" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Notes" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Pubkey" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } , { "Name" : "Services" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Flags" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"DMARCCheckResult" : { "Name" : "DMARCCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "TXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Record" , "Docs" : "" , "Typewords" : [ "nullable" , "DMARCRecord" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"DMARCRecord" : { "Name" : "DMARCRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Policy" , "Docs" : "" , "Typewords" : [ "DMARCPolicy" ] } , { "Name" : "SubdomainPolicy" , "Docs" : "" , "Typewords" : [ "DMARCPolicy" ] } , { "Name" : "AggregateReportAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "URI" ] } , { "Name" : "FailureReportAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "URI" ] } , { "Name" : "ADKIM" , "Docs" : "" , "Typewords" : [ "Align" ] } , { "Name" : "ASPF" , "Docs" : "" , "Typewords" : [ "Align" ] } , { "Name" : "AggregateReportingInterval" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "FailureReportingOptions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "ReportingFormat" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Percentage" , "Docs" : "" , "Typewords" : [ "int32" ] } ] } ,
"URI" : { "Name" : "URI" , "Docs" : "" , "Fields" : [ { "Name" : "Address" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MaxSize" , "Docs" : "" , "Typewords" : [ "uint64" ] } , { "Name" : "Unit" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"TLSRPTCheckResult" : { "Name" : "TLSRPTCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "TXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Record" , "Docs" : "" , "Typewords" : [ "nullable" , "TLSRPTRecord" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"TLSRPTRecord" : { "Name" : "TLSRPTRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RUAs" , "Docs" : "" , "Typewords" : [ "[]" , "[]" , "RUA" ] } , { "Name" : "Extensions" , "Docs" : "" , "Typewords" : [ "[]" , "Extension" ] } ] } ,
"Extension" : { "Name" : "Extension" , "Docs" : "" , "Fields" : [ { "Name" : "Key" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Value" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"MTASTSCheckResult" : { "Name" : "MTASTSCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "TXT" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Record" , "Docs" : "" , "Typewords" : [ "nullable" , "MTASTSRecord" ] } , { "Name" : "PolicyText" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Policy" , "Docs" : "" , "Typewords" : [ "nullable" , "Policy" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"MTASTSRecord" : { "Name" : "MTASTSRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Extensions" , "Docs" : "" , "Typewords" : [ "[]" , "Pair" ] } ] } ,
"Pair" : { "Name" : "Pair" , "Docs" : "" , "Fields" : [ { "Name" : "Key" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Value" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"Policy" : { "Name" : "Policy" , "Docs" : "" , "Fields" : [ { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mode" , "Docs" : "" , "Typewords" : [ "Mode" ] } , { "Name" : "MX" , "Docs" : "" , "Typewords" : [ "[]" , "STSMX" ] } , { "Name" : "MaxAgeSeconds" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Extensions" , "Docs" : "" , "Typewords" : [ "[]" , "Pair" ] } ] } ,
"STSMX" : { "Name" : "STSMX" , "Docs" : "" , "Fields" : [ { "Name" : "Wildcard" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"SRVConfCheckResult" : { "Name" : "SRVConfCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "SRVs" , "Docs" : "" , "Typewords" : [ "{}" , "[]" , "SRV" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"SRV" : { "Name" : "SRV" , "Docs" : "" , "Fields" : [ { "Name" : "Target" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Port" , "Docs" : "" , "Typewords" : [ "uint16" ] } , { "Name" : "Priority" , "Docs" : "" , "Typewords" : [ "uint16" ] } , { "Name" : "Weight" , "Docs" : "" , "Typewords" : [ "uint16" ] } ] } ,
"AutoconfCheckResult" : { "Name" : "AutoconfCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "ClientSettingsDomainIPs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "IPs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"AutodiscoverCheckResult" : { "Name" : "AutodiscoverCheckResult" , "Docs" : "" , "Fields" : [ { "Name" : "Records" , "Docs" : "" , "Typewords" : [ "[]" , "AutodiscoverSRV" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Warnings" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Instructions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"AutodiscoverSRV" : { "Name" : "AutodiscoverSRV" , "Docs" : "" , "Fields" : [ { "Name" : "Target" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Port" , "Docs" : "" , "Typewords" : [ "uint16" ] } , { "Name" : "Priority" , "Docs" : "" , "Typewords" : [ "uint16" ] } , { "Name" : "Weight" , "Docs" : "" , "Typewords" : [ "uint16" ] } , { "Name" : "IPs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
2024-04-24 20:15:30 +03:00
"ConfigDomain" : { "Name" : "ConfigDomain" , "Docs" : "" , "Fields" : [ { "Name" : "Description" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ClientSettingsDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LocalpartCatchallSeparator" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LocalpartCaseSensitive" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "DKIM" , "Docs" : "" , "Typewords" : [ "DKIM" ] } , { "Name" : "DMARC" , "Docs" : "" , "Typewords" : [ "nullable" , "DMARC" ] } , { "Name" : "MTASTS" , "Docs" : "" , "Typewords" : [ "nullable" , "MTASTS" ] } , { "Name" : "TLSRPT" , "Docs" : "" , "Typewords" : [ "nullable" , "TLSRPT" ] } , { "Name" : "Routes" , "Docs" : "" , "Typewords" : [ "[]" , "Route" ] } , { "Name" : "Aliases" , "Docs" : "" , "Typewords" : [ "{}" , "Alias" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
2024-04-18 12:14:24 +03:00
"DKIM" : { "Name" : "DKIM" , "Docs" : "" , "Fields" : [ { "Name" : "Selectors" , "Docs" : "" , "Typewords" : [ "{}" , "Selector" ] } , { "Name" : "Sign" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
2024-04-19 11:23:53 +03:00
"Selector" : { "Name" : "Selector" , "Docs" : "" , "Fields" : [ { "Name" : "Hash" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HashEffective" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Canonicalization" , "Docs" : "" , "Typewords" : [ "Canonicalization" ] } , { "Name" : "Headers" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "HeadersEffective" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "DontSealHeaders" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Expiration" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "PrivateKeyFile" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Algorithm" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
2024-04-18 12:14:24 +03:00
"Canonicalization" : { "Name" : "Canonicalization" , "Docs" : "" , "Fields" : [ { "Name" : "HeaderRelaxed" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "BodyRelaxed" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"DMARC" : { "Name" : "DMARC" , "Docs" : "" , "Fields" : [ { "Name" : "Localpart" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ParsedLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "DNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"MTASTS" : { "Name" : "MTASTS" , "Docs" : "" , "Fields" : [ { "Name" : "PolicyID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mode" , "Docs" : "" , "Typewords" : [ "Mode" ] } , { "Name" : "MaxAge" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "MX" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"TLSRPT" : { "Name" : "TLSRPT" , "Docs" : "" , "Fields" : [ { "Name" : "Localpart" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ParsedLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "DNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"Route" : { "Name" : "Route" , "Docs" : "" , "Fields" : [ { "Name" : "FromDomain" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "ToDomain" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "MinimumAttempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Transport" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FromDomainASCII" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "ToDomainASCII" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
2024-04-24 20:15:30 +03:00
"Alias" : { "Name" : "Alias" , "Docs" : "" , "Fields" : [ { "Name" : "Addresses" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "PostPublic" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "ListMembers" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "AllowMsgFrom" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "LocalpartStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "ParsedAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "AliasAddress" ] } ] } ,
"AliasAddress" : { "Name" : "AliasAddress" , "Docs" : "" , "Fields" : [ { "Name" : "Address" , "Docs" : "" , "Typewords" : [ "Address" ] } , { "Name" : "AccountName" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Destination" , "Docs" : "" , "Typewords" : [ "Destination" ] } ] } ,
"Address" : { "Name" : "Address" , "Docs" : "" , "Fields" : [ { "Name" : "Localpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
2024-04-14 18:18:20 +03:00
"Destination" : { "Name" : "Destination" , "Docs" : "" , "Fields" : [ { "Name" : "Mailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Rulesets" , "Docs" : "" , "Typewords" : [ "[]" , "Ruleset" ] } , { "Name" : "FullName" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
webmail: when moving a single message out of/to the inbox, ask if user wants to create a rule to automatically do that server-side for future deliveries
if the message has a list-id header, we assume this is a (mailing) list
message, and we require a dkim/spf-verified domain (we prefer the shortest that
is a suffix of the list-id value). the rule we would add will mark such
messages as from a mailing list, changing filtering rules on incoming messages
(not enforcing dmarc policies). messages will be matched on list-id header and
will only match if they have the same dkim/spf-verified domain.
if the message doesn't have a list-id header, we'll ask to match based on
"message from" address.
we don't ask the user in several cases:
- if the destination/source mailbox is a special-use mailbox (e.g.
trash, archive, sent, junk; inbox isn't included)
- if the rule already exists (no point in adding it again).
- if the user said "no, not for this list-id/from-address" in the past.
- if the user said "no, not for messages moved to this mailbox" in the past.
we'll add the rule if the message was moved out of the inbox.
if the message was moved to the inbox, we check if there is a matching rule
that we can remove.
we now remember the "no" answers (for list-id, msg-from-addr and mailbox) in
the account database.
to implement the msgfrom rules, this adds support to rulesets for matching on
message "from" address. before, we could match on smtp from address (and other
fields). rulesets now also have a field for comments. webmail adds a note that
it created the rule, with the date.
manual editing of the rulesets is still in the webaccount page. this webmail
functionality is just a convenient way to add/remove common rules.
"Ruleset" : { "Name" : "Ruleset" , "Docs" : "" , "Fields" : [ { "Name" : "SMTPMailFromRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MsgFromRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "VerifiedDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HeadersRegexp" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } , { "Name" : "IsForward" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "ListAllowDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "AcceptRejectsToMailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Comment" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "VerifiedDNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "ListAllowDNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"Account" : { "Name" : "Account" , "Docs" : "" , "Fields" : [ { "Name" : "OutgoingWebhook" , "Docs" : "" , "Typewords" : [ "nullable" , "OutgoingWebhook" ] } , { "Name" : "IncomingWebhook" , "Docs" : "" , "Typewords" : [ "nullable" , "IncomingWebhook" ] } , { "Name" : "FromIDLoginAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "KeepRetiredMessagePeriod" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "KeepRetiredWebhookPeriod" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Description" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FullName" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Destinations" , "Docs" : "" , "Typewords" : [ "{}" , "Destination" ] } , { "Name" : "SubjectPass" , "Docs" : "" , "Typewords" : [ "SubjectPass" ] } , { "Name" : "QuotaMessageSize" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "RejectsMailbox" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "KeepRejects" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "AutomaticJunkFlags" , "Docs" : "" , "Typewords" : [ "AutomaticJunkFlags" ] } , { "Name" : "JunkFilter" , "Docs" : "" , "Typewords" : [ "nullable" , "JunkFilter" ] } , { "Name" : "MaxOutgoingMessagesPerDay" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "MaxFirstTimeRecipientsPerDay" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "NoFirstTimeSenderDelay" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Routes" , "Docs" : "" , "Typewords" : [ "[]" , "Route" ] } , { "Name" : "DNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "Aliases" , "Docs" : "" , "Typewords" : [ "[]" , "AddressAlias" ] } ] } ,
"OutgoingWebhook" : { "Name" : "OutgoingWebhook" , "Docs" : "" , "Fields" : [ { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Authorization" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Events" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"IncomingWebhook" : { "Name" : "IncomingWebhook" , "Docs" : "" , "Fields" : [ { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Authorization" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"SubjectPass" : { "Name" : "SubjectPass" , "Docs" : "" , "Fields" : [ { "Name" : "Period" , "Docs" : "" , "Typewords" : [ "int64" ] } ] } ,
"AutomaticJunkFlags" : { "Name" : "AutomaticJunkFlags" , "Docs" : "" , "Fields" : [ { "Name" : "Enabled" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "JunkMailboxRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "NeutralMailboxRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "NotJunkMailboxRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"JunkFilter" : { "Name" : "JunkFilter" , "Docs" : "" , "Fields" : [ { "Name" : "Threshold" , "Docs" : "" , "Typewords" : [ "float64" ] } , { "Name" : "Onegrams" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Twograms" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Threegrams" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "MaxPower" , "Docs" : "" , "Typewords" : [ "float64" ] } , { "Name" : "TopWords" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IgnoreWords" , "Docs" : "" , "Typewords" : [ "float64" ] } , { "Name" : "RareWords" , "Docs" : "" , "Typewords" : [ "int32" ] } ] } ,
"AddressAlias" : { "Name" : "AddressAlias" , "Docs" : "" , "Fields" : [ { "Name" : "SubscriptionAddress" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Alias" , "Docs" : "" , "Typewords" : [ "Alias" ] } , { "Name" : "MemberAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"PolicyRecord" : { "Name" : "PolicyRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Inserted" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "ValidEnd" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "LastUpdate" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "LastUse" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Backoff" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "RecordID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mode" , "Docs" : "" , "Typewords" : [ "Mode" ] } , { "Name" : "MX" , "Docs" : "" , "Typewords" : [ "[]" , "STSMX" ] } , { "Name" : "MaxAgeSeconds" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Extensions" , "Docs" : "" , "Typewords" : [ "[]" , "Pair" ] } , { "Name" : "PolicyText" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"TLSReportRecord" : { "Name" : "TLSReportRecord" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FromDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MailFrom" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HostReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Report" , "Docs" : "" , "Typewords" : [ "Report" ] } ] } ,
"Report" : { "Name" : "Report" , "Docs" : "" , "Fields" : [ { "Name" : "OrganizationName" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DateRange" , "Docs" : "" , "Typewords" : [ "TLSRPTDateRange" ] } , { "Name" : "ContactInfo" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReportID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Policies" , "Docs" : "" , "Typewords" : [ "[]" , "Result" ] } ] } ,
"TLSRPTDateRange" : { "Name" : "TLSRPTDateRange" , "Docs" : "" , "Fields" : [ { "Name" : "Start" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "End" , "Docs" : "" , "Typewords" : [ "timestamp" ] } ] } ,
"Result" : { "Name" : "Result" , "Docs" : "" , "Fields" : [ { "Name" : "Policy" , "Docs" : "" , "Typewords" : [ "ResultPolicy" ] } , { "Name" : "Summary" , "Docs" : "" , "Typewords" : [ "Summary" ] } , { "Name" : "FailureDetails" , "Docs" : "" , "Typewords" : [ "[]" , "FailureDetails" ] } ] } ,
"ResultPolicy" : { "Name" : "ResultPolicy" , "Docs" : "" , "Fields" : [ { "Name" : "Type" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "String" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MXHost" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"Summary" : { "Name" : "Summary" , "Docs" : "" , "Fields" : [ { "Name" : "TotalSuccessfulSessionCount" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "TotalFailureSessionCount" , "Docs" : "" , "Typewords" : [ "int64" ] } ] } ,
"FailureDetails" : { "Name" : "FailureDetails" , "Docs" : "" , "Fields" : [ { "Name" : "ResultType" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SendingMTAIP" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReceivingMXHostname" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReceivingMXHelo" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReceivingIP" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FailedSessionCount" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "AdditionalInformation" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FailureReasonCode" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"TLSRPTSummary" : { "Name" : "TLSRPTSummary" , "Docs" : "" , "Fields" : [ { "Name" : "PolicyDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Failure" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "ResultTypeCounts" , "Docs" : "" , "Typewords" : [ "{}" , "int64" ] } ] } ,
"DomainFeedback" : { "Name" : "DomainFeedback" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FromDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Version" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReportMetadata" , "Docs" : "" , "Typewords" : [ "ReportMetadata" ] } , { "Name" : "PolicyPublished" , "Docs" : "" , "Typewords" : [ "PolicyPublished" ] } , { "Name" : "Records" , "Docs" : "" , "Typewords" : [ "[]" , "ReportRecord" ] } ] } ,
"ReportMetadata" : { "Name" : "ReportMetadata" , "Docs" : "" , "Fields" : [ { "Name" : "OrgName" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Email" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ExtraContactInfo" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReportID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DateRange" , "Docs" : "" , "Typewords" : [ "DateRange" ] } , { "Name" : "Errors" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"DateRange" : { "Name" : "DateRange" , "Docs" : "" , "Fields" : [ { "Name" : "Begin" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "End" , "Docs" : "" , "Typewords" : [ "int64" ] } ] } ,
"PolicyPublished" : { "Name" : "PolicyPublished" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ADKIM" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ASPF" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Policy" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SubdomainPolicy" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Percentage" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "ReportingOptions" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"ReportRecord" : { "Name" : "ReportRecord" , "Docs" : "" , "Fields" : [ { "Name" : "Row" , "Docs" : "" , "Typewords" : [ "Row" ] } , { "Name" : "Identifiers" , "Docs" : "" , "Typewords" : [ "Identifiers" ] } , { "Name" : "AuthResults" , "Docs" : "" , "Typewords" : [ "AuthResults" ] } ] } ,
"Row" : { "Name" : "Row" , "Docs" : "" , "Fields" : [ { "Name" : "SourceIP" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Count" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "PolicyEvaluated" , "Docs" : "" , "Typewords" : [ "PolicyEvaluated" ] } ] } ,
"PolicyEvaluated" : { "Name" : "PolicyEvaluated" , "Docs" : "" , "Fields" : [ { "Name" : "Disposition" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DKIM" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SPF" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Reasons" , "Docs" : "" , "Typewords" : [ "[]" , "PolicyOverrideReason" ] } ] } ,
"PolicyOverrideReason" : { "Name" : "PolicyOverrideReason" , "Docs" : "" , "Fields" : [ { "Name" : "Type" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Comment" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"Identifiers" : { "Name" : "Identifiers" , "Docs" : "" , "Fields" : [ { "Name" : "EnvelopeTo" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "EnvelopeFrom" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HeaderFrom" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"AuthResults" : { "Name" : "AuthResults" , "Docs" : "" , "Fields" : [ { "Name" : "DKIM" , "Docs" : "" , "Typewords" : [ "[]" , "DKIMAuthResult" ] } , { "Name" : "SPF" , "Docs" : "" , "Typewords" : [ "[]" , "SPFAuthResult" ] } ] } ,
"DKIMAuthResult" : { "Name" : "DKIMAuthResult" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Selector" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Result" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HumanResult" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"SPFAuthResult" : { "Name" : "SPFAuthResult" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Scope" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Result" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"DMARCSummary" : { "Name" : "DMARCSummary" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Total" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DispositionNone" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DispositionQuarantine" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DispositionReject" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DKIMFail" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "SPFFail" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "PolicyOverrides" , "Docs" : "" , "Typewords" : [ "{}" , "int32" ] } ] } ,
"Reverse" : { "Name" : "Reverse" , "Docs" : "" , "Fields" : [ { "Name" : "Hostnames" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"ClientConfigs" : { "Name" : "ClientConfigs" , "Docs" : "" , "Fields" : [ { "Name" : "Entries" , "Docs" : "" , "Typewords" : [ "[]" , "ClientConfigsEntry" ] } ] } ,
"ClientConfigsEntry" : { "Name" : "ClientConfigsEntry" , "Docs" : "" , "Fields" : [ { "Name" : "Protocol" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Host" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "Port" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Listener" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Note" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"HoldRule" : { "Name" : "HoldRule" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SenderDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "RecipientDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "SenderDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RecipientDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
// add a webapi and webhooks for a simple http/json-based api, for
// applications to compose/send messages, receive delivery feedback, and
// maintain suppression lists.
// this is an alternative to applications using a library to compose messages,
// submitting those messages using smtp, and monitoring a mailbox with imap
// for DSNs, which can be processed into the equivalent of suppression lists.
// but then you need to know about all these standards/protocols and find
// libraries; with the webapi & webhooks, you just need an http & json library
// (a hedged usage sketch follows below). unfortunately, there is no standard
// for this kind of api, so mox has made up yet another one...
// matching incoming DSNs about deliveries to the original outgoing messages
// requires keeping a history of "retired" messages (delivered from the queue,
// either successfully or failed). this can be enabled per account. history is
// also useful for debugging deliveries. we now also keep a history of each
// delivery attempt, accessible while the message is still in the queue and
// kept when it is retired. the queue webadmin pages now have pagination, to
// show a potentially large history.
// a queue of webhook calls is now managed too. failures are retried like
// message deliveries, and webhooks can also be saved to the retired list
// after completing, again configurable per account.
// messages can be sent with a "unique smtp mail from" address. this can only
// be used if the domain is configured with a localpart catchall separator
// such as "+". when enabled, a queued message gets assigned a random
// "fromid", which is added after the separator when sending. when DSNs are
// returned, they can be related to previously sent messages based on this
// fromid. in the future, we can implement matching on the "envid" used in the
// smtp dsn extension, or on the "message-id" of the message. using a fromid
// can be triggered by authenticating with a login email address that is
// configured as enabling fromid.
// suppression lists are automatically managed per account. if a delivery
// attempt results in certain smtp errors, the destination address is added to
// the suppression list. future messages queued for that recipient immediately
// fail without a delivery attempt. suppression lists protect your mail server
// reputation.
// submitted messages can carry "extra" data through the queue and webhooks
// for outgoing deliveries: through the webapi as a json object, and through
// smtp submission as message headers of the form "x-mox-extra-<key>: value".
// to make it easy to test webapi/webhooks locally, the "localserve" mode
// actually puts messages in the queue. when it's time to deliver, it still
// won't do a full delivery attempt, but just delivers to the sender account,
// unless the recipient address has a special form, simulating a failure to
// deliver.
// admins now have more control over the queue: "hold rules" can be added to
// mark newly queued messages as "on hold", pausing delivery. rules can match
// certain sender or recipient domains/addresses, or apply to all messages,
// pausing the entire queue. also useful for (local) testing.
// new config options have been introduced, editable through the admin and/or
// account web interfaces. the webapi http endpoints are enabled for newly
// generated configs with the quickstart, and in localserve; existing
// configurations must explicitly enable the webapi in mox.conf.
// gopherwatch.org was created to dogfood this code. it initially used just
// the compose/smtpclient/imapclient mox packages to send messages and process
// delivery feedback; it will get a config option to use the mox
// webapi/webhooks instead. the gopherwatch code using webapi/webhooks is
// smaller and simpler, and developing it shaped the development of the mox
// webapi/webhooks.
// for issue #31 by cuu508
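// A hedged sketch of submitting a message through the webapi from TypeScript.
// The endpoint path, request fields and basic-auth scheme shown here are
// assumptions for illustration, not definitions from this file; consult the
// webapi documentation for the actual interface.
async function webapiSend(baseURL: string, username: string, password: string): Promise<void> {
	// Compose and submit a message; delivery feedback arrives later via webhooks.
	const resp = await fetch(baseURL + "Send", { // endpoint name is an assumption.
		method: "POST",
		headers: {
			"Content-Type": "application/json",
			"Authorization": "Basic " + btoa(username + ":" + password),
		},
		body: JSON.stringify({
			From: [{ Address: "support@example.org" }],
			To: [{ Address: "user@example.com" }],
			Subject: "hello",
			Text: "hi!",
			Extra: { "order-id": "12345" }, // "extra" data carried through queue and webhooks.
		}),
	})
	if (!resp.ok) {
		throw new Error("webapi send failed: " + resp.status)
	}
}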
"Filter" : { "Name" : "Filter" , "Docs" : "" , "Fields" : [ { "Name" : "Max" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IDs" , "Docs" : "" , "Typewords" : [ "[]" , "int64" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "From" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "To" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Hold" , "Docs" : "" , "Typewords" : [ "nullable" , "bool" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "NextAttempt" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Transport" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } ] } ,
"Sort" : { "Name" : "Sort" , "Docs" : "" , "Fields" : [ { "Name" : "Field" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Last" , "Docs" : "" , "Typewords" : [ "any" ] } , { "Name" : "Asc" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"Msg" : { "Name" : "Msg" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "BaseID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Queued" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Hold" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "SenderAccount" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SenderLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "SenderDomain" , "Docs" : "" , "Typewords" : [ "IPDomain" ] } , { "Name" : "SenderDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FromID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RecipientLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "RecipientDomain" , "Docs" : "" , "Typewords" : [ "IPDomain" ] } , { "Name" : "RecipientDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Attempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "MaxAttempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DialedIPs" , "Docs" : "" , "Typewords" : [ "{}" , "[]" , "IP" ] } , { "Name" : "NextAttempt" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "LastAttempt" , "Docs" : "" , "Typewords" : [ "nullable" , "timestamp" ] } , { "Name" : "Results" , "Docs" : "" , "Typewords" : [ "[]" , "MsgResult" ] } , { "Name" : "Has8bit" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "SMTPUTF8" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IsDMARCReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IsTLSReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Size" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "MessageID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MsgPrefix" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } , { "Name" : "Subject" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DSNUTF8" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } , { "Name" : "Transport" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RequireTLS" , "Docs" : "" , "Typewords" : [ "nullable" , "bool" ] } , { "Name" : "FutureReleaseRequest" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Extra" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } ] } ,
"IPDomain" : { "Name" : "IPDomain" , "Docs" : "" , "Fields" : [ { "Name" : "IP" , "Docs" : "" , "Typewords" : [ "IP" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"MsgResult" : { "Name" : "MsgResult" , "Docs" : "" , "Fields" : [ { "Name" : "Start" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Duration" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Code" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Secode" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Error" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"RetiredFilter" : { "Name" : "RetiredFilter" , "Docs" : "" , "Fields" : [ { "Name" : "Max" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IDs" , "Docs" : "" , "Typewords" : [ "[]" , "int64" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "From" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "To" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastActivity" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Transport" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "nullable" , "bool" ] } ] } ,
"RetiredSort" : { "Name" : "RetiredSort" , "Docs" : "" , "Fields" : [ { "Name" : "Field" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Last" , "Docs" : "" , "Typewords" : [ "any" ] } , { "Name" : "Asc" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"MsgRetired" : { "Name" : "MsgRetired" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "BaseID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Queued" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "SenderAccount" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "SenderLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "SenderDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "FromID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RecipientLocalpart" , "Docs" : "" , "Typewords" : [ "Localpart" ] } , { "Name" : "RecipientDomain" , "Docs" : "" , "Typewords" : [ "IPDomain" ] } , { "Name" : "RecipientDomainStr" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Attempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "MaxAttempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "DialedIPs" , "Docs" : "" , "Typewords" : [ "{}" , "[]" , "IP" ] } , { "Name" : "LastAttempt" , "Docs" : "" , "Typewords" : [ "nullable" , "timestamp" ] } , { "Name" : "Results" , "Docs" : "" , "Typewords" : [ "[]" , "MsgResult" ] } , { "Name" : "Has8bit" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "SMTPUTF8" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IsDMARCReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IsTLSReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Size" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "MessageID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Subject" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Transport" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RequireTLS" , "Docs" : "" , "Typewords" : [ "nullable" , "bool" ] } , { "Name" : "FutureReleaseRequest" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Extra" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } , { "Name" : "LastActivity" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "RecipientAddress" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "KeepUntil" , "Docs" : "" , "Typewords" : [ "timestamp" ] } ] } ,
"HookFilter" : { "Name" : "HookFilter" , "Docs" : "" , "Fields" : [ { "Name" : "Max" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IDs" , "Docs" : "" , "Typewords" : [ "[]" , "int64" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "NextAttempt" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Event" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"HookSort" : { "Name" : "HookSort" , "Docs" : "" , "Fields" : [ { "Name" : "Field" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Last" , "Docs" : "" , "Typewords" : [ "any" ] } , { "Name" : "Asc" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"Hook" : { "Name" : "Hook" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "QueueMsgID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "FromID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MessageID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Subject" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Extra" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Authorization" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "IsIncoming" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "OutgoingEvent" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Payload" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Attempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "NextAttempt" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Results" , "Docs" : "" , "Typewords" : [ "[]" , "HookResult" ] } ] } ,
"HookResult" : { "Name" : "HookResult" , "Docs" : "" , "Fields" : [ { "Name" : "Start" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Duration" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Code" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Error" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Response" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"HookRetiredFilter" : { "Name" : "HookRetiredFilter" , "Docs" : "" , "Fields" : [ { "Name" : "Max" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "IDs" , "Docs" : "" , "Typewords" : [ "[]" , "int64" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastActivity" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Event" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"HookRetiredSort" : { "Name" : "HookRetiredSort" , "Docs" : "" , "Fields" : [ { "Name" : "Field" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LastID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Last" , "Docs" : "" , "Typewords" : [ "any" ] } , { "Name" : "Asc" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"HookRetired" : { "Name" : "HookRetired" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "QueueMsgID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "FromID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "MessageID" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Subject" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Extra" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Authorization" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IsIncoming" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "OutgoingEvent" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Payload" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Submitted" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "SupersededByID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Attempts" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Results" , "Docs" : "" , "Typewords" : [ "[]" , "HookResult" ] } , { "Name" : "Success" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "LastActivity" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "KeepUntil" , "Docs" : "" , "Typewords" : [ "timestamp" ] } ] } ,
"WebserverConfig" : { "Name" : "WebserverConfig" , "Docs" : "" , "Fields" : [ { "Name" : "WebDNSDomainRedirects" , "Docs" : "" , "Typewords" : [ "[]" , "[]" , "Domain" ] } , { "Name" : "WebDomainRedirects" , "Docs" : "" , "Typewords" : [ "[]" , "[]" , "string" ] } , { "Name" : "WebHandlers" , "Docs" : "" , "Typewords" : [ "[]" , "WebHandler" ] } ] } ,
// improve http request handling for internal services and multiple domains.
// per listener, you could enable the admin/account/webmail/webapi handlers,
// but that would serve those services on their configured paths (/admin/, /,
// /webmail/, /webapi/) on all domains mox would be webserving, including any
// non-mail domains. so your www.example.org/admin/ would be serving the admin
// web interface, with no way to disable that.
// with this change, the admin interface is only served on requests to (based
// on the Host header):
// - ip addresses
// - the listener host name (explicitly configured in the listener, with
//   fallback to the global hostname)
// - "localhost" (for ssh tunnel/forwarding scenarios)
// the account/webmail/webapi interfaces are served on the same domains as the
// admin interface, and additionally:
// - the client settings domains, as optionally configured in each Domain in
//   domains.conf, typically "mail.<yourdomain>"
// this means the internal services are no longer served on other domains
// configured in the webserver; e.g. www.example.org/admin/ will not be
// handled specially.
// the order of evaluation of routes/services has also changed. before this
// change, the internal handlers were always evaluated first. now, only the
// system handlers for MTA-STS/autoconfig/ACME-validation are evaluated first,
// then the webserver handlers, and finally the internal services
// (admin/account/webmail/webapi). this allows an admin to configure overrides
// for some of the domains (per the hostname-matching rules explained above)
// that would normally serve these services.
// webserver handlers can now be configured that pass the request to an
// internal service: in addition to the existing static/redirect/forward
// config options, there is now an "internal" config option, naming the
// service (admin/account/webmail/webapi) for handling the request. this
// allows enabling the internal services on custom domains; see the sketch
// after the WebInternal definition below.
// for issue #160 by TragicLifeHu, thanks for reporting!
"WebHandler" : { "Name" : "WebHandler" , "Docs" : "" , "Fields" : [ { "Name" : "LogName" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "PathRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DontRedirectPlainHTTP" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Compress" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "WebStatic" , "Docs" : "" , "Typewords" : [ "nullable" , "WebStatic" ] } , { "Name" : "WebRedirect" , "Docs" : "" , "Typewords" : [ "nullable" , "WebRedirect" ] } , { "Name" : "WebForward" , "Docs" : "" , "Typewords" : [ "nullable" , "WebForward" ] } , { "Name" : "WebInternal" , "Docs" : "" , "Typewords" : [ "nullable" , "WebInternal" ] } , { "Name" : "Name" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DNSDomain" , "Docs" : "" , "Typewords" : [ "Domain" ] } ] } ,
"WebStatic" : { "Name" : "WebStatic" , "Docs" : "" , "Fields" : [ { "Name" : "StripPrefix" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Root" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ListFiles" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "ContinueNotFound" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "ResponseHeaders" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } ] } ,
"WebRedirect" : { "Name" : "WebRedirect" , "Docs" : "" , "Fields" : [ { "Name" : "BaseURL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "OrigPathRegexp" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ReplacePath" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "StatusCode" , "Docs" : "" , "Typewords" : [ "int32" ] } ] } ,
"WebForward" : { "Name" : "WebForward" , "Docs" : "" , "Fields" : [ { "Name" : "StripPath" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "URL" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "ResponseHeaders" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } ] } ,
"WebInternal" : { "Name" : "WebInternal" , "Docs" : "" , "Fields" : [ { "Name" : "BasePath" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Service" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"Transport" : { "Name" : "Transport" , "Docs" : "" , "Fields" : [ { "Name" : "Submissions" , "Docs" : "" , "Typewords" : [ "nullable" , "TransportSMTP" ] } , { "Name" : "Submission" , "Docs" : "" , "Typewords" : [ "nullable" , "TransportSMTP" ] } , { "Name" : "SMTP" , "Docs" : "" , "Typewords" : [ "nullable" , "TransportSMTP" ] } , { "Name" : "Socks" , "Docs" : "" , "Typewords" : [ "nullable" , "TransportSocks" ] } , { "Name" : "Direct" , "Docs" : "" , "Typewords" : [ "nullable" , "TransportDirect" ] } ] } ,
"TransportSMTP" : { "Name" : "TransportSMTP" , "Docs" : "" , "Fields" : [ { "Name" : "Host" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Port" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "STARTTLSInsecureSkipVerify" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "NoSTARTTLS" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Auth" , "Docs" : "" , "Typewords" : [ "nullable" , "SMTPAuth" ] } ] } ,
"SMTPAuth" : { "Name" : "SMTPAuth" , "Docs" : "" , "Fields" : [ { "Name" : "Username" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Password" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Mechanisms" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } ] } ,
"TransportSocks" : { "Name" : "TransportSocks" , "Docs" : "" , "Fields" : [ { "Name" : "Address" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RemoteIPs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "RemoteHostname" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"TransportDirect" : { "Name" : "TransportDirect" , "Docs" : "" , "Fields" : [ { "Name" : "DisableIPv4" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "DisableIPv6" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"EvaluationStat" : { "Name" : "EvaluationStat" , "Docs" : "" , "Fields" : [ { "Name" : "Domain" , "Docs" : "" , "Typewords" : [ "Domain" ] } , { "Name" : "Dispositions" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "Count" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "SendReport" , "Docs" : "" , "Typewords" : [ "bool" ] } ] } ,
"Evaluation" : { "Name" : "Evaluation" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "PolicyDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Evaluated" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Optional" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "IntervalHours" , "Docs" : "" , "Typewords" : [ "int32" ] } , { "Name" : "Addresses" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "PolicyPublished" , "Docs" : "" , "Typewords" : [ "PolicyPublished" ] } , { "Name" : "SourceIP" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Disposition" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "AlignedDKIMPass" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "AlignedSPFPass" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "OverrideReasons" , "Docs" : "" , "Typewords" : [ "[]" , "PolicyOverrideReason" ] } , { "Name" : "EnvelopeTo" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "EnvelopeFrom" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "HeaderFrom" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DKIMResults" , "Docs" : "" , "Typewords" : [ "[]" , "DKIMAuthResult" ] } , { "Name" : "SPFResults" , "Docs" : "" , "Typewords" : [ "[]" , "SPFAuthResult" ] } ] } ,
"SuppressAddress" : { "Name" : "SuppressAddress" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Inserted" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "ReportingAddress" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Until" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Comment" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"TLSResult" : { "Name" : "TLSResult" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "PolicyDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "DayUTC" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "RecipientDomain" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Created" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Updated" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "IsHost" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "SendReport" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "SentToRecipientDomain" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "RecipientDomainReportingAddresses" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "SentToPolicyDomain" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "Results" , "Docs" : "" , "Typewords" : [ "[]" , "Result" ] } ] } ,
"TLSRPTSuppressAddress" : { "Name" : "TLSRPTSuppressAddress" , "Docs" : "" , "Fields" : [ { "Name" : "ID" , "Docs" : "" , "Typewords" : [ "int64" ] } , { "Name" : "Inserted" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "ReportingAddress" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Until" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Comment" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
"Dynamic" : { "Name" : "Dynamic" , "Docs" : "" , "Fields" : [ { "Name" : "Domains" , "Docs" : "" , "Typewords" : [ "{}" , "ConfigDomain" ] } , { "Name" : "Accounts" , "Docs" : "" , "Typewords" : [ "{}" , "Account" ] } , { "Name" : "WebDomainRedirects" , "Docs" : "" , "Typewords" : [ "{}" , "string" ] } , { "Name" : "WebHandlers" , "Docs" : "" , "Typewords" : [ "[]" , "WebHandler" ] } , { "Name" : "Routes" , "Docs" : "" , "Typewords" : [ "[]" , "Route" ] } , { "Name" : "MonitorDNSBLs" , "Docs" : "" , "Typewords" : [ "[]" , "string" ] } , { "Name" : "MonitorDNSBLZones" , "Docs" : "" , "Typewords" : [ "[]" , "Domain" ] } ] } ,
// implement tls client certificate authentication.
// the imap & smtp servers now allow logging in with tls client authentication
// and the "external" sasl authentication mechanism. email clients like
// thunderbird, fairemail, k9 and macos mail implement it. this seems to be
// the most secure among the authentication mechanisms commonly implemented by
// clients. a useful property is that an account can have a separate tls
// public key for each device/email client. with tls client cert auth,
// authentication is also bound to the tls connection: a mitm cannot pass the
// credentials on to another tls connection, similar to scram-*-plus (though
// part of scram-*-plus is that clients verify that the server knows the
// client credentials).
// for tls client auth with imap, we send a "preauth" untagged message by
// default. that puts the connection in authenticated state; given the imap
// connection state machine, further authentication commands are not allowed.
// some clients don't recognize the preauth message and try to authenticate
// anyway, which fails. a tls public key therefore has a config option to
// disable preauth, keeping new connections in unauthenticated state, to work
// with such email clients.
// for smtp (submission), we don't require an explicit auth command.
// both for imap and smtp, we allow a client to authenticate with another
// mechanism than "external". in that case, credentials are verified and have
// to be for the same account as the tls client auth, but the address can be
// another one than the login address configured with the tls public key.
// only the public key is used to identify the account that is authenticating;
// we ignore the rest of the certificate. expiration dates, names,
// constraints, etc. are not verified, and no certificate authorities are
// involved.
// users can upload their own (minimal) certificate. the account web interface
// shows openssl commands you can run to generate a private key, minimal cert,
// and a p12 file (the format that email clients seem to like...) containing
// both private key and certificate.
// the imapclient & smtpclient packages can now also use tls client auth, and
// so can "mox sendmail", either with a pem file containing private key and
// certificate, or with just an ed25519 private key.
// there are new subcommands "mox config tlspubkey ..." for
// adding/removing/listing tls public keys from the cli, by the admin.
"TLSPublicKey" : { "Name" : "TLSPublicKey" , "Docs" : "" , "Fields" : [ { "Name" : "Fingerprint" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Created" , "Docs" : "" , "Typewords" : [ "timestamp" ] } , { "Name" : "Type" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "Name" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "NoIMAPPreauth" , "Docs" : "" , "Typewords" : [ "bool" ] } , { "Name" : "CertDER" , "Docs" : "" , "Typewords" : [ "nullable" , "string" ] } , { "Name" : "Account" , "Docs" : "" , "Typewords" : [ "string" ] } , { "Name" : "LoginAddress" , "Docs" : "" , "Typewords" : [ "string" ] } ] } ,
// replace http basic auth for web interfaces with session cookie &
// csrf-based auth.
// the http basic auth we had was very simple to reason about and to
// implement, but it has a major downside: there is no way to log out;
// browsers keep sending credentials. ideally, browsers themselves would show
// a button to stop sending credentials. a related downside: the http auth
// mechanism doesn't indicate for which server paths the credentials are.
// another downside: the original password is sent to the server with each
// request, though sending original passwords to web servers seems to be
// considered normal.
// our new approach uses session cookies, along with csrf values when we can.
// the sessions are managed server-side and automatically extended on each
// use. this makes it easy to invalidate sessions and keeps the frontend
// simpler (than with long- vs short-term sessions and refreshing). the
// cookies are httponly, samesite=strict, scoped to the path of the web
// interface, and set "secure" when set over https. the cookie is set by a
// successful call to Login. a call to Logout invalidates a session. changing
// a password invalidates all sessions for a user, but keeps alive the session
// with which the password was changed. the csrf value is also random and
// associated with the session cookie. the csrf value must be sent as a header
// for api calls, or as a parameter for direct form posts (where we cannot set
// a custom header). rest-like calls made directly by the browser, e.g. for
// images, don't have csrf protection. the csrf value is returned by the Login
// api call and stored in localstorage.
// api calls without credentials return code "user:noAuth", and calls with bad
// credentials return "user:badAuth". the api client recognizes this and
// triggers a login; after a login, all auth-failed api calls are
// automatically retried. only for "user:badAuth" is an error message
// displayed in the login form (e.g. session expired). a hedged sketch of this
// flow follows the CSRFToken definition below.
// in an ideal world, browsers would take care of most session management: a
// server would indicate authentication is needed (like http basic auth), and
// the browser would use trusted ui to request credentials for the server &
// path. the browser could use a safer mechanism than sending original
// passwords to the server, such as scram, along with a standard way to create
// sessions. for now, web developers have to do authentication themselves:
// from showing the login prompt to ensuring the right session/csrf
// cookies/localstorage/headers/etc are sent with each request.
// webauthn is a newer way to do authentication; perhaps we'll implement it in
// the future. though hardware tokens aren't an attractive option for many
// users, and it may be overkill as long as we still do old-fashioned
// authentication in smtp & imap, where passwords can be sent to the server.
// for issue #58
"CSRFToken" : { "Name" : "CSRFToken" , "Docs" : "" , "Values" : null } ,
"DMARCPolicy" : { "Name" : "DMARCPolicy" , "Docs" : "" , "Values" : [ { "Name" : "PolicyEmpty" , "Value" : "" , "Docs" : "" } , { "Name" : "PolicyNone" , "Value" : "none" , "Docs" : "" } , { "Name" : "PolicyQuarantine" , "Value" : "quarantine" , "Docs" : "" } , { "Name" : "PolicyReject" , "Value" : "reject" , "Docs" : "" } ] } ,
"Align" : { "Name" : "Align" , "Docs" : "" , "Values" : [ { "Name" : "AlignStrict" , "Value" : "s" , "Docs" : "" } , { "Name" : "AlignRelaxed" , "Value" : "r" , "Docs" : "" } ] } ,
"RUA" : { "Name" : "RUA" , "Docs" : "" , "Values" : null } ,
"Mode" : { "Name" : "Mode" , "Docs" : "" , "Values" : [ { "Name" : "ModeEnforce" , "Value" : "enforce" , "Docs" : "" } , { "Name" : "ModeTesting" , "Value" : "testing" , "Docs" : "" } , { "Name" : "ModeNone" , "Value" : "none" , "Docs" : "" } ] } ,
"Localpart" : { "Name" : "Localpart" , "Docs" : "" , "Values" : null } ,
"IP" : { "Name" : "IP" , "Docs" : "" , "Values" : [ ] } ,
}
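// Usage sketch (hypothetical URL): fetch a json value and validate it
// against the type definitions via the parser object below.
async function fetchCheckResult(url: string): Promise<CheckResult> {
	const v = await (await fetch(url)).json()
	return parser.CheckResult(v)
}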
export const parser = {
CheckResult : ( v : any ) => parse ( "CheckResult" , v ) as CheckResult ,
DNSSECResult : ( v : any ) => parse ( "DNSSECResult" , v ) as DNSSECResult ,
IPRevCheckResult : ( v : any ) => parse ( "IPRevCheckResult" , v ) as IPRevCheckResult ,
Domain : ( v : any ) => parse ( "Domain" , v ) as Domain ,
MXCheckResult : ( v : any ) => parse ( "MXCheckResult" , v ) as MXCheckResult ,
MX : ( v : any ) => parse ( "MX" , v ) as MX ,
TLSCheckResult : ( v : any ) => parse ( "TLSCheckResult" , v ) as TLSCheckResult ,
DANECheckResult : ( v : any ) => parse ( "DANECheckResult" , v ) as DANECheckResult ,
SPFCheckResult : ( v : any ) => parse ( "SPFCheckResult" , v ) as SPFCheckResult ,
SPFRecord : ( v : any ) => parse ( "SPFRecord" , v ) as SPFRecord ,
Directive : ( v : any ) => parse ( "Directive" , v ) as Directive ,
Modifier : ( v : any ) => parse ( "Modifier" , v ) as Modifier ,
DKIMCheckResult : ( v : any ) => parse ( "DKIMCheckResult" , v ) as DKIMCheckResult ,
DKIMRecord : ( v : any ) => parse ( "DKIMRecord" , v ) as DKIMRecord ,
Record : ( v : any ) => parse ( "Record" , v ) as Record ,
DMARCCheckResult : ( v : any ) => parse ( "DMARCCheckResult" , v ) as DMARCCheckResult ,
DMARCRecord : ( v : any ) => parse ( "DMARCRecord" , v ) as DMARCRecord ,
URI : ( v : any ) => parse ( "URI" , v ) as URI ,
TLSRPTCheckResult : ( v : any ) => parse ( "TLSRPTCheckResult" , v ) as TLSRPTCheckResult ,
TLSRPTRecord : ( v : any ) => parse ( "TLSRPTRecord" , v ) as TLSRPTRecord ,
Extension : ( v : any ) => parse ( "Extension" , v ) as Extension ,
MTASTSCheckResult : ( v : any ) => parse ( "MTASTSCheckResult" , v ) as MTASTSCheckResult ,
MTASTSRecord : ( v : any ) => parse ( "MTASTSRecord" , v ) as MTASTSRecord ,
Pair : ( v : any ) => parse ( "Pair" , v ) as Pair ,
Policy : ( v : any ) => parse ( "Policy" , v ) as Policy ,
STSMX : ( v : any ) => parse ( "STSMX" , v ) as STSMX ,
SRVConfCheckResult : ( v : any ) => parse ( "SRVConfCheckResult" , v ) as SRVConfCheckResult ,
SRV : ( v : any ) => parse ( "SRV" , v ) as SRV ,
AutoconfCheckResult : ( v : any ) => parse ( "AutoconfCheckResult" , v ) as AutoconfCheckResult ,
AutodiscoverCheckResult : ( v : any ) => parse ( "AutodiscoverCheckResult" , v ) as AutodiscoverCheckResult ,
AutodiscoverSRV : ( v : any ) => parse ( "AutodiscoverSRV" , v ) as AutodiscoverSRV ,
2024-04-18 12:14:24 +03:00
ConfigDomain : ( v : any ) => parse ( "ConfigDomain" , v ) as ConfigDomain ,
DKIM : ( v : any ) => parse ( "DKIM" , v ) as DKIM ,
Selector : ( v : any ) => parse ( "Selector" , v ) as Selector ,
Canonicalization : ( v : any ) => parse ( "Canonicalization" , v ) as Canonicalization ,
DMARC : ( v : any ) => parse ( "DMARC" , v ) as DMARC ,
MTASTS : ( v : any ) => parse ( "MTASTS" , v ) as MTASTS ,
TLSRPT : ( v : any ) => parse ( "TLSRPT" , v ) as TLSRPT ,
Route : ( v : any ) => parse ( "Route" , v ) as Route ,
2024-04-24 20:15:30 +03:00
Alias : ( v : any ) => parse ( "Alias" , v ) as Alias ,
AliasAddress : ( v : any ) => parse ( "AliasAddress" , v ) as AliasAddress ,
Address : ( v : any ) => parse ( "Address" , v ) as Address ,
Destination : ( v : any ) => parse ( "Destination" , v ) as Destination ,
Ruleset : ( v : any ) => parse ( "Ruleset" , v ) as Ruleset ,
2024-04-14 18:18:20 +03:00
Account : ( v : any ) => parse ( "Account" , v ) as Account ,
add a webapi and webhooks for a simple http/json-based api
for applications to compose/send messages, receive delivery feedback, and
maintain suppression lists.
this is an alternative to applications using a library to compose messages,
submitting those messages using smtp, and monitoring a mailbox with imap for
DSNs, which can be processed into the equivalent of suppression lists. but you
need to know about all these standards/protocols and find libraries. by using
the webapi & webhooks, you just need an http & json library.
unfortunately, there is no standard for this kind of api, so mox has made up
yet another one...
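for illustration, a minimal sketch of what a webapi call could look like from
typescript. the endpoint path, request shape and form encoding below are
assumptions for illustration only; consult the mox webapi documentation for the
real ones.

const webapiSend = async (baseURL: string, user: string, pass: string) => {
	// Hypothetical message shape, for illustration.
	const request = {
		To: [{Address: 'user@example.org'}],
		Subject: 'hello',
		Text: 'hi from the webapi',
	}
	// Hypothetical endpoint; http basic auth with a form-encoded json request.
	const resp = await fetch(baseURL + '/webapi/v1/Send', {
		method: 'POST',
		headers: {Authorization: 'Basic ' + btoa(user + ':' + pass)},
		body: new URLSearchParams({request: JSON.stringify(request)}),
	})
	if (!resp.ok) {
		throw new Error('webapi call failed: ' + resp.status)
	}
	return await resp.json()
}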
matching incoming DSNs about deliveries to original outgoing messages requires
keeping history of "retired" messages (removed from the queue, either after
successful delivery or failure). this can be enabled per account. history is also
useful for debugging deliveries. we now also keep history of each delivery
attempt, accessible while still in the queue, and kept when a message is
retired. the queue webadmin pages now also have pagination, to show potentially
large history.
a queue of webhook calls is now managed too. failures are retried similarly to
message deliveries. webhooks can also be saved to the retired list after
completing. also configurable per account.
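for illustration, a webhook receiver can be a plain http server taking json
posts; a minimal sketch (the payload shape is whatever mox posts, not modeled
here):

import {createServer} from 'node:http'

createServer((req, res) => {
	let body = ''
	req.on('data', (chunk) => { body += chunk })
	req.on('end', () => {
		// Delivery feedback event; inspect it and acknowledge with a 200 response.
		const event = JSON.parse(body)
		console.log('webhook event', event)
		res.writeHead(200).end()
	})
}).listen(8080)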
messages can be sent with a "unique smtp mail from" address. this can only be
used if the domain is configured with a localpart catchall separator such as
"+". when enabled, a queued message gets assigned a random "fromid", which is
added after the separator when sending. when DSNs are returned, they can be
related to previously sent messages based on this fromid. in the future, we can
implement matching on the "envid" used in the smtp dsn extension, or on the
"message-id" of the message. using a fromid can be triggered by authenticating
with a login email address that is configured as enabling fromid.
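a sketch of how such an address could be composed and matched back, assuming
"+" as the configured separator; the helper names are hypothetical:

// Compose the smtp mail from address with the fromid after the separator.
const withFromID = (localpart: string, domain: string, fromID: string): string =>
	`${localpart}+${fromID}@${domain}`

// Recover the fromid from a DSN recipient like "user+abc123@example.org".
const fromIDOf = (rcpt: string): string | null => {
	const m = rcpt.match(/\+([^+@]+)@/)
	return m ? m[1] : null
}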
suppression lists are automatically managed per account. if a delivery attempt
results in certain smtp errors, the destination address is added to the
suppression list. future messages queued for that recipient will immediately
fail without a delivery attempt. suppression lists protect your mail server
reputation.
submitted messages can carry "extra" data through the queue and webhooks for
outgoing deliveries. through webapi as a json object, through smtp submission
as message headers of the form "x-mox-extra-<key>: value".
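turning an extra-data object into such submission headers could look like this
hypothetical helper:

// Map extra key/value pairs to "x-mox-extra-<key>: value" headers.
const extraHeaders = (extra: {[key: string]: string}): string[] =>
	Object.entries(extra).map(([k, v]) => `x-mox-extra-${k}: ${v}`)

// extraHeaders({orderid: '12345'}) returns ['x-mox-extra-orderid: 12345']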
to make it easy to test webapi/webhooks locally, the "localserve" mode actually
puts messages in the queue. when it's time to deliver, it still won't do a full
delivery attempt, but just delivers to the sender account, unless the recipient
address has a special form, simulating a failure to deliver.
admins now have more control over the queue. "hold rules" can be added to mark
newly queued messages as "on hold", pausing delivery. rules can be about
certain sender or recipient domains/addresses, or apply to all messages,
pausing the entire queue. also useful for (local) testing.
new config options have been introduced. they are editable through the admin
and/or account web interfaces.
the webapi http endpoints are enabled for newly generated configs with the
quickstart, and in localserve. existing configurations must explicitly enable
the webapi in mox.conf.
gopherwatch.org was created to dogfood this code. it initially used just the
compose/smtpclient/imapclient mox packages to send messages and process
delivery feedback. it will get a config option to use the mox webapi/webhooks
instead. the gopherwatch code to use webapi/webhook is smaller and simpler, and
developing that shaped development of the mox webapi/webhooks.
for issue #31 by cuu508
2024-04-15 22:49:02 +03:00
OutgoingWebhook : ( v : any ) => parse ( "OutgoingWebhook" , v ) as OutgoingWebhook ,
IncomingWebhook : ( v : any ) => parse ( "IncomingWebhook" , v ) as IncomingWebhook ,
2024-04-14 18:18:20 +03:00
SubjectPass : ( v : any ) => parse ( "SubjectPass" , v ) as SubjectPass ,
AutomaticJunkFlags : ( v : any ) => parse ( "AutomaticJunkFlags" , v ) as AutomaticJunkFlags ,
JunkFilter : ( v : any ) => parse ( "JunkFilter" , v ) as JunkFilter ,
2024-04-24 20:15:30 +03:00
AddressAlias : ( v : any ) => parse ( "AddressAlias" , v ) as AddressAlias ,
2023-12-31 13:55:22 +03:00
PolicyRecord : ( v : any ) => parse ( "PolicyRecord" , v ) as PolicyRecord ,
TLSReportRecord : ( v : any ) => parse ( "TLSReportRecord" , v ) as TLSReportRecord ,
Report : ( v : any ) => parse ( "Report" , v ) as Report ,
TLSRPTDateRange : ( v : any ) => parse ( "TLSRPTDateRange" , v ) as TLSRPTDateRange ,
Result : ( v : any ) => parse ( "Result" , v ) as Result ,
ResultPolicy : ( v : any ) => parse ( "ResultPolicy" , v ) as ResultPolicy ,
Summary : ( v : any ) => parse ( "Summary" , v ) as Summary ,
FailureDetails : ( v : any ) => parse ( "FailureDetails" , v ) as FailureDetails ,
TLSRPTSummary : ( v : any ) => parse ( "TLSRPTSummary" , v ) as TLSRPTSummary ,
DomainFeedback : ( v : any ) => parse ( "DomainFeedback" , v ) as DomainFeedback ,
ReportMetadata : ( v : any ) => parse ( "ReportMetadata" , v ) as ReportMetadata ,
DateRange : ( v : any ) => parse ( "DateRange" , v ) as DateRange ,
PolicyPublished : ( v : any ) => parse ( "PolicyPublished" , v ) as PolicyPublished ,
ReportRecord : ( v : any ) => parse ( "ReportRecord" , v ) as ReportRecord ,
Row : ( v : any ) => parse ( "Row" , v ) as Row ,
PolicyEvaluated : ( v : any ) => parse ( "PolicyEvaluated" , v ) as PolicyEvaluated ,
PolicyOverrideReason : ( v : any ) => parse ( "PolicyOverrideReason" , v ) as PolicyOverrideReason ,
Identifiers : ( v : any ) => parse ( "Identifiers" , v ) as Identifiers ,
AuthResults : ( v : any ) => parse ( "AuthResults" , v ) as AuthResults ,
DKIMAuthResult : ( v : any ) => parse ( "DKIMAuthResult" , v ) as DKIMAuthResult ,
SPFAuthResult : ( v : any ) => parse ( "SPFAuthResult" , v ) as SPFAuthResult ,
DMARCSummary : ( v : any ) => parse ( "DMARCSummary" , v ) as DMARCSummary ,
Reverse : ( v : any ) => parse ( "Reverse" , v ) as Reverse ,
ClientConfigs : ( v : any ) => parse ( "ClientConfigs" , v ) as ClientConfigs ,
ClientConfigsEntry : ( v : any ) => parse ( "ClientConfigsEntry" , v ) as ClientConfigsEntry ,
2024-03-18 10:50:42 +03:00
HoldRule : ( v : any ) => parse ( "HoldRule" , v ) as HoldRule ,
Filter : ( v : any ) => parse ( "Filter" , v ) as Filter ,
2024-04-15 22:49:02 +03:00
Sort : ( v : any ) => parse ( "Sort" , v ) as Sort ,
2023-12-31 13:55:22 +03:00
Msg : ( v : any ) => parse ( "Msg" , v ) as Msg ,
IPDomain : ( v : any ) => parse ( "IPDomain" , v ) as IPDomain ,
2024-04-15 22:49:02 +03:00
MsgResult : ( v : any ) => parse ( "MsgResult" , v ) as MsgResult ,
RetiredFilter : ( v : any ) => parse ( "RetiredFilter" , v ) as RetiredFilter ,
RetiredSort : ( v : any ) => parse ( "RetiredSort" , v ) as RetiredSort ,
MsgRetired : ( v : any ) => parse ( "MsgRetired" , v ) as MsgRetired ,
HookFilter : ( v : any ) => parse ( "HookFilter" , v ) as HookFilter ,
HookSort : ( v : any ) => parse ( "HookSort" , v ) as HookSort ,
Hook : ( v : any ) => parse ( "Hook" , v ) as Hook ,
HookResult : ( v : any ) => parse ( "HookResult" , v ) as HookResult ,
HookRetiredFilter : ( v : any ) => parse ( "HookRetiredFilter" , v ) as HookRetiredFilter ,
HookRetiredSort : ( v : any ) => parse ( "HookRetiredSort" , v ) as HookRetiredSort ,
HookRetired : ( v : any ) => parse ( "HookRetired" , v ) as HookRetired ,
2023-12-31 13:55:22 +03:00
WebserverConfig : ( v : any ) => parse ( "WebserverConfig" , v ) as WebserverConfig ,
WebHandler : ( v : any ) => parse ( "WebHandler" , v ) as WebHandler ,
WebStatic : ( v : any ) => parse ( "WebStatic" , v ) as WebStatic ,
WebRedirect : ( v : any ) => parse ( "WebRedirect" , v ) as WebRedirect ,
WebForward : ( v : any ) => parse ( "WebForward" , v ) as WebForward ,
improve http request handling for internal services and multiple domains
per listener, you could enable the admin/account/webmail/webapi handlers. but
that would serve those services on their configured paths (/admin/, /,
/webmail/, /webapi/) on all domains mox would be webserving, including any
non-mail domains. so your www.example.org/admin/ would be serving the admin web
interface, with no way to disable that.
with this change, the admin interface is only served on requests to (based on
Host header):
- ip addresses
- the listener host name (explicitly configured in the listener, with fallback
to global hostname)
- "localhost" (for ssh tunnel/forwarding scenario's)
the account/webmail/webapi interfaces are served on the same domains as the
admin interface, and additionally:
- the client settings domains, as optionally configured in each Domain in
domains.conf. typically "mail.<yourdomain>".
this means the internal services are no longer served on other domains
configured in the webserver, e.g. www.example.org/admin/ will not be handled
specially.
the order of evaluation of routes/services is also changed:
before this change, the internal handlers would always be evaluated first.
with this change, only the system handlers for
MTA-STS/autoconfig/ACME-validation will be evaluated first, then the webserver
handlers, and finally the internal services (admin/account/webmail/webapi).
this allows an admin to configure overrides for some of the domains (per
hostname-matching rules explained above) that would normally serve these
services.
webserver handlers can now be configured that pass the request to an internal
service: in addition to the existing static/redirect/forward config options,
there is now an "internal" config option, naming the service
(admin/account/webmail/webapi) for handling the request. this allows enabling
the internal services on custom domains.
for issue #160 by TragicLifeHu, thanks for reporting!
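as a sketch, such a handler expressed with the WebHandler/WebInternal shapes
from this file could look like the object below; the field names used here are
assumptions for illustration, not the verified config syntax:

// Hypothetical handler serving the webmail on a custom domain.
const webmailHandler = {
	Domain: 'mail.example.org',
	PathRegexp: '^/webmail/',
	WebInternal: {
		BasePath: '/webmail/',
		Service: 'webmail', // one of admin/account/webmail/webapi
	},
}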
2024-05-11 12:13:14 +03:00
WebInternal : ( v : any ) => parse ( "WebInternal" , v ) as WebInternal ,
2023-12-31 13:55:22 +03:00
Transport : ( v : any ) => parse ( "Transport" , v ) as Transport ,
TransportSMTP : ( v : any ) => parse ( "TransportSMTP" , v ) as TransportSMTP ,
SMTPAuth : ( v : any ) => parse ( "SMTPAuth" , v ) as SMTPAuth ,
TransportSocks : ( v : any ) => parse ( "TransportSocks" , v ) as TransportSocks ,
2024-04-08 22:50:30 +03:00
TransportDirect : ( v : any ) => parse ( "TransportDirect" , v ) as TransportDirect ,
2023-12-31 13:55:22 +03:00
EvaluationStat : ( v : any ) => parse ( "EvaluationStat" , v ) as EvaluationStat ,
Evaluation : ( v : any ) => parse ( "Evaluation" , v ) as Evaluation ,
SuppressAddress : ( v : any ) => parse ( "SuppressAddress" , v ) as SuppressAddress ,
TLSResult : ( v : any ) => parse ( "TLSResult" , v ) as TLSResult ,
TLSRPTSuppressAddress : ( v : any ) => parse ( "TLSRPTSuppressAddress" , v ) as TLSRPTSuppressAddress ,
2024-04-18 12:14:24 +03:00
Dynamic : ( v : any ) => parse ( "Dynamic" , v ) as Dynamic ,
implement tls client certificate authentication
the imap & smtp servers now allow logging in with tls client authentication and
the "external" sasl authentication mechanism. email clients like thunderbird,
fairemail, k9, macos mail implement it. this seems to be the most secure among
the authentication mechanisms commonly implemented by clients. a useful property
is that an account can have a separate tls public key for each device/email
client. with tls client cert auth, authentication is also bound to the tls
connection. a mitm cannot pass the credentials on to another tls connection,
similar to scram-*-plus. though part of scram-*-plus is that clients verify
that the server knows the client credentials.
for tls client auth with imap, we send a "preauth" untagged message by default.
that puts the connection in authenticated state. given the imap connection
state machine, further authentication commands are not allowed. some clients
don't recognize the preauth message, and try to authenticate anyway, which
fails. a tls public key has a config option to disable preauth, keeping new
connections in unauthenticated state, to work with such email clients.
for smtp (submission), we don't require an explicit auth command.
both for imap and smtp, we allow a client to authenticate with another
mechanism than "external". in that case, credentials are verified, and have to
be for the same account as the tls client auth, but the address can be another
one than the login address configured with the tls public key.
only the public key is used to identify the account that is authenticating. we
ignore the rest of the certificate. expiration dates, names, constraints, etc
are not verified. no certificate authorities are involved.
users can upload their own (minimal) certificate. the account web interface
shows openssl commands you can run to generate a private key, minimal cert, and
a p12 file (the format that email clients seem to like...) containing both
private key and certificate.
the imapclient & smtpclient packages can now also use tls client auth. and so
does "mox sendmail", either with a pem file with private key and certificate,
or with just an ed25519 private key.
there are new subcommands "mox config tlspubkey ..." for
adding/removing/listing tls public keys from the cli, by the admin.
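for illustration, presenting a client certificate when connecting to the imaps
port could look like this from node's tls module; host, port and file names are
placeholders:

import {connect} from 'node:tls'
import {readFileSync} from 'node:fs'

const socket = connect({
	host: 'mail.example.org',
	port: 993, // imaps
	key: readFileSync('client-key.pem'),
	cert: readFileSync('client-cert.pem'),
}, () => {
	// With tls client auth, mox sends an untagged PREAUTH greeting by default,
	// putting the imap connection in authenticated state.
	socket.on('data', (d) => console.log(d.toString()))
})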
2024-12-06 00:41:49 +03:00
TLSPublicKey : ( v : any ) => parse ( "TLSPublicKey" , v ) as TLSPublicKey ,
2024-01-04 15:10:48 +03:00
CSRFToken : ( v : any ) => parse ( "CSRFToken" , v ) as CSRFToken ,
2023-12-31 13:55:22 +03:00
DMARCPolicy : ( v : any ) => parse ( "DMARCPolicy" , v ) as DMARCPolicy ,
Align : ( v : any ) => parse ( "Align" , v ) as Align ,
RUA : ( v : any ) => parse ( "RUA" , v ) as RUA ,
Mode : ( v : any ) => parse ( "Mode" , v ) as Mode ,
2024-04-18 12:14:24 +03:00
Localpart : ( v : any ) => parse ( "Localpart" , v ) as Localpart ,
2023-12-31 13:55:22 +03:00
IP : ( v : any ) => parse ( "IP" , v ) as IP ,
}
// Admin exports web API functions for the admin web interface. All its methods are
// exported under api/. Function calls require valid HTTP Authentication
// credentials of a user.
let defaultOptions : ClientOptions = { slicesNullable : true , mapsNullable : true , nullableOptional : true }
export class Client {
2024-01-04 15:10:48 +03:00
private baseURL : string
public authState : AuthState
public options : ClientOptions
constructor ( ) {
this . authState = { }
this . options = { ... defaultOptions }
this . baseURL = this . options . baseURL || defaultBaseURL
}
withAuthToken ( token : string ) : Client {
const c = new Client ( )
c . authState . token = token
c . options = this . options
return c
2023-12-31 13:55:22 +03:00
}
withOptions ( options : ClientOptions ) : Client {
2024-01-04 15:10:48 +03:00
const c = new Client ( )
c . authState = this . authState
c . options = { ... this . options , ... options }
return c
}
// LoginPrep returns a login token, and also sets it as cookie. Both must be
// present in the call to Login.
async LoginPrep ( ) : Promise < string > {
const fn : string = "LoginPrep"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "string" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as string
}
// Login returns a session token for the credentials, or fails with error code
// "user:badLogin". Call LoginPrep to get a loginToken.
async Login ( loginToken : string , password : string ) : Promise < CSRFToken > {
const fn : string = "Login"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "CSRFToken" ] ]
const params : any [ ] = [ loginToken , password ]
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as CSRFToken
}
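// Example (sketch): a typical login flow with this client, using only methods
// defined in this class. `password` is assumed to come from user input.
//
//   const c = new Client()
//   const loginToken = await c.LoginPrep()
//   const csrf = await c.Login(loginToken, password)
//   const authed = c.withAuthToken(csrf)
//   const checks = await authed.CheckDomain("example.org")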
// Logout invalidates the session token.
async Logout ( ) : Promise < void > {
const fn : string = "Logout"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as void
2023-12-31 13:55:22 +03:00
}
// CheckDomain checks the configuration for the domain, such as MX, SMTP STARTTLS,
// SPF, DKIM, DMARC, TLSRPT, MTASTS, autoconfig, autodiscover.
async CheckDomain ( domainName : string ) : Promise < CheckResult > {
const fn : string = "CheckDomain"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "CheckResult" ] ]
const params : any [ ] = [ domainName ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as CheckResult
2023-12-31 13:55:22 +03:00
}
// Domains returns all configured domain names, in UTF-8 for IDNA domains.
async Domains ( ) : Promise < Domain [ ] | null > {
const fn : string = "Domains"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "Domain" ] ]
const params : any [ ] = [ ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as Domain [ ] | null
2023-12-31 13:55:22 +03:00
}
// Domain returns the dns domain for a (potentially unicode as IDNA) domain name.
async Domain ( domain : string ) : Promise < Domain > {
const fn : string = "Domain"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "Domain" ] ]
const params : any [ ] = [ domain ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as Domain
2023-12-31 13:55:22 +03:00
}
// ParseDomain parses a domain, possibly an IDNA domain.
async ParseDomain ( domain : string ) : Promise < Domain > {
const fn : string = "ParseDomain"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "Domain" ] ]
const params : any [ ] = [ domain ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as Domain
2023-12-31 13:55:22 +03:00
}
2024-04-18 12:14:24 +03:00
// DomainConfig returns the configuration for a domain.
async DomainConfig ( domain : string ) : Promise < ConfigDomain > {
const fn : string = "DomainConfig"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "ConfigDomain" ] ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as ConfigDomain
}
2023-12-31 13:55:22 +03:00
// DomainLocalparts returns the encoded localparts and accounts configured in domain.
2024-04-24 20:15:30 +03:00
async DomainLocalparts ( domain : string ) : Promise < [ { [ key : string ] : string } , { [ key : string ] : Alias } ] > {
2023-12-31 13:55:22 +03:00
const fn : string = "DomainLocalparts"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
2024-04-24 20:15:30 +03:00
const returnTypes : string [ ] [ ] = [ [ "{}" , "string" ] , [ "{}" , "Alias" ] ]
2023-12-31 13:55:22 +03:00
const params : any [ ] = [ domain ]
2024-04-24 20:15:30 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as [ { [ key : string ] : string } , { [ key : string ] : Alias } ]
2023-12-31 13:55:22 +03:00
}
// Accounts returns the names of all configured accounts.
async Accounts ( ) : Promise < string [ ] | null > {
const fn : string = "Accounts"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "string" ] ]
const params : any [ ] = [ ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as string [ ] | null
2023-12-31 13:55:22 +03:00
}
// Account returns the parsed configuration of an account.
2024-04-14 18:18:20 +03:00
async Account ( account : string ) : Promise < [ Account , number ] > {
2023-12-31 13:55:22 +03:00
const fn : string = "Account"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
2024-04-14 18:18:20 +03:00
const returnTypes : string [ ] [ ] = [ [ "Account" ] , [ "int64" ] ]
2023-12-31 13:55:22 +03:00
const params : any [ ] = [ account ]
2024-04-14 18:18:20 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as [ Account , number ]
2023-12-31 13:55:22 +03:00
}
// ConfigFiles returns the paths and contents of the static and dynamic configuration files.
async ConfigFiles ( ) : Promise < [ string , string , string , string ] > {
const fn : string = "ConfigFiles"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] ]
const params : any [ ] = [ ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { ... this . options } , paramTypes , returnTypes , fn , params ) as [ string , string , string , string ]
2023-12-31 13:55:22 +03:00
}
// MTASTSPolicies returns all mtasts policies from the cache.
async MTASTSPolicies ( ) : Promise < PolicyRecord [ ] | null > {
const fn : string = "MTASTSPolicies"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "PolicyRecord" ] ]
const params : any [ ] = [ ]
2024-01-04 15:10:48 +03:00
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as PolicyRecord [ ] | null
}
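// Example (hypothetical usage, not part of the generated client): list the
// cached MTA-STS policies, assuming an instantiated client such as
// `const client = new api.Client()`.
//
//	const policies = await client.MTASTSPolicies()
//	console.log("cached MTA-STS policies:", (policies || []).length)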
// TLSReports returns TLS reports overlapping with period start/end, for the given
// policy domain (or all domains if empty). The reports are sorted first by period
// end (most recent first), then by policy domain.
async TLSReports ( start : Date , end : Date , policyDomain : string ) : Promise < TLSReportRecord [ ] | null > {
const fn : string = "TLSReports"
const paramTypes : string [ ] [ ] = [ [ "timestamp" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "TLSReportRecord" ] ]
const params : any [ ] = [ start , end , policyDomain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSReportRecord [ ] | null
}
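// Example (hypothetical; the 30-day window and domain are illustrative): fetch
// TLS reports for one policy domain, or pass "" for all domains.
//
//	const end = new Date()
//	const start = new Date(end.getTime() - 30*24*60*60*1000)
//	const reports = await client.TLSReports(start, end, "example.org")
//	console.log((reports || []).length, "TLS reports")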
// TLSReportID returns a single TLS report.
async TLSReportID ( domain : string , reportID : number ) : Promise < TLSReportRecord > {
const fn : string = "TLSReportID"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ [ "TLSReportRecord" ] ]
const params : any [ ] = [ domain , reportID ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSReportRecord
}
// TLSRPTSummaries returns a summary of received TLS reports overlapping with
// period start/end for one or all domains (when domain is empty).
// The returned summaries are ordered by domain name.
async TLSRPTSummaries ( start : Date , end : Date , policyDomain : string ) : Promise < TLSRPTSummary [ ] | null > {
const fn : string = "TLSRPTSummaries"
const paramTypes : string [ ] [ ] = [ [ "timestamp" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "TLSRPTSummary" ] ]
const params : any [ ] = [ start , end , policyDomain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSRPTSummary [ ] | null
}
// DMARCReports returns DMARC reports overlapping with period start/end, for the
// given domain (or all domains if empty). The reports are sorted first by period
// end (most recent first), then by domain.
async DMARCReports ( start : Date , end : Date , domain : string ) : Promise < DomainFeedback [ ] | null > {
const fn : string = "DMARCReports"
const paramTypes : string [ ] [ ] = [ [ "timestamp" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "DomainFeedback" ] ]
const params : any [ ] = [ start , end , domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as DomainFeedback [ ] | null
}
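// Example (hypothetical; reuses `client` and the start/end Dates from the
// sketch above): fetch DMARC feedback for all domains, most recent first.
//
//	const feedback = await client.DMARCReports(start, end, "")
//	// an individual report can then be loaded with client.DMARCReportID(domain, reportID)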
// DMARCReportID returns a single DMARC report.
async DMARCReportID ( domain : string , reportID : number ) : Promise < DomainFeedback > {
const fn : string = "DMARCReportID"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ [ "DomainFeedback" ] ]
const params : any [ ] = [ domain , reportID ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as DomainFeedback
}
// DMARCSummaries returns a summary of received DMARC reports overlapping with
// period start/end for one or all domains (when domain is empty).
// The returned summaries are ordered by domain name.
async DMARCSummaries ( start : Date , end : Date , domain : string ) : Promise < DMARCSummary [ ] | null > {
const fn : string = "DMARCSummaries"
const paramTypes : string [ ] [ ] = [ [ "timestamp" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "DMARCSummary" ] ]
const params : any [ ] = [ start , end , domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as DMARCSummary [ ] | null
}
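// Example (hypothetical): aggregate DMARC results per domain over a period
// instead of fetching each report individually.
//
//	const summaries = await client.DMARCSummaries(start, end, "")
//	for (const s of summaries || []) {
//		console.log(s)
//	}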
// LookupIP does a reverse lookup of ip.
async LookupIP ( ip : string ) : Promise < Reverse > {
const fn : string = "LookupIP"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "Reverse" ] ]
const params : any [ ] = [ ip ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as Reverse
}
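// Example (hypothetical; the IP is a documentation address):
//
//	const rev = await client.LookupIP("198.51.100.1")
//	console.log(rev)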
// DNSBLStatus returns the IPs from which outgoing connections may be made and
// their current status in the DNSBLs that are configured. The IPs are typically the
// configured listen IPs, or otherwise IPs on the machine's network interfaces, with
// internal/private IPs removed.
//
// The returned value maps IPs to per-DNSBL statuses, where "pass" means not listed and
// anything else is an error string, e.g. "fail: ..." or "temperror: ...".
async DNSBLStatus ( ) : Promise < [ { [ key : string ] : { [ key : string ] : string } } , Domain [ ] | null , Domain [ ] | null ] > {
const fn : string = "DNSBLStatus"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "{}" , "{}" , "string" ] , [ "[]" , "Domain" ] , [ "[]" , "Domain" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as [ { [ key : string ] : { [ key : string ] : string } } , Domain [ ] | null , Domain [ ] | null ]
}
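// Example (hypothetical): destructure the three-part return value and print
// any non-"pass" status. The two Domain lists are DNSBL zones; their exact
// roles are not specified in this file.
//
//	const [statuses, zones1, zones2] = await client.DNSBLStatus()
//	for (const [ip, perDNSBL] of Object.entries(statuses)) {
//		for (const [zone, status] of Object.entries(perDNSBL)) {
//			if (status !== "pass") console.log(ip, zone, status)
//		}
//	}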
async MonitorDNSBLsSave ( text : string ) : Promise < void > {
const fn : string = "MonitorDNSBLsSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ text ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainRecords returns lines describing DNS records that should exist for the
// configured domain.
async DomainRecords ( domain : string ) : Promise < string [ ] | null > {
const fn : string = "DomainRecords"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "string" ] ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as string [ ] | null
}
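// Example (hypothetical): print the suggested DNS records as a zone-file
// fragment for copy/paste.
//
//	const lines = await client.DomainRecords("example.org")
//	console.log((lines || []).join("\n"))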
// DomainAdd adds a new domain and reloads the configuration.
async DomainAdd ( domain : string , accountName : string , localpart : string ) : Promise < void > {
const fn : string = "DomainAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domain , accountName , localpart ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
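// Example (hypothetical; account name and localpart are made up — the localpart
// appears to form the initial address localpart@domain for the account, though
// that is not documented in this file):
//
//	await client.DomainAdd("example.org", "alice", "alice")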
// DomainRemove removes an existing domain and reloads the configuration.
async DomainRemove ( domain : string ) : Promise < void > {
const fn : string = "DomainRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// AccountAdd adds a new account, with an initial email address, and
// reloads the configuration.
async AccountAdd ( accountName : string , address : string ) : Promise < void > {
const fn : string = "AccountAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ accountName , address ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
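// Example (hypothetical): create an account with its initial address, then
// attach an extra address with AddressAdd (declared below).
//
//	await client.AccountAdd("bob", "bob@example.org")
//	await client.AddressAdd("postmaster@example.org", "bob")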
// AccountRemove removes an existing account and reloads the configuration.
async AccountRemove ( accountName : string ) : Promise < void > {
const fn : string = "AccountRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ accountName ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// AddressAdd adds a new address to the account, which must already exist.
async AddressAdd ( address : string , accountName : string ) : Promise < void > {
const fn : string = "AddressAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ address , accountName ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// AddressRemove removes an existing address.
async AddressRemove ( address : string ) : Promise < void > {
const fn : string = "AddressRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ address ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// SetPassword saves a new password for an account, invalidating the previous password.
// Sessions are not interrupted, and will keep working. New login attempts must use the new password.
// Password must be at least 8 characters.
async SetPassword ( accountName : string , password : string ) : Promise < void > {
const fn : string = "SetPassword"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ accountName , password ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
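// Example (hypothetical; the password is illustrative and must be at least 8
// characters):
//
//	await client.SetPassword("bob", "correct horse battery staple")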
// AccountSettingsSave sets new settings for an account that only an admin can set.
async AccountSettingsSave ( accountName : string , maxOutgoingMessagesPerDay : number , maxFirstTimeRecipientsPerDay : number , maxMsgSize : number , firstTimeSenderDelay : boolean ) : Promise < void > {
const fn : string = "AccountSettingsSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "int32" ] , [ "int32" ] , [ "int64" ] , [ "bool" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ accountName , maxOutgoingMessagesPerDay , maxFirstTimeRecipientsPerDay , maxMsgSize , firstTimeSenderDelay ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
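// Example (illustrative sketch, not part of the generated client; assumes an
// authenticated instance named "client" and an existing account "carol"):
//
//	// Cap outgoing volume and message size, enable the first-time sender delay.
//	await client.AccountSettingsSave("carol", 1000, 100, 100*1024*1024, true)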
// ClientConfigsDomain returns configurations for email clients, IMAP and
// Submission (SMTP) for the domain.
async ClientConfigsDomain ( domain : string ) : Promise < ClientConfigs > {
const fn : string = "ClientConfigsDomain"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "ClientConfigs" ] ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as ClientConfigs
}
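// Example (sketch; "client" is an assumed authenticated instance):
//
//	const cc = await client.ClientConfigsDomain("example.org")
//	// cc lists the IMAP and Submission (SMTP) hosts/ports to configure in mail clients.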
// QueueSize returns the number of messages currently in the outgoing queue.
async QueueSize ( ) : Promise < number > {
const fn : string = "QueueSize"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// QueueHoldRuleList lists the hold rules.
async QueueHoldRuleList ( ) : Promise < HoldRule [ ] | null > {
const fn : string = "QueueHoldRuleList"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "HoldRule" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as HoldRule [ ] | null
}
// QueueHoldRuleAdd adds a hold rule. Newly submitted and existing messages
// matching the hold rule will be marked "on hold".
async QueueHoldRuleAdd ( hr : HoldRule ) : Promise < HoldRule > {
const fn : string = "QueueHoldRuleAdd"
const paramTypes : string [ ] [ ] = [ [ "HoldRule" ] ]
const returnTypes : string [ ] [ ] = [ [ "HoldRule" ] ]
const params : any [ ] = [ hr ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as HoldRule
}
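// Example (sketch; "client" is an assumed authenticated instance; an empty rule
// matches all messages and a zero ID lets the server assign one; field names are
// assumed to follow the HoldRule type defined elsewhere in this file):
//
//	const rule = await client.QueueHoldRuleAdd({ ID: 0, Account: "", SenderDomain: { ASCII: "", Unicode: "" }, RecipientDomain: { ASCII: "", Unicode: "" }, SenderDomainStr: "", RecipientDomainStr: "" })
//	await client.QueueHoldRuleRemove(rule.ID) // Undo again.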
// QueueHoldRuleRemove removes a hold rule. The Hold field of messages in
// the queue is not changed.
async QueueHoldRuleRemove ( holdRuleID : number ) : Promise < void > {
const fn : string = "QueueHoldRuleRemove"
const paramTypes : string [ ] [ ] = [ [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ holdRuleID ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// QueueList returns the messages currently in the outgoing queue.
async QueueList ( filter : Filter , sort : Sort ) : Promise < Msg [ ] | null > {
const fn : string = "QueueList"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "Sort" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "Msg" ] ]
const params : any [ ] = [ filter , sort ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as Msg [ ] | null
}
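// Example (sketch; "client" is an assumed authenticated instance; the empty
// Filter and Sort objects are placeholders, see those types for real field values):
//
//	const msgs = await client.QueueList({} as Filter, {} as Sort) ?? []
//	console.log(msgs.length, "message(s) in the queue")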
// QueueNextAttemptSet sets a new time for next delivery attempt of matching
// messages from the queue.
async QueueNextAttemptSet ( filter : Filter , minutes : number ) : Promise < number > {
const fn : string = "QueueNextAttemptSet"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "int32" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , minutes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// QueueNextAttemptAdd adds a duration to the time of next delivery attempt of
// matching messages from the queue.
async QueueNextAttemptAdd ( filter : Filter , minutes : number ) : Promise < number > {
const fn : string = "QueueNextAttemptAdd"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "int32" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , minutes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
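// Example (sketch, assumed "client"; an empty Filter matches all queued messages):
//
//	await client.QueueNextAttemptSet({} as Filter, 0)  // Attempt delivery right away.
//	await client.QueueNextAttemptAdd({} as Filter, 60) // Push delivery back an hour.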
// QueueHoldSet sets the Hold field of matching messages in the queue.
async QueueHoldSet ( filter : Filter , onHold : boolean ) : Promise < number > {
const fn : string = "QueueHoldSet"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "bool" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , onHold ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// QueueFail fails delivery for matching messages, causing DSNs to be sent.
async QueueFail ( filter : Filter ) : Promise < number > {
const fn : string = "QueueFail"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// QueueDrop removes matching messages from the queue.
async QueueDrop ( filter : Filter ) : Promise < number > {
const fn : string = "QueueDrop"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// QueueRequireTLSSet updates the requiretls field for matching messages in the
// queue, to be used for the next delivery.
async QueueRequireTLSSet ( filter : Filter , requireTLS : boolean | null ) : Promise < number > {
const fn : string = "QueueRequireTLSSet"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "nullable" , "bool" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , requireTLS ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
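// Example (sketch, assumed "client"): the nullable boolean selects between
// requiring verified TLS (true), not requiring TLS (false), and the default
// policy (null):
//
//	await client.QueueRequireTLSSet({} as Filter, null)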
// QueueTransportSet sets the transport to use for the next delivery of
// matching messages in the queue.
async QueueTransportSet ( filter : Filter , transport : string ) : Promise < number > {
const fn : string = "QueueTransportSet"
const paramTypes : string [ ] [ ] = [ [ "Filter" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , transport ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// RetiredList returns messages retired from the queue (delivery could
// have succeeded or failed).
async RetiredList ( filter : RetiredFilter , sort : RetiredSort ) : Promise < MsgRetired [ ] | null > {
const fn : string = "RetiredList"
const paramTypes : string [ ] [ ] = [ [ "RetiredFilter" ] , [ "RetiredSort" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "MsgRetired" ] ]
const params : any [ ] = [ filter , sort ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as MsgRetired [ ] | null
}
// HookQueueSize returns the number of webhooks still to be delivered.
async HookQueueSize ( ) : Promise < number > {
const fn : string = "HookQueueSize"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// HookList lists webhooks still to be delivered.
async HookList ( filter : HookFilter , sort : HookSort ) : Promise < Hook [ ] | null > {
const fn : string = "HookList"
const paramTypes : string [ ] [ ] = [ [ "HookFilter" ] , [ "HookSort" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "Hook" ] ]
const params : any [ ] = [ filter , sort ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as Hook [ ] | null
}
// HookNextAttemptSet sets a new time for next delivery attempt of matching
// hooks from the queue.
async HookNextAttemptSet ( filter : HookFilter , minutes : number ) : Promise < number > {
const fn : string = "HookNextAttemptSet"
const paramTypes : string [ ] [ ] = [ [ "HookFilter" ] , [ "int32" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , minutes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// HookNextAttemptAdd adds a duration to the time of next delivery attempt of
// matching hooks from the queue.
async HookNextAttemptAdd ( filter : HookFilter , minutes : number ) : Promise < number > {
const fn : string = "HookNextAttemptAdd"
const paramTypes : string [ ] [ ] = [ [ "HookFilter" ] , [ "int32" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter , minutes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
// HookRetiredList lists retired webhooks.
async HookRetiredList ( filter : HookRetiredFilter , sort : HookRetiredSort ) : Promise < HookRetired [ ] | null > {
const fn : string = "HookRetiredList"
const paramTypes : string [ ] [ ] = [ [ "HookRetiredFilter" ] , [ "HookRetiredSort" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "HookRetired" ] ]
const params : any [ ] = [ filter , sort ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as HookRetired [ ] | null
}
// HookCancel prevents further delivery attempts of matching webhooks.
async HookCancel ( filter : HookFilter ) : Promise < number > {
const fn : string = "HookCancel"
const paramTypes : string [ ] [ ] = [ [ "HookFilter" ] ]
const returnTypes : string [ ] [ ] = [ [ "int32" ] ]
const params : any [ ] = [ filter ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as number
}
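// Example (sketch, assumed "client"): the Hook* calls mirror the message-queue
// operations but act on queued webhook deliveries:
//
//	if (await client.HookQueueSize() > 0) {
//		const n = await client.HookCancel({} as HookFilter)
//		console.log("cancelled", n, "webhook(s)")
//	}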
// LogLevels returns the current log levels.
async LogLevels ( ) : Promise < { [ key : string ] : string } > {
const fn : string = "LogLevels"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "{}" , "string" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as { [ key : string ] : string }
}
// LogLevelSet sets a log level for a package.
async LogLevelSet ( pkg : string , levelStr : string ) : Promise < void > {
const fn : string = "LogLevelSet"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ pkg , levelStr ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// LogLevelRemove removes a log level for a package, which cannot be the empty string.
async LogLevelRemove ( pkg : string ) : Promise < void > {
const fn : string = "LogLevelRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ pkg ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
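// Example (sketch, assumed "client"; package and level names are illustrative,
// e.g. mox accepts levels such as "debug" and "info"):
//
//	await client.LogLevelSet("smtpserver", "debug")
//	await client.LogLevelRemove("smtpserver") // Back to the global level.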
// CheckUpdatesEnabled returns whether checking for updates is enabled.
async CheckUpdatesEnabled ( ) : Promise < boolean > {
const fn : string = "CheckUpdatesEnabled"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "bool" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as boolean
}
// WebserverConfig returns the current webserver config.
async WebserverConfig ( ) : Promise < WebserverConfig > {
const fn : string = "WebserverConfig"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "WebserverConfig" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as WebserverConfig
}
// WebserverConfigSave saves a new webserver config. If oldConf is not equal to
// the current config, an error is returned.
async WebserverConfigSave ( oldConf : WebserverConfig , newConf : WebserverConfig ) : Promise < WebserverConfig > {
const fn : string = "WebserverConfigSave"
const paramTypes : string [ ] [ ] = [ [ "WebserverConfig" ] , [ "WebserverConfig" ] ]
const returnTypes : string [ ] [ ] = [ [ "WebserverConfig" ] ]
const params : any [ ] = [ oldConf , newConf ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as WebserverConfig
}
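// Example (sketch, assumed "client"): passing the previously fetched config as
// oldConf makes the save fail on concurrent modification instead of silently
// overwriting it:
//
//	const conf = await client.WebserverConfig()
//	const updated = { ...conf } // Modify redirects/handlers here.
//	await client.WebserverConfigSave(conf, updated)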
// Transports returns the configured transports, for sending email.
async Transports ( ) : Promise < { [ key : string ] : Transport } > {
const fn : string = "Transports"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "{}" , "Transport" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as { [ key : string ] : Transport }
}
// DMARCEvaluationStats returns a map of all domains with evaluations to a count of
// the evaluations and whether those evaluations will cause a report to be sent.
async DMARCEvaluationStats ( ) : Promise < { [ key : string ] : EvaluationStat } > {
const fn : string = "DMARCEvaluationStats"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "{}" , "EvaluationStat" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as { [ key : string ] : EvaluationStat }
}
// DMARCEvaluationsDomain returns all evaluations for aggregate reports for the
// domain, sorted from oldest to most recent.
async DMARCEvaluationsDomain ( domain : string ) : Promise < [ Domain , Evaluation [ ] | null ] > {
const fn : string = "DMARCEvaluationsDomain"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "Domain" ] , [ "[]" , "Evaluation" ] ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as [ Domain , Evaluation [ ] | null ]
}
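// Example (sketch; `client` is an assumed instance of this class). The result
// is a tuple, so destructure it; the evaluations list may be null:
//
//	const [dom, evals] = await client.DMARCEvaluationsDomain("example.org")
//	console.log(dom.ASCII, "has", (evals || []).length, "evaluations")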
// DMARCRemoveEvaluations removes evaluations for a domain.
async DMARCRemoveEvaluations ( domain : string ) : Promise < void > {
const fn : string = "DMARCRemoveEvaluations"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DMARCSuppressAdd adds a reporting address to the suppress list. Outgoing
// reports will be suppressed for a period.
async DMARCSuppressAdd ( reportingAddress : string , until : Date , comment : string ) : Promise < void > {
const fn : string = "DMARCSuppressAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ reportingAddress , until , comment ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
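// Example (sketch; `client` is an assumed instance of this class). The until
// parameter is a plain Date, here one week from now:
//
//	const until = new Date(Date.now() + 7 * 24 * 3600 * 1000)
//	await client.DMARCSuppressAdd("dmarc-reports@example.com", until, "address bounces")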
// DMARCSuppressList returns all reporting addresses on the suppress list.
async DMARCSuppressList ( ) : Promise < SuppressAddress [ ] | null > {
const fn : string = "DMARCSuppressList"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "SuppressAddress" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as SuppressAddress [ ] | null
}
// DMARCSuppressRemove removes a reporting address record from the suppress list.
async DMARCSuppressRemove ( id : number ) : Promise < void > {
const fn : string = "DMARCSuppressRemove"
const paramTypes : string [ ] [ ] = [ [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ id ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DMARCSuppressExtend updates the until field of a suppressed reporting address record.
async DMARCSuppressExtend ( id : number , until : Date ) : Promise < void > {
const fn : string = "DMARCSuppressExtend"
const paramTypes : string [ ] [ ] = [ [ "int64" ] , [ "timestamp" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ id , until ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
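// Example (sketch; `client` is an assumed instance of this class, and each
// listed record is assumed to carry an ID field, matching the id that
// DMARCSuppressRemove/DMARCSuppressExtend take): extend all current
// suppressions by 30 days:
//
//	const suppressed = await client.DMARCSuppressList() || []
//	const until = new Date(Date.now() + 30 * 24 * 3600 * 1000)
//	for (const s of suppressed) {
//		await client.DMARCSuppressExtend(s.ID, until)
//	}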
// TLSRPTResults returns all TLSRPT results in the database.
async TLSRPTResults ( ) : Promise < TLSResult [ ] | null > {
const fn : string = "TLSRPTResults"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "TLSResult" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSResult [ ] | null
}
// TLSRPTResultsDomain returns the TLS results for a domain.
async TLSRPTResultsDomain ( isRcptDom : boolean , policyDomain : string ) : Promise < [ Domain , TLSResult [ ] | null ] > {
const fn : string = "TLSRPTResultsDomain"
const paramTypes : string [ ] [ ] = [ [ "bool" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "Domain" ] , [ "[]" , "TLSResult" ] ]
const params : any [ ] = [ isRcptDom , policyDomain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as [ Domain , TLSResult [ ] | null ]
}
// LookupTLSRPTRecord looks up the TLSRPT record for a domain and returns the
// parsed form, the original TXT form from DNS, and any error as a string.
async LookupTLSRPTRecord ( domain : string ) : Promise < [ TLSRPTRecord | null , string , string ] > {
const fn : string = "LookupTLSRPTRecord"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "nullable" , "TLSRPTRecord" ] , [ "string" ] , [ "string" ] ]
const params : any [ ] = [ domain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as [ TLSRPTRecord | null , string , string ]
}
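// Example (sketch; `client` is an assumed instance of this class). The parsed
// record is null when absent or unparseable; the error comes back as a string
// rather than being thrown:
//
//	const [record, txt, err] = await client.LookupTLSRPTRecord("example.org")
//	if (!record) {
//		console.log("no usable TLSRPT record:", err, "txt from DNS:", txt)
//	}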
// TLSRPTRemoveResults removes the TLS results for a domain for the given day. If
// day is empty, all results are removed.
async TLSRPTRemoveResults ( isRcptDom : boolean , domain : string , day : string ) : Promise < void > {
const fn : string = "TLSRPTRemoveResults"
const paramTypes : string [ ] [ ] = [ [ "bool" ] , [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ isRcptDom , domain , day ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
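// Example (sketch; `client` is an assumed instance of this class; the day
// format is an assumption, a UTC date as stored with the results, e.g.
// "20240101"). An empty day removes results for all days:
//
//	await client.TLSRPTRemoveResults(true, "example.org", "20240101")
//	await client.TLSRPTRemoveResults(true, "example.org", "") // all days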
// TLSRPTSuppressAdd adds a reporting address to the suppress list. Outgoing
// reports will be suppressed for a period.
async TLSRPTSuppressAdd ( reportingAddress : string , until : Date , comment : string ) : Promise < void > {
const fn : string = "TLSRPTSuppressAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "timestamp" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ reportingAddress , until , comment ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// TLSRPTSuppressList returns all reporting addresses on the suppress list.
async TLSRPTSuppressList ( ) : Promise < TLSRPTSuppressAddress [ ] | null > {
const fn : string = "TLSRPTSuppressList"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "TLSRPTSuppressAddress" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSRPTSuppressAddress [ ] | null
}
// TLSRPTSuppressRemove removes a reporting address record from the suppress list.
async TLSRPTSuppressRemove ( id : number ) : Promise < void > {
const fn : string = "TLSRPTSuppressRemove"
const paramTypes : string [ ] [ ] = [ [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ id ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// TLSRPTSuppressExtend updates the until field of a suppressed reporting address record.
async TLSRPTSuppressExtend ( id : number , until : Date ) : Promise < void > {
const fn : string = "TLSRPTSuppressExtend"
const paramTypes : string [ ] [ ] = [ [ "int64" ] , [ "timestamp" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ id , until ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// LookupCid turns an ID from a Received header into a cid as used in logging.
async LookupCid ( recvID : string ) : Promise < string > {
const fn : string = "LookupCid"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "string" ] ]
const params : any [ ] = [ recvID ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as string
}
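// Example (sketch; `client` is an assumed instance of this class, and the
// receive ID is a hypothetical value copied from a Received header):
//
//	const cid = await client.LookupCid("AbCdEfGh")
//	console.log("search the server logs for cid", cid)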
// Config returns the dynamic config.
async Config ( ) : Promise < Dynamic > {
const fn : string = "Config"
const paramTypes : string [ ] [ ] = [ ]
const returnTypes : string [ ] [ ] = [ [ "Dynamic" ] ]
const params : any [ ] = [ ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as Dynamic
}
// AccountRoutesSave saves routes for an account.
async AccountRoutesSave ( accountName : string , routes : Route [ ] | null ) : Promise < void > {
const fn : string = "AccountRoutesSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "[]" , "Route" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ accountName , routes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainRoutesSave saves routes for a domain.
async DomainRoutesSave ( domainName : string , routes : Route [ ] | null ) : Promise < void > {
const fn : string = "DomainRoutesSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "[]" , "Route" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , routes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// RoutesSave saves global routes.
async RoutesSave ( routes : Route [ ] | null ) : Promise < void > {
const fn : string = "RoutesSave"
const paramTypes : string [ ] [ ] = [ [ "[]" , "Route" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ routes ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainDescriptionSave saves the description for a domain.
async DomainDescriptionSave ( domainName : string , descr : string ) : Promise < void > {
const fn : string = "DomainDescriptionSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , descr ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainClientSettingsDomainSave saves the client settings domain for a domain.
async DomainClientSettingsDomainSave ( domainName : string , clientSettingsDomain : string ) : Promise < void > {
const fn : string = "DomainClientSettingsDomainSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , clientSettingsDomain ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainLocalpartConfigSave saves the localpart catchall and case-sensitive
// settings for a domain.
async DomainLocalpartConfigSave ( domainName : string , localpartCatchallSeparator : string , localpartCaseSensitive : boolean ) : Promise < void > {
const fn : string = "DomainLocalpartConfigSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "bool" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , localpartCatchallSeparator , localpartCaseSensitive ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
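// Example (sketch; `client` is an assumed instance of this class): allow
// "user+anything@domain"-style addresses with "+" as the catchall separator,
// keeping localparts case-insensitive:
//
//	await client.DomainLocalpartConfigSave("example.org", "+", false)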
// DomainDMARCAddressSave saves the DMARC reporting address/processing
// configuration for a domain. If localpart is empty, processing of reports is
// disabled.
async DomainDMARCAddressSave ( domainName : string , localpart : string , domain : string , account : string , mailbox : string ) : Promise < void > {
const fn : string = "DomainDMARCAddressSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , localpart , domain , account , mailbox ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainTLSRPTAddressSave saves the TLS reporting address/processing
// configuration for a domain. If localpart is empty, processing of reports is
// disabled.
async DomainTLSRPTAddressSave ( domainName : string , localpart : string , domain : string , account : string , mailbox : string ) : Promise < void > {
const fn : string = "DomainTLSRPTAddressSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , localpart , domain , account , mailbox ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainMTASTSSave saves the MTASTS policy for a domain. If policyID is empty,
// no MTASTS policy is served.
async DomainMTASTSSave ( domainName : string , policyID : string , mode : Mode , maxAge : number , mx : string [ ] | null ) : Promise < void > {
const fn : string = "DomainMTASTSSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "Mode" ] , [ "int64" ] , [ "[]" , "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , policyID , mode , maxAge , mx ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
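// Example (sketch; `client` is an assumed instance of this class; the Mode
// member name is assumed to follow the Go constants, and the policy ID is an
// arbitrary example). Mode is one of the RFC 8461 policy modes (enforce,
// testing, none) and maxAge is the policy lifetime in seconds:
//
//	await client.DomainMTASTSSave("example.org", "20240101T000000", Mode.ModeEnforce, 86400, ["mail.example.org"])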
// DomainDKIMAdd adds a DKIM selector for a domain, generating a new private
// key. The selector is not enabled for signing.
async DomainDKIMAdd ( domainName : string , selector : string , algorithm : string , hash : string , headerRelaxed : boolean , bodyRelaxed : boolean , seal : boolean , headers : string [ ] | null , lifetime : number ) : Promise < void > {
const fn : string = "DomainDKIMAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "string" ] , [ "string" ] , [ "bool" ] , [ "bool" ] , [ "bool" ] , [ "[]" , "string" ] , [ "int64" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , selector , algorithm , hash , headerRelaxed , bodyRelaxed , seal , headers , lifetime ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainDKIMRemove removes a DKIM selector for a domain.
async DomainDKIMRemove ( domainName : string , selector : string ) : Promise < void > {
const fn : string = "DomainDKIMRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , selector ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
// DomainDKIMSave saves the settings of selectors, and which to enable for
// signing, for a domain. All currently configured selectors must be present;
// selectors cannot be added/removed with this function.
async DomainDKIMSave ( domainName : string , selectors : { [ key : string ] : Selector } , sign : string [ ] | null ) : Promise < void > {
const fn : string = "DomainDKIMSave"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "{}" , "Selector" ] , [ "[]" , "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ domainName , selectors , sign ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
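// Example (illustrative sketch; assumes an instantiated API client "client";
// the algorithm/hash values are examples): generate a new ed25519 selector,
// not yet used for signing:
//
//	await client.DomainDKIMAdd('example.com', 'sel2024a', 'ed25519', 'sha256', true, true, true, null, 0)
//
// Enabling it for signing afterwards is done with DomainDKIMSave, which must be
// passed all configured selectors along with the list of selectors to sign with.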
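// AliasAdd adds an alias with localpart aliaslp in the given domain, with the given settings.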
async AliasAdd ( aliaslp : string , domainName : string , alias : Alias ) : Promise < void > {
const fn : string = "AliasAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "Alias" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ aliaslp , domainName , alias ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
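// AliasUpdate updates the postPublic, listMembers and allowMsgFrom settings of an existing alias.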
async AliasUpdate ( aliaslp : string , domainName : string , postPublic : boolean , listMembers : boolean , allowMsgFrom : boolean ) : Promise < void > {
const fn : string = "AliasUpdate"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "bool" ] , [ "bool" ] , [ "bool" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ aliaslp , domainName , postPublic , listMembers , allowMsgFrom ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
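// AliasRemove removes an alias.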
async AliasRemove ( aliaslp : string , domainName : string ) : Promise < void > {
const fn : string = "AliasRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ aliaslp , domainName ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
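// AliasAddressesAdd adds member addresses to an alias.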
async AliasAddressesAdd ( aliaslp : string , domainName : string , addresses : string [ ] | null ) : Promise < void > {
const fn : string = "AliasAddressesAdd"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "[]" , "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ aliaslp , domainName , addresses ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
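// AliasAddressesRemove removes member addresses from an alias.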
async AliasAddressesRemove ( aliaslp : string , domainName : string , addresses : string [ ] | null ) : Promise < void > {
const fn : string = "AliasAddressesRemove"
const paramTypes : string [ ] [ ] = [ [ "string" ] , [ "string" ] , [ "[]" , "string" ] ]
const returnTypes : string [ ] [ ] = [ ]
const params : any [ ] = [ aliaslp , domainName , addresses ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as void
}
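// TLSPublicKeys returns the TLS public keys used for TLS client certificate
// authentication. If accountOpt is non-empty, only keys for that account are
// returned.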
async TLSPublicKeys ( accountOpt : string ) : Promise < TLSPublicKey [ ] | null > {
const fn : string = "TLSPublicKeys"
const paramTypes : string [ ] [ ] = [ [ "string" ] ]
const returnTypes : string [ ] [ ] = [ [ "[]" , "TLSPublicKey" ] ]
const params : any [ ] = [ accountOpt ]
return await _sherpaCall ( this . baseURL , this . authState , { . . . this . options } , paramTypes , returnTypes , fn , params ) as TLSPublicKey [ ] | null
}
}
export const defaultBaseURL = ( function ( ) {
let p = location . pathname
if ( p && p [ p . length - 1 ] !== '/' ) {
let l = location . pathname . split ( '/' )
l = l . slice ( 0 , l . length - 1 )
p = '/' + l . join ( '/' ) + '/'
}
return location . protocol + '//' + location . host + p + 'api/'
} ) ( )
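// For example, a page served at https://mail.example/admin/ gets
// 'https://mail.example/admin/api/' as its default base URL.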
// NOTE: code below is shared between github.com/mjl-/sherpaweb and github.com/mjl-/sherpats.
// KEEP IN SYNC.
export const supportedSherpaVersion = 1
export interface Section {
Name : string
Docs : string
Functions : Function [ ]
Sections : Section [ ]
Structs : Struct [ ]
Ints : Ints [ ]
Strings : Strings [ ]
Version : string // only for top-level section
SherpaVersion : number // only for top-level section
SherpadocVersion : number // only for top-level section
}
export interface Function {
Name : string
Docs : string
Params : Arg [ ]
Returns : Arg [ ]
}
export interface Arg {
Name : string
Typewords : string [ ]
}
export interface Struct {
Name : string
Docs : string
Fields : Field [ ]
}
export interface Field {
Name : string
Docs : string
Typewords : string [ ]
}
export interface Ints {
Name : string
Docs : string
Values : {
Name : string
Value : number
Docs : string
} [ ] | null
}
export interface Strings {
Name : string
Docs : string
Values : {
Name : string
Value : string
Docs : string
} [ ] | null
}
export type NamedType = Struct | Strings | Ints
export type TypenameMap = { [ k : string ] : NamedType }
// verifyArg typechecks "v" against "typewords", returning a new (possibly modified) value for JSON-encoding.
// toJS indicates if the data is coming into JS. If so, timestamps are turned into JS Dates. Otherwise, JS Dates are turned into strings.
// allowUnknownKeys configures whether unknown keys in structs are allowed.
// types are the named types of the API.
export const verifyArg = ( path : string , v : any , typewords : string [ ] , toJS : boolean , allowUnknownKeys : boolean , types : TypenameMap , opts : ClientOptions ) : any = > {
return new verifier ( types , toJS , allowUnknownKeys , opts ) . verify ( path , v , typewords )
}
export const parse = ( name : string , v : any ) : any = > verifyArg ( name , v , [ name ] , true , false , types , defaultOptions )
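// Example (illustrative): parse a raw JSON value into a verified Domain,
// throwing on mistyped or unknown fields:
//
//	const dom = parse('Domain', {ASCII: 'example.com', Unicode: ''}) as Domain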
class verifier {
constructor ( private types : TypenameMap , private toJS : boolean , private allowUnknownKeys : boolean , private opts : ClientOptions ) {
}
verify ( path : string , v : any , typewords : string [ ] ) : any {
typewords = typewords . slice ( 0 )
const ww = typewords . shift ( )
const error = ( msg : string ) = > {
if ( path != '' ) {
msg = path + ': ' + msg
}
throw new Error ( msg )
}
if ( typeof ww !== 'string' ) {
error ( 'bad typewords' )
return // Unreachable; TypeScript cannot infer that error() always throws.
}
const w : string = ww
const ensure = ( ok : boolean , expect : string ) : any = > {
if ( ! ok ) {
error ( 'got ' + JSON . stringify ( v ) + ', expected ' + expect )
}
return v
}
switch ( w ) {
case 'nullable' :
if ( v === null || v === undefined && this . opts . nullableOptional ) {
return v
}
return this . verify ( path , v , typewords )
case '[]' :
if ( v === null && this . opts . slicesNullable || v === undefined && this . opts . slicesNullable && this . opts . nullableOptional ) {
return v
}
ensure ( Array . isArray ( v ) , "array" )
return v . map ( ( e : any , i : number ) = > this . verify ( path + '[' + i + ']' , e , typewords ) )
case '{}' :
if ( v === null && this . opts . mapsNullable || v === undefined && this . opts . mapsNullable && this . opts . nullableOptional ) {
return v
}
ensure ( v !== null && typeof v === 'object' , "object" )
const r : any = { }
for ( const k in v ) {
r [ k ] = this . verify ( path + '.' + k , v [ k ] , typewords )
}
return r
}
ensure ( typewords . length == 0 , "empty typewords" )
const t = typeof v
switch ( w ) {
case 'any' :
return v
case 'bool' :
ensure ( t === 'boolean' , 'bool' )
return v
case 'int8' :
case 'uint8' :
case 'int16' :
case 'uint16' :
case 'int32' :
case 'uint32' :
case 'int64' :
case 'uint64' :
ensure ( t === 'number' && Number . isInteger ( v ) , 'integer' )
return v
case 'float32' :
case 'float64' :
ensure ( t === 'number' , 'float' )
return v
case 'int64s' :
case 'uint64s' :
ensure ( t === 'number' && Number . isInteger ( v ) || t === 'string' , 'integer fitting in float without precision loss, or string' )
return '' + v
case 'string' :
ensure ( t === 'string' , 'string' )
return v
case 'timestamp' :
if ( this . toJS ) {
ensure ( t === 'string' , 'string, with timestamp' )
const d = new Date ( v )
if ( d instanceof Date && ! isNaN ( d . getTime ( ) ) ) {
return d
}
error ( 'invalid date ' + v )
} else {
ensure ( t === 'object' && v !== null , 'non-null object' )
ensure ( v . __proto__ === Date . prototype , 'Date' )
return v . toISOString ( )
}
}
// We're left with named types.
const nt = this . types [ w ]
if ( ! nt ) {
error ( 'unknown type ' + w )
}
if ( v === null ) {
error ( 'bad value ' + v + ' for named type ' + w )
}
if ( structTypes [ nt . Name ] ) {
const t = nt as Struct
if ( typeof v !== 'object' ) {
error ( 'bad value ' + v + ' for struct ' + w )
}
const r : any = { }
for ( const f of t . Fields ) {
r [ f . Name ] = this . verify ( path + '.' + f . Name , v [ f . Name ] , f . Typewords )
}
// If going to JSON also verify no unknown fields are present.
if ( ! this . allowUnknownKeys ) {
const known : { [ key : string ] : boolean } = { }
for ( const f of t . Fields ) {
known [ f . Name ] = true
}
Object . keys ( v ) . forEach ( ( k ) = > {
if ( ! known [ k ] ) {
error ( 'unknown key ' + k + ' for struct ' + w )
}
} )
}
return r
} else if ( stringsTypes [ nt . Name ] ) {
const t = nt as Strings
if ( typeof v !== 'string' ) {
error ( 'mistyped value ' + v + ' for named strings ' + t . Name )
}
if ( ! t . Values || t . Values . length === 0 ) {
return v
}
for ( const sv of t . Values ) {
if ( sv . Value === v ) {
return v
}
}
error ( 'unknown value ' + v + ' for named strings ' + t . Name )
} else if ( intsTypes [ nt . Name ] ) {
const t = nt as Ints
if ( typeof v !== 'number' || ! Number . isInteger ( v ) ) {
error ( 'mistyped value ' + v + ' for named ints ' + t . Name )
}
if ( ! t . Values || t . Values . length === 0 ) {
return v
}
for ( const sv of t . Values ) {
if ( sv . Value === v ) {
return v
}
}
error ( 'unknown value ' + v + ' for named ints ' + t . Name )
} else {
throw new Error ( 'unexpected named type ' + nt )
}
}
}
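// Typewords examples (for reference): ['string'] verifies a plain string,
// ['[]', 'string'] an array of strings, ['{}', 'Selector'] a map with Selector
// values, and ['nullable', 'timestamp'] a timestamp that may be null.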
export interface ClientOptions {
baseURL? : string
aborter ? : { abort ? : ( ) = > void }
timeoutMsec? : number
skipParamCheck? : boolean
skipReturnCheck? : boolean
slicesNullable? : boolean
mapsNullable? : boolean
nullableOptional? : boolean
csrfHeader? : string
login ? : ( reason : string ) = > Promise < string >
}
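// Example (illustrative sketch; the header name and login helper are
// hypothetical, not taken from this API):
//
//	const options: ClientOptions = {
//		timeoutMsec: 30 * 1000,
//		csrfHeader: 'x-csrf',
//		// Called on "user:noAuth"/"user:badAuth"; must resolve with the CSRF
//		// token from a successful Login call.
//		login: (reason: string) => showLoginPrompt(reason),
//	}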
export interface AuthState {
token? : string // For csrf request header.
loginPromise? : Promise < void > // To let multiple API calls wait for a single login attempt, not each opening a login popup.
}
const _sherpaCall = async ( baseURL : string , authState : AuthState , options : ClientOptions , paramTypes : string [ ] [ ] , returnTypes : string [ ] [ ] , name : string , params : any [ ] ) : Promise < any > = > {
if ( ! options . skipParamCheck ) {
if ( params . length !== paramTypes . length ) {
return Promise . reject ( { message : 'wrong number of parameters in sherpa call, saw ' + params . length + ' != expected ' + paramTypes . length } )
}
params = params . map ( ( v : any , index : number ) = > verifyArg ( 'params[' + index + ']' , v , paramTypes [ index ] , false , false , types , options ) )
}
const simulate = async ( json : string ) = > {
const config = JSON . parse ( json || 'null' ) || { }
const waitMinMsec = config . waitMinMsec || 0
const waitMaxMsec = config . waitMaxMsec || 0
const wait = Math . random ( ) * ( waitMaxMsec - waitMinMsec )
const failRate = config . failRate || 0
return new Promise < void > ( ( resolve , reject ) = > {
if ( options . aborter ) {
options . aborter . abort = ( ) = > {
reject ( { message : 'call to ' + name + ' aborted by user' , code : 'sherpa:aborted' } )
reject = resolve = ( ) = > { }
}
}
setTimeout ( ( ) = > {
const r = Math . random ( )
if ( r < failRate ) {
reject ( { message : 'injected failure on ' + name , code : 'server:injected' } )
} else {
resolve ( )
}
reject = resolve = ( ) = > { }
} , waitMinMsec + wait )
} )
}
// Only simulate when there is a debug string. Otherwise it would always interfere
// with setting options.aborter.
let json : string = ''
try {
json = window . localStorage . getItem ( 'sherpats-debug' ) || ''
} catch ( err ) { }
if ( json ) {
await simulate ( json )
}
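// Example (illustrative): simulate 100-500ms of extra latency and a 10%
// failure rate for all calls:
//
//	window.localStorage.setItem('sherpats-debug', JSON.stringify({waitMinMsec: 100, waitMaxMsec: 500, failRate: 0.1}))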
const fn = ( resolve : ( v : any ) = > void , reject : ( v : any ) = > void ) = > {
let resolve1 = ( v : any ) = > {
resolve ( v )
resolve1 = ( ) = > { }
reject1 = ( ) = > { }
}
let reject1 = ( v : { code : string , message : string } ) = > {
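// On "user:noAuth"/"user:badAuth" with a configured login callback, let the
// user log in and retry the call. Concurrent calls share a single login
// attempt through authState.loginPromise.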
if ( ( v . code === 'user:noAuth' || v . code === 'user:badAuth' ) && options . login ) {
const login = options . login
if ( ! authState . loginPromise ) {
authState . loginPromise = new Promise ( ( aresolve , areject ) = > {
login ( v . code === 'user:badAuth' ? ( v . message || '' ) : '' )
. then ( ( token ) = > {
authState . token = token
authState . loginPromise = undefined
aresolve ( )
} , ( err : any ) = > {
authState . loginPromise = undefined
areject ( err )
} )
} )
}
authState . loginPromise
. then ( ( ) = > {
fn ( resolve , reject )
} , ( err : any ) = > {
reject ( err )
} )
return
}
reject ( v )
resolve1 = ( ) = > { }
reject1 = ( ) = > { }
}
const url = baseURL + name
const req = new window . XMLHttpRequest ( )
if ( options . aborter ) {
options . aborter . abort = ( ) = > {
req . abort ( )
reject1 ( { code : 'sherpa:aborted' , message : 'request aborted' } )
}
}
req . open ( 'POST' , url , true )
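// Pass along the CSRF token obtained from a successful login.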
if ( options . csrfHeader && authState . token ) {
req . setRequestHeader ( options . csrfHeader , authState . token )
}
if ( options . timeoutMsec ) {
req . timeout = options . timeoutMsec
}
req . onload = ( ) = > {
if ( req . status !== 200 ) {
if ( req . status === 404 ) {
reject1 ( { code : 'sherpa:badFunction' , message : 'function does not exist' } )
} else {
reject1 ( { code : 'sherpa:http' , message : 'error calling function, HTTP status: ' + req . status } )
}
return
}
let resp : any
try {
resp = JSON . parse ( req . responseText )
} catch ( err ) {
reject1 ( { code : 'sherpa:badResponse' , message : 'bad JSON from server' } )
return
}
if ( resp && resp . error ) {
const err = resp . error
reject1 ( { code : err.code , message : err.message } )
return
} else if ( ! resp || ! resp . hasOwnProperty ( 'result' ) ) {
reject1 ( { code : 'sherpa:badResponse' , message : "invalid sherpa response object, missing 'result'" } )
return
}
if ( options . skipReturnCheck ) {
resolve1 ( resp . result )
return
}
let result = resp . result
try {
if ( returnTypes . length === 0 ) {
if ( result ) {
throw new Error ( 'function ' + name + ' returned a value while prototype says it returns "void"' )
}
} else if ( returnTypes . length === 1 ) {
result = verifyArg ( 'result' , result , returnTypes [ 0 ] , true , true , types , options )
} else {
if ( result . length != returnTypes . length ) {
throw new Error ( 'wrong number of values returned by ' + name + ', saw ' + result . length + ' != expected ' + returnTypes . length )
}
result = result . map ( ( v : any , index : number ) = > verifyArg ( 'result[' + index + ']' , v , returnTypes [ index ] , true , true , types , options ) )
}
} catch ( err ) {
let errmsg = 'bad types'
if ( err instanceof Error ) {
errmsg = err . message
}
reject1 ( { code : 'sherpa:badTypes' , message : errmsg } )
}
resolve1 ( result )
}
req . onerror = ( ) = > {
reject1 ( { code : 'sherpa:connection' , message : 'connection failed' } )
}
req . ontimeout = ( ) = > {
reject1 ( { code : 'sherpa:timeout' , message : 'request timeout' } )
}
req . setRequestHeader ( 'Content-Type' , 'application/json' )
try {
req . send ( JSON . stringify ( { params : params } ) )
} catch ( err ) {
reject1 ( { code : 'sherpa:badData' , message : 'cannot marshal to JSON' } )
}
}
return await new Promise ( fn )
}
}