From d34dd8aae6ca4ccf0981bd36bbdf96d7e5128372 Mon Sep 17 00:00:00 2001
From: Mechiel Lukkien <mechiel@ueber.net>
Date: Sat, 30 Mar 2024 09:39:18 +0100
Subject: [PATCH] update to latest bstore, with a bugfix for queries with
 multiple orders that were partially handled by an index

This caused the returned order to be incorrect.
It was triggered by new code I'm working on.
---
 go.mod                                    |   2 +-
 go.sum                                    |   4 +-
 vendor/github.com/mjl-/bstore/README.md   |  57 ++----------
 vendor/github.com/mjl-/bstore/doc.go      |  81 ++++++++++++-----
 vendor/github.com/mjl-/bstore/exec.go     | 105 +++++++++++++++++-----
 vendor/github.com/mjl-/bstore/export.go   |   7 +-
 vendor/github.com/mjl-/bstore/keys.go     |   9 +-
 vendor/github.com/mjl-/bstore/plan.go     | 101 +++++++++++++++------
 vendor/github.com/mjl-/bstore/register.go |   2 +-
 vendor/modules.txt                        |   2 +-
 10 files changed, 235 insertions(+), 135 deletions(-)

diff --git a/go.mod b/go.mod
index f9a7915..b09e5c8 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc
 	github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05
-	github.com/mjl-/bstore v0.0.4
+	github.com/mjl-/bstore v0.0.5
 	github.com/mjl-/sconf v0.0.6
 	github.com/mjl-/sherpa v0.6.7
 	github.com/mjl-/sherpadoc v0.0.12
diff --git a/go.sum b/go.sum
index e28beac..bb234ab 100644
--- a/go.sum
+++ b/go.sum
@@ -28,8 +28,8 @@ github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc h1:ghTx3KsrO0hSJW0bCFCGw
 github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc/go.mod h1:v47qUMJnipnmDTRGaHwpCwzE6oypa5K33mUvBfzZBn8=
 github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05 h1:s6ay4bh4tmpPLdxjyeWG45mcwHfEluBMuGPkqxHWUJ4=
 github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
-github.com/mjl-/bstore v0.0.4 h1:q+R1oAr8+E9yf9q+zxkVjQ18VFqD/E9KmGVoe4FIOBA=
-github.com/mjl-/bstore v0.0.4/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
+github.com/mjl-/bstore v0.0.5 h1:Cx+LWEBnFBsqSxZNMxeVujkfc0kG10lUJaAU4vWSRHo=
+github.com/mjl-/bstore v0.0.5/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
 github.com/mjl-/sconf v0.0.6 h1:5Dt58488ZOoVx680zgK2K3vUrokLsp5mXDUACrJlrUc=
 github.com/mjl-/sconf v0.0.6/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
 github.com/mjl-/sherpa v0.6.7 h1:C5F8XQdV5nCuS4fvB+ye/ziUQrajEhOoj/t2w5T14BY=
diff --git a/vendor/github.com/mjl-/bstore/README.md b/vendor/github.com/mjl-/bstore/README.md
index a7656ef..e5b9cd1 100644
--- a/vendor/github.com/mjl-/bstore/README.md
+++ b/vendor/github.com/mjl-/bstore/README.md
@@ -1,55 +1,12 @@
-Bstore is a database library for storing and quering Go values.
+Bstore is an in-process database with serializable transactions supporting
+referential/unique/nonzero constraints, (multikey) indices, automatic schema
+management based on Go types and struct tags, and a query API.
 
-Bstore is designed as a small, pure Go library that still provides most of
-the common data consistency requirements for modest database use cases. Bstore
-aims to make basic use of cgo-based libraries, such as sqlite, unnecessary.
+Documentation:
 
-See https://pkg.go.dev/github.com/mjl-/bstore for features, examples and full
-documentation.
+https://pkg.go.dev/github.com/mjl-/bstore
 
 MIT-licensed
 
-
-# FAQ - Frequently Asked Questions
-
-## Is bstore an ORM?
-
-No. The API for bstore may look like an ORM. But instead of mapping bstore
-"queries" (function calls) to an SQL query string, bstore executes them
-directly without converting to a query language, storing the data itself.
-
-## How does bstore store its data?
-
-A bstore database is a single-file BoltDB database. BoltDB provides ACID
-properties. Bstore uses a BoltDB "bucket" (key/value store) for each Go type
-stored, with multiple subbuckets: one for type definitions, one for the actual
-data, and one bucket per index. BoltDB stores data in a B+tree. See format.md
-for details.
-
-## How does bstore compare to sqlite?
-
-Sqlite is a great library, but Go applications that require cgo are hard to
-cross-compile. With bstore, cross-compiling to most Go-supported platforms
-stays trivial (though not plan9, unfortunately). Although bstore is much more
-limited in so many aspects than sqlite, bstore also offers some advantages as
-well. Some points of comparison:
-
-- Cross-compilation and reproducibility: Trivial with bstore due to pure Go,
-  much harder with sqlite because of cgo.
-- Code complexity: low with bstore (7k lines including comments/docs), high
-  with sqlite.
-- Query language: mostly-type-checked function calls in bstore, free-form query
-  strings only checked at runtime with sqlite.
-- Functionality: very limited with bstore, much more full-featured with sqlite.
-- Schema management: mostly automatic based on Go type definitions in bstore,
-  manual with ALTER statements in sqlite.
-- Types and packing/parsing: automatic/transparent in bstore based on Go types
-  (including maps, slices, structs and custom MarshalBinary encoding), versus
-  manual scanning and parameter passing with sqlite with limited set of SQL
-  types.
-- Performance: low to good performance with bstore, high performance with
-  sqlite.
-- Database files: single file with bstore, several files with sqlite (due to
-  WAL or journal files).
-- Test coverage: decent coverage but limited real-world for bstore, versus
-  extremely thoroughly tested and with enormous real-world use.
+Written by Mechiel Lukkien, mechiel@ueber.net. Feedback/bug reports/patches
+welcome.
diff --git a/vendor/github.com/mjl-/bstore/doc.go b/vendor/github.com/mjl-/bstore/doc.go
index ad2cf0a..f7e2d7e 100644
--- a/vendor/github.com/mjl-/bstore/doc.go
+++ b/vendor/github.com/mjl-/bstore/doc.go
@@ -1,16 +1,18 @@
 /*
-Package bstore is a database library for storing and querying Go values.
+Package bstore is an in-process database with serializable transactions
+supporting referential/unique/nonzero constraints, (multikey) indices,
+automatic schema management based on Go types and struct tags, and a query API.
 
-Bstore is designed as a small, pure Go library that still provides most of
-the common data consistency requirements for modest database use cases. Bstore
-aims to make basic use of cgo-based libraries, such as sqlite, unnecessary.
+Bstore is a small, pure Go library that still provides most of the common data
+consistency requirements for modest database use cases. Bstore aims to make
+basic use of cgo-based libraries, such as sqlite, unnecessary.
 
 Bstore implements autoincrementing primary keys, indices, default values,
 enforcement of nonzero, unique and referential integrity constraints, automatic
 schema updates and a query API for combining filters/sorting/limits. Queries
-are planned and executed using indices for fast execution where possible.
-Bstore is designed with the Go type system in mind: you typically don't have to
-write any (un)marshal code for your types.
+are planned and executed using indices for speed where possible.  Bstore works
+with Go types: you typically don't have to write any (un)marshal code for your
+types. Bstore is not an ORM, it plans and executes queries itself.
 
 # Field types
 
@@ -142,37 +144,37 @@ Conversions that are not currently allowed, but may be in the future:
   - Types of primary keys cannot be changed, also not from one integer type to a
     wider integer type of same signedness.
 
-# BoltDB and storage
+# Bolt and storage
 
-BoltDB is used as underlying storage. BoltDB stores key/values in a single
-file, in multiple/nested buckets (namespaces) in a B+tree and provides ACID
-transactions.  Either a single write transaction or multiple read-only
-transactions can be active at a time.  Do not start a blocking read-only
-transaction while holding a writable transaction or vice versa, this will cause
-deadlock.
+Bolt is used as underlying storage through the bbolt library. Bolt stores
+key/values in a single file, allowing multiple/nested buckets (namespaces) in a
+B+tree and provides ACID serializable transactions.  A single write transaction
+can be active at a time, and one or more read-only transactions.  Do not start
+a blocking read-only transaction in a goroutine while holding a writable
+transaction or vice versa, this can cause deadlock.
 
-BoltDB returns Go values that are memory mapped to the database file.  This
-means BoltDB/bstore database files cannot be transferred between machines with
-different endianness.  BoltDB uses explicit widths for its types, so files can
+Bolt returns Go values that are memory mapped to the database file.  This means
+Bolt/bstore database files cannot be transferred between machines with
+different endianness.  Bolt uses explicit widths for its types, so files can
 be transferred between 32bit and 64bit machines of same endianness. While
-BoltDB returns read-only memory mapped byte slices, bstore only ever returns
+Bolt returns read-only memory mapped byte slices, bstore only ever returns
 parsed/copied regular writable Go values that require no special programmer
 attention.
 
-For each Go type opened for a database file, bstore ensures a BoltDB bucket
+For each Go type opened for a database file, bstore ensures a Bolt bucket
 exists with two subbuckets:
 
   - "types", with type descriptions of the stored records. Each time the database
     file is opened with a modified Go type (add/removed/modified
     field/type/bstore struct tag), a new type description is automatically added,
     identified by sequence number.
-  - "records", containing all data, with the type's primary key as BoltDB key,
+  - "records", containing all data, with the type's primary key as Bolt key,
     and the encoded remaining fields as value. The encoding starts with a
     reference to a type description.
 
 For each index, another subbucket is created, its name starting with "index.".
 The stored keys consist of the index fields followed by the primary key, and an
-empty value.
+empty value. See format.md for details.
 
 # Limitations
 
@@ -189,12 +191,12 @@ equivalent of a nil pointer.
 The first field of a stored struct is always the primary key. Autoincrement is
 only available for the primary key.
 
-BoltDB opens the database file with a lock. Only one process can have the
+Bolt opens the database file with a lock. Only one process can have the
 database open at a time.
 
-An index stored on disk in BoltDB can consume more disk space than other
+An index stored on disk in Bolt can consume more disk space than other
 database systems would: For each record, the indexed field(s) and primary key
-are stored in full. Because bstore uses BoltDB as key/value store, and doesn't
+are stored in full. Because bstore uses Bolt as key/value store, and doesn't
 manage disk pages itself, it cannot as efficiently pack an index page with many
 records.
 
@@ -202,5 +204,36 @@ Interface values cannot be stored. This would require storing the type along
 with the value. Instead, use a type that is a BinaryMarshaler.
 
 Values of builtin type "complex" cannot be stored.
+
+Bstore inherits limitations from Bolt, see
+https://pkg.go.dev/go.etcd.io/bbolt#readme-caveats-amp-limitations.
+
+# Comparison with sqlite
+
+Sqlite is a great library, but Go applications that require cgo are hard to
+cross-compile. With bstore, cross-compiling to most Go-supported platforms
+stays trivial (though not plan9, unfortunately). Although bstore is much more
+limited in so many aspects than sqlite, bstore also offers some advantages as
+well. Some points of comparison:
+
+- Cross-compilation and reproducibility: Trivial with bstore due to pure Go,
+  much harder with sqlite because of cgo.
+- Code complexity: low with bstore (7k lines including comments/docs), high
+  with sqlite.
+- Query language: mostly-type-checked function calls in bstore, free-form query
+  strings only checked at runtime with sqlite.
+- Functionality: very limited with bstore, much more full-featured with sqlite.
+- Schema management: mostly automatic based on Go type definitions in bstore,
+  manual with ALTER statements in sqlite.
+- Types and packing/parsing: automatic/transparent in bstore based on Go types
+  (including maps, slices, structs and custom MarshalBinary encoding), versus
+  manual scanning and parameter passing with sqlite with limited set of SQL
+  types.
+- Performance: low to good performance with bstore, high performance with
+  sqlite.
+- Database files: single file with bstore, several files with sqlite (due to
+  WAL or journal files).
+- Test coverage: decent coverage but limited real-world for bstore, versus
+  extremely thoroughly tested and with enormous real-world use.
 */
 package bstore
diff --git a/vendor/github.com/mjl-/bstore/exec.go b/vendor/github.com/mjl-/bstore/exec.go
index 71b5127..93a59a9 100644
--- a/vendor/github.com/mjl-/bstore/exec.go
+++ b/vendor/github.com/mjl-/bstore/exec.go
@@ -21,14 +21,31 @@ type exec[T any] struct {
 	// See plan.keys. We remove items from the list when we looked one up, but we keep the slice non-nil.
 	keys [][]byte
 
-	// If -1, no limit is set. This is different from Query where 0 means
-	// no limit. We count back and 0 means the end.
+	// If non-empty, serve nextKey requests from here. Used when we need to do
+	// in-memory sort. After reading from here, and limit isn't reached yet, we may do
+	// another fill & sort of data to serve from, for orderings partially from an
+	// index. When filling data, limit (below) is accounted for, so all elements can be
+	// returned to caller.
+	data []pair[T]
+
+	// If -1, no limit is set. This is different from Query where 0 means no limit. We
+	// count back and 0 means the end. Also set from -1 to 0 when end of execution is
+	// reached.
 	limit int
 
-	data    []pair[T] // If not nil (even if empty), serve nextKey requests from here.
-	ib      *bolt.Bucket
-	rb      *bolt.Bucket
-	forward func() (bk, bv []byte) // Once we start scanning, we prepare forward to next/prev to the following value.
+	// Index and record buckets loaded when first needed.
+	ib *bolt.Bucket
+	rb *bolt.Bucket
+
+	// Of last element in data. For finding end of group through prefix-match during
+	// partial index ordering for remaining in-memory sort.
+	lastik []byte
+
+	// If not nil, row that was scanned previously, to use instead of calling forward.
+	stowedbk, stowedbv []byte
+
+	// Once we start scanning, we prepare forward to next/prev to the following value.
+	forward func() (bk, bv []byte)
 }
 
 // exec creates a new execution for the plan, registering statistics.
@@ -54,7 +71,7 @@ func (p *plan[T]) exec(q *Query[T]) *exec[T] {
 	if len(p.orders) > 0 {
 		q.stats.Sort++
 	}
-	q.stats.LastOrdered = p.start != nil || p.stop != nil
+	q.stats.LastOrdered = p.start != nil || p.stop != nil || p.norderidxuse > 0
 	q.stats.LastAsc = !p.desc
 
 	limit := -1
@@ -107,12 +124,9 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		return nil, zero, q.err
 	}
 
-	// We collected & sorted data previously. Return from it until done.
-	// Limit was already applied.
-	if e.data != nil {
-		if len(e.data) == 0 {
-			return nil, zero, ErrAbsent
-		}
+	// We collected & sorted data previously.
+	// Limit was already applied/updated, so we can serve these without checking.
+	if len(e.data) > 0 {
 		p := e.data[0]
 		e.data = e.data[1:]
 		var v T
@@ -127,6 +141,7 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		return p.bk, v, nil
 	}
 
+	// Limit is 0 when we hit the limit or at end of processing the execution.
 	if e.limit == 0 {
 		return nil, zero, ErrAbsent
 	}
@@ -153,10 +168,8 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 	// List of IDs (records) or full unique index equality match.
 	// We can get the records/index value by a simple "get" on the key.
 	if e.keys != nil {
+		// If we need to sort, we collect all elements and prevent further querying.
 		collect := len(e.plan.orders) > 0
-		if collect {
-			e.data = []pair[T]{} // Must be non-nil to get into e.data branch!
-		}
 		for i, xk := range e.keys {
 			var bk, bv []byte
 
@@ -217,6 +230,7 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 			return bk, v, nil
 		}
 		if !collect {
+			e.limit = 0
 			return nil, zero, ErrAbsent
 		}
 		// Restart, now with data.
@@ -225,14 +239,13 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		if e.limit > 0 && len(e.data) > e.limit {
 			e.data = e.data[:e.limit]
 		}
+		e.limit = 0
 		return q.nextKey(write, value)
 	}
 
-	// We are going to do a scan, either over the records or an index. We may have a start and stop key.
+	// We are going to do a scan, either over the records or (a part of) an index. We
+	// may have a start and stop key.
 	collect := len(e.plan.orders) > 0
-	if collect {
-		e.data = []pair[T]{} // Must be non-nil to get into e.data branch on function restart.
-	}
 	// Every 1k keys we've seen, we'll check if the context has been canceled. If we
 	// wouldn't do this, a query that doesn't return any matches won't get canceled
 	// until it is finished.
@@ -304,6 +317,10 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 					}
 				}
 			}
+		} else if e.stowedbk != nil {
+			// Resume with previously seen key/value.
+			xk, xv = e.stowedbk, e.stowedbv
+			e.stowedbk, e.stowedbv = nil, nil
 		} else {
 			if e.plan.idx == nil {
 				q.stats.Records.Cursor++
@@ -331,24 +348,37 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		}
 
 		var pk, bv []byte
+		ordersidxPartial := e.plan.norderidxuse > 0 && len(e.plan.orders) > 0
+		var idxkeys [][]byte // Only set when we have partial ordering from index.
 		if e.plan.idx == nil {
 			pk = xk
 			bv = xv
 		} else {
 			var err error
-			pk, _, err = e.plan.idx.parseKey(xk, false)
+			pk, idxkeys, err = e.plan.idx.parseKey(xk, ordersidxPartial, true)
 			if err != nil {
 				q.error(err)
 				return nil, zero, err
 			}
 		}
 
+		// If we have a partial order from the index, and this new value has a different
+		// index ordering key prefix than the last value, we stop collecting, sort the data we
+		// have by the remaining ordering, return that data, and continue collecting in the
+		// next round. We stow the new value so we don't have to revert the forward() from
+		// earlier.
+		if ordersidxPartial && len(e.data) > 0 && !prefixMatch(e.lastik, e.plan.norderidxuse, idxkeys, pk) {
+			e.stowedbk, e.stowedbv = xk, xv
+			break
+		}
+
 		p := pair[T]{pk, bv, nil}
 		if ok, err := e.checkFilter(&p); err != nil {
 			return nil, zero, err
 		} else if !ok {
 			continue
 		}
+
 		//log.Printf("have kv, %x %x", p.bk, p.bv)
 		var v T
 		var err error
@@ -361,6 +391,7 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		}
 		if collect {
 			e.data = append(e.data, p)
+			e.lastik = xk
 			continue
 		}
 		if e.limit > 0 {
@@ -368,17 +399,39 @@ func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
 		}
 		return p.bk, v, nil
 	}
-	if !collect {
+	if !collect || len(e.data) == 0 {
+		e.limit = 0
 		return nil, zero, ErrAbsent
 	}
 	// Restart, now with data.
 	e.sort()
-	if e.limit > 0 && len(e.data) > e.limit {
-		e.data = e.data[:e.limit]
+	if e.limit > 0 {
+		if len(e.data) > e.limit {
+			e.data = e.data[:e.limit]
+		}
+		e.limit -= len(e.data)
 	}
 	return e.nextKey(write, value)
 }
 
+// prefixMatch returns whether ik (index key) starts with the bytes from n elements
+// from field keys kl and primary key pk.
+func prefixMatch(ik []byte, n int, kl [][]byte, pk []byte) bool {
+	for i := 0; i < n; i++ {
+		var k []byte
+		if i < len(kl) {
+			k = kl[i]
+		} else {
+			k = pk
+		}
+		if !bytes.HasPrefix(ik, k) {
+			return false
+		}
+		ik = ik[len(k):]
+	}
+	return true
+}
+
 // checkFilter checks against the filters for the plan.
 func (e *exec[T]) checkFilter(p *pair[T]) (rok bool, rerr error) {
 	q := e.q
@@ -574,6 +627,10 @@ func compare(k kind, a, b reflect.Value) int {
 }
 
 func (e *exec[T]) sort() {
+	if len(e.data) <= 1 {
+		return
+	}
+
 	// todo: We should check whether we actually need to load values. We're
 	// always loading it for the time being because SortStableFunc isn't
 	// going to give us a *pair (even though it could because of the slice)
diff --git a/vendor/github.com/mjl-/bstore/export.go b/vendor/github.com/mjl-/bstore/export.go
index 9176be6..ffa3ab8 100644
--- a/vendor/github.com/mjl-/bstore/export.go
+++ b/vendor/github.com/mjl-/bstore/export.go
@@ -329,8 +329,7 @@ func (ft fieldType) parseValue(p *parser) any {
 			if fm.Nonzero(i) {
 				l = append(l, ft.ListElem.parseValue(p))
 			} else {
-				// Always add non-zero elements, or we would
-				// change the number of elements in a list.
+				// Always add zero elements, or we would change the number of elements in a list.
 				l = append(l, ft.ListElem.zeroExportValue())
 			}
 		}
@@ -343,8 +342,8 @@ func (ft fieldType) parseValue(p *parser) any {
 			if fm.Nonzero(i) {
 				l[i] = ft.ListElem.parseValue(p)
 			} else {
-				// Always add non-zero elements, or we would
-				// change the number of elements in a list.
+				// Always add zero elements, or we would change the number of elements in the
+				// array.
 				l[i] = ft.ListElem.zeroExportValue()
 			}
 		}
diff --git a/vendor/github.com/mjl-/bstore/keys.go b/vendor/github.com/mjl-/bstore/keys.go
index e78cd52..75138e2 100644
--- a/vendor/github.com/mjl-/bstore/keys.go
+++ b/vendor/github.com/mjl-/bstore/keys.go
@@ -132,7 +132,8 @@ func parsePK(rv reflect.Value, bk []byte) error {
 // parseKey parses the PK (last element) of an index key.
 // If all is set, also gathers the values before and returns them in the second
 // parameter.
-func (idx *index) parseKey(buf []byte, all bool) ([]byte, [][]byte, error) {
+// If withnull is set, string values will get their ending \0 included.
+func (idx *index) parseKey(buf []byte, all bool, withnull bool) ([]byte, [][]byte, error) {
 	var err error
 	var keys [][]byte
 	take := func(n int) {
@@ -160,7 +161,11 @@ fields:
 			for i, b := range buf {
 				if b == 0 {
 					if all {
-						keys = append(keys, buf[:i])
+						o := i
+						if withnull {
+							o++
+						}
+						keys = append(keys, buf[:o])
 					}
 					buf = buf[i+1:]
 					continue fields
diff --git a/vendor/github.com/mjl-/bstore/plan.go b/vendor/github.com/mjl-/bstore/plan.go
index f7b805d..94053ff 100644
--- a/vendor/github.com/mjl-/bstore/plan.go
+++ b/vendor/github.com/mjl-/bstore/plan.go
@@ -8,7 +8,6 @@ import (
 )
 
 // todo: cache query plans? perhaps explicitly through something like a prepared statement. the current plan includes values in keys,start,stop, which would need to be calculated for each execution. should benchmark time spent in planning first.
-// todo optimize: handle multiple sorts with multikey indices if they match
 // todo optimize: combine multiple filter (not)in/equals calls for same field
 // todo optimize: efficiently pack booleans in an index (eg for Message.Flags), and use it to query.
 // todo optimize: do multiple range scans if necessary when we can use an index for an equal check with multiple values.
@@ -31,10 +30,15 @@ type plan[T any] struct {
 	// index. Required non-nil for unique.
 	keys [][]byte
 
-	desc           bool   // Direction of the range scan.
-	start          []byte // First key to scan. Filters below may still apply. If desc, this value is > than stop (if it is set). If nil, we begin ranging at the first or last (for desc) key.
-	stop           []byte // Last key to scan. Can be nil independently of start.
-	startInclusive bool   // If the start and stop values are inclusive or exclusive.
+	desc bool // Direction of the range scan.
+	// First key to scan. Filters below may still apply. If desc, this value is > than
+	// stop (if it is set). If nil, we begin ranging at the first or last (for desc)
+	// key.
+	start []byte
+	// Last key to scan. Can be nil independently of start.
+	stop []byte
+	// If the start and stop values are inclusive or exclusive.
+	startInclusive bool
 	stopInclusive  bool
 
 	// Filter we need to apply after retrieving the record. If all original filters
@@ -42,9 +46,13 @@ type plan[T any] struct {
 	// empty.
 	filters []filter[T]
 
-	// Orders we need to apply after first retrieving all records. As with
-	// filters, if a range scan takes care of an ordering from the query,
-	// this field is empty.
+	// Number of fields from index used to group results before applying in-memory
+	// ordering with "orders" below.
+	norderidxuse int
+
+	// Orders we need to apply after first retrieving all records with equal values for
+	// first norderidxuse fields. As with filters, if a range scan takes care of all
+	// orderings from the query, this field is empty.
 	orders []order
 }
 
@@ -177,7 +185,7 @@ indices:
 	var p *plan[T]
 	var nexact int
 	var nrange int
-	var ordered bool
+	var norder int
 
 	evaluatePKOrIndex := func(idx *index) error {
 		var isPK bool
@@ -205,25 +213,40 @@ indices:
 		}
 
 		var nex = 0
-		// log.Printf("idx %v", idx)
+		// log.Printf("evaluating idx %#v", idx)
 		var skipFilters []*filter[T]
 		for _, f := range idx.Fields {
 			if equals[f.Name] != nil && f.Type.Kind != kindSlice {
 				skipFilters = append(skipFilters, equals[f.Name])
-				nex++
 			} else if inslices[f.Name] != nil && f.Type.Kind == kindSlice {
 				skipFilters = append(skipFilters, inslices[f.Name])
-				nex++
 			} else {
 				break
 			}
+			nex++
 		}
 
-		// See if the next field can be used for compare.
-		var gx, lx *filterCompare[T]
-		var nrng int
-		var order *order
+		// For ordering, skip leading filters we already match on exactly.
 		orders := q.xorders
+		trim := 0
+	TrimOrders:
+		for _, o := range orders {
+			for _, f := range idx.Fields[:nex] {
+				if o.field.Name == f.Name {
+					trim++
+					continue TrimOrders
+				}
+			}
+			break
+		}
+		orders = orders[trim:]
+
+		// Fields from the index that we use for grouping before in-memory sorting.
+		var norderidxuse int
+
+		// See if the next index field can be used for compare and ordering.
+		var gx, lx *filterCompare[T]
+		var nrng int // for nrange
 		if nex < len(idx.Fields) {
 			nf := idx.Fields[nex]
 			for i := range q.xfilters {
@@ -250,22 +273,45 @@ indices:
 				}
 			}
 
-			// See if it can be used for ordering.
-			// todo optimize: we could use multiple orders
-			if len(orders) > 0 && orders[0].field.Name == nf.Name {
-				order = &orders[0]
-				orders = orders[1:]
+			// We can use multiple orderings as long as the asc/desc direction stays the same.
+			nord := 0
+			for i, o := range orders {
+				if nex+i < len(idx.Fields) && o.field.Name == idx.Fields[nex+i].Name && (nord == 0 || o.asc == orders[0].asc) {
+					nord++
+					continue
+				}
+				break
 			}
+			norderidxuse = nex + nord
+			prevorders := orders
+			orders = orders[nord:]
+
+			// The stored index key ends with the primary key, so if we're there, and the next
+			// ordering key is the primary key, we use the index for it too.
+			if norderidxuse == len(idx.Fields) && len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name && (nord == 0 || orders[0].asc == prevorders[nord-1].asc) {
+				orders = orders[1:]
+				norderidxuse++
+			}
+		} else if len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name {
+			// We only had equals filters that used all of the index, but we're also sorting by
+			// the primary key, so use the index for that too.
+			orders = orders[1:]
+			norderidxuse++
 		}
 
+		// Orders handled by the index, excluding exact match filters.
+		idxorders := q.xorders[trim : len(q.xorders)-len(orders)]
+
+		// log.Printf("index fields to match for index order: %d, orders for index %d, in-memory ordering %d, total orders %d", norderidxuse, len(idxorders), len(orders), len(q.xorders))
+
 		// See if this is better than what we had.
-		if !(nex > nexact || (nex == nexact && (nrng > nrange || order != nil && !ordered && (q.xlimit > 0 || nrng == nrange)))) {
-			// log.Printf("plan not better, nex %d, nrng %d, limit %d, order %v ordered %v", nex, nrng, q.limit, order, ordered)
+		if !(nex > nexact || (nex == nexact && (nrng > nrange || len(idxorders) > norder && (q.xlimit > 0 || nrng == nrange)))) {
+			// log.Printf("plan not better, nex %d, nrng %d, limit %d, nidxorders %v ordered %v", nex, nrng, q.xlimit, len(idxorders), norder)
 			return nil
 		}
 		nexact = nex
 		nrange = nrng
-		ordered = order != nil
+		norder = len(idxorders)
 
 		// Calculate the prefix key.
 		var kvalues []reflect.Value
@@ -307,7 +353,8 @@ indices:
 
 		startInclusive := gx == nil || gx.op != opGreater
 		stopInclusive := lx == nil || lx.op != opLess
-		if order != nil && !order.asc {
+		desc := len(idxorders) > 0 && !idxorders[0].asc
+		if desc {
 			start, stop = stop, start
 			startInclusive, stopInclusive = stopInclusive, startInclusive
 		}
@@ -318,12 +365,13 @@ indices:
 
 		p = &plan[T]{
 			idx:            idx,
-			desc:           order != nil && !order.asc,
+			desc:           desc,
 			start:          start,
 			stop:           stop,
 			startInclusive: startInclusive,
 			stopInclusive:  stopInclusive,
 			filters:        dropFilters(q.xfilters, skipFilters),
+			norderidxuse:   norderidxuse,
 			orders:         orders,
 		}
 		return nil
@@ -341,6 +389,7 @@ indices:
 
 	}
 	if p != nil {
+		// log.Printf("using index plan %v", p)
 		return p, nil
 	}
 
diff --git a/vendor/github.com/mjl-/bstore/register.go b/vendor/github.com/mjl-/bstore/register.go
index e98daa3..e740846 100644
--- a/vendor/github.com/mjl-/bstore/register.go
+++ b/vendor/github.com/mjl-/bstore/register.go
@@ -561,7 +561,7 @@ func (db *DB) Register(ctx context.Context, typeValues ...any) error {
 							parsePK(a, prev.buf[prev.pre:]) // Ignore error, nothing to do.
 							parsePK(b, k.buf[k.pre:])       // Ignore error, nothing to do.
 							var dup []any
-							_, values, _ := idx.parseKey(k.buf, true)
+							_, values, _ := idx.parseKey(k.buf, true, false)
 							for i := range values {
 								x := reflect.New(reflect.TypeOf(idx.Fields[i].Type.zeroKey())).Elem()
 								parsePK(x, values[i]) // Ignore error, nothing to do.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bea4dd1..bd2989c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -16,7 +16,7 @@ github.com/mjl-/adns/internal/singleflight
 # github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05
 ## explicit; go 1.20
 github.com/mjl-/autocert
-# github.com/mjl-/bstore v0.0.4
+# github.com/mjl-/bstore v0.0.5
 ## explicit; go 1.19
 github.com/mjl-/bstore
 # github.com/mjl-/sconf v0.0.6