From 23684e1b161915d020877aa739b5f4922d6157a3 Mon Sep 17 00:00:00 2001
From: DavePearce
Date: Fri, 25 Jul 2025 15:34:12 +1200
Subject: [PATCH 1/8] refactor constraints to have their own directory

This refactors constraints so they live within their own folders.
---
 pkg/cmd/check.go | 9 +-
 pkg/ir/air/constraint.go | 25 ++--
 pkg/ir/air/schema.go | 15 ++-
 pkg/ir/mir/agnosticity.go | 4 +-
 pkg/ir/mir/constraint.go | 18 ++-
 pkg/ir/mir/lookup.go | 14 +--
 pkg/ir/mir/schema.go | 18 ++-
 pkg/schema/constraint/assertion.go | 2 +-
 .../constraint.go} | 85 +++++--------
 pkg/schema/constraint/interleaving/failure.go | 57 +++++++++
 .../{lookup.go => lookup/constraint.go} | 114 ++++--------------
 pkg/schema/constraint/lookup/failure.go | 56 +++++++++
 pkg/schema/constraint/lookup/geometry.go | 58 +++++++++
 .../constraint.go} | 40 ++----
 pkg/schema/constraint/permutation/failure.go | 27 +++++
 .../{range.go => ranged/constraint.go} | 69 ++++-------
 pkg/schema/constraint/ranged/failure.go | 51 ++++++++
 .../{sorted.go => sorted/constraint.go} | 52 ++++----
 pkg/schema/constraint/sorted/failure.go | 27 +++++
 pkg/schema/constraint/util.go | 20 +--
 .../{vanishing.go => vanishing/constraint.go} | 80 ++++--------
 pkg/schema/constraint/vanishing/failure.go | 49 ++++++++
 22 files changed, 537 insertions(+), 353 deletions(-)
 rename pkg/schema/constraint/{interleaving.go => interleaving/constraint.go} (63%)
 create mode 100644 pkg/schema/constraint/interleaving/failure.go
 rename pkg/schema/constraint/{lookup.go => lookup/constraint.go} (74%)
 create mode 100644 pkg/schema/constraint/lookup/failure.go
 create mode 100644 pkg/schema/constraint/lookup/geometry.go
 rename pkg/schema/constraint/{permutation.go => permutation/constraint.go} (81%)
 create mode 100644 pkg/schema/constraint/permutation/failure.go
 rename pkg/schema/constraint/{range.go => ranged/constraint.go} (68%)
 create mode 100644 pkg/schema/constraint/ranged/failure.go
 rename pkg/schema/constraint/{sorted.go => sorted/constraint.go} (82%)
 create mode 100644 pkg/schema/constraint/sorted/failure.go
 rename pkg/schema/constraint/{vanishing.go => vanishing/constraint.go} (71%)
 create mode 100644 pkg/schema/constraint/vanishing/failure.go

diff --git a/pkg/cmd/check.go b/pkg/cmd/check.go
index b288a3945..528a9b139 100644
--- a/pkg/cmd/check.go
+++ b/pkg/cmd/check.go
@@ -26,6 +26,9 @@ import (
 	"github.com/consensys/go-corset/pkg/ir"
 	sc "github.com/consensys/go-corset/pkg/schema"
 	"github.com/consensys/go-corset/pkg/schema/constraint"
+	"github.com/consensys/go-corset/pkg/schema/constraint/lookup"
+	"github.com/consensys/go-corset/pkg/schema/constraint/ranged"
+	"github.com/consensys/go-corset/pkg/schema/constraint/vanishing"
 	"github.com/consensys/go-corset/pkg/trace"
 	tr "github.com/consensys/go-corset/pkg/trace"
 	"github.com/consensys/go-corset/pkg/util"
@@ -221,15 +224,15 @@ func reportFailures(ir string, failures []sc.Failure, trace tr.Trace, cfg checkC
 // Print a human-readable report detailing the given failure
 func reportFailure(failure sc.Failure, trace tr.Trace, cfg checkConfig) {
-	if f, ok := failure.(*constraint.VanishingFailure); ok {
+	if f, ok := failure.(*vanishing.Failure); ok {
 		cells := f.RequiredCells(trace)
 		fmt.Printf("failing constraint %s:\n", f.Handle)
 		reportRelevantCells(cells, trace, cfg)
-	} else if f, ok := failure.(*constraint.RangeFailure); ok {
+	} else if f, ok := failure.(*ranged.Failure); ok {
 		cells := f.RequiredCells(trace)
 		fmt.Printf("failing range constraint %s:\n", f.Handle)
 		reportRelevantCells(cells, trace, cfg)
-	} else if f, ok :=
failure.(*constraint.LookupFailure); ok { + } else if f, ok := failure.(*lookup.Failure); ok { cells := f.RequiredCells(trace) fmt.Printf("failing lookup constraint %s:\n", f.Handle) reportRelevantCells(cells, trace, cfg) diff --git a/pkg/ir/air/constraint.go b/pkg/ir/air/constraint.go index 3fb80bebd..7cd773948 100644 --- a/pkg/ir/air/constraint.go +++ b/pkg/ir/air/constraint.go @@ -17,6 +17,11 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/interleaving" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" + "github.com/consensys/go-corset/pkg/schema/constraint/permutation" + "github.com/consensys/go-corset/pkg/schema/constraint/ranged" + "github.com/consensys/go-corset/pkg/schema/constraint/vanishing" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" @@ -34,11 +39,11 @@ type ConstraintBound interface { schema.Constraint constraint.Assertion[ir.Testable] | - constraint.InterleavingConstraint[*ColumnAccess] | - constraint.LookupConstraint[*ColumnAccess] | - constraint.PermutationConstraint | - constraint.RangeConstraint[*ColumnAccess] | - constraint.VanishingConstraint[LogicalTerm] + interleaving.Constraint[*ColumnAccess] | + lookup.Constraint[*ColumnAccess] | + permutation.Constraint | + ranged.Constraint[*ColumnAccess] | + vanishing.Constraint[LogicalTerm] } // Air attempts to encapsulate the notion of a valid constraint at the AIR @@ -64,32 +69,32 @@ func NewAssertion(handle string, ctx schema.ModuleId, term ir.Testable) Assertio // NewInterleavingConstraint creates a new interleaving constraint with a given handle. 
func NewInterleavingConstraint(handle string, targetContext schema.ModuleId, sourceContext schema.ModuleId, target ColumnAccess, sources []*ColumnAccess) Constraint { - return newAir(constraint.NewInterleavingConstraint(handle, targetContext, sourceContext, &target, sources)) + return newAir(interleaving.NewConstraint(handle, targetContext, sourceContext, &target, sources)) } // NewLookupConstraint constructs a new AIR lookup constraint func NewLookupConstraint(handle string, targets []ir.Enclosed[[]*ColumnAccess], sources []ir.Enclosed[[]*ColumnAccess]) LookupConstraint { // - return newAir(constraint.NewLookupConstraint(handle, targets, sources)) + return newAir(lookup.NewConstraint(handle, targets, sources)) } // NewPermutationConstraint creates a new permutation func NewPermutationConstraint(handle string, context schema.ModuleId, targets []schema.RegisterId, sources []schema.RegisterId) Constraint { - return newAir(constraint.NewPermutationConstraint(handle, context, targets, sources)) + return newAir(permutation.NewPermutationConstraint(handle, context, targets, sources)) } // NewRangeConstraint constructs a new AIR range constraint func NewRangeConstraint(handle string, ctx schema.ModuleId, expr ColumnAccess, bitwidth uint) RangeConstraint { - return newAir(constraint.NewRangeConstraint(handle, ctx, &expr, bitwidth)) + return newAir(ranged.NewRangeConstraint(handle, ctx, &expr, bitwidth)) } // NewVanishingConstraint constructs a new AIR vanishing constraint func NewVanishingConstraint(handle string, ctx schema.ModuleId, domain util.Option[int], term Term) VanishingConstraint { // - return newAir(constraint.NewVanishingConstraint(handle, ctx, domain, LogicalTerm{term})) + return newAir(vanishing.NewConstraint(handle, ctx, domain, LogicalTerm{term})) } // Air marks the constraint as being valid for the AIR representation. diff --git a/pkg/ir/air/schema.go b/pkg/ir/air/schema.go index fb6ec1a9f..bb3845378 100644 --- a/pkg/ir/air/schema.go +++ b/pkg/ir/air/schema.go @@ -17,6 +17,11 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/interleaving" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" + "github.com/consensys/go-corset/pkg/schema/constraint/permutation" + "github.com/consensys/go-corset/pkg/schema/constraint/ranged" + "github.com/consensys/go-corset/pkg/schema/constraint/vanishing" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/set" @@ -66,19 +71,19 @@ type ( Assertion = Air[constraint.Assertion[ir.Testable]] // InterleavingConstraint captures the essence of an interleaving constraint // at the MIR level. - InterleavingConstraint = constraint.InterleavingConstraint[*ColumnAccess] + InterleavingConstraint = Air[interleaving.Constraint[*ColumnAccess]] // LookupConstraint captures the essence of a lookup constraint at the AIR // level. At the AIR level, lookup constraints are only permitted between // columns (i.e. not arbitrary expressions). - LookupConstraint = Air[constraint.LookupConstraint[*ColumnAccess]] + LookupConstraint = Air[lookup.Constraint[*ColumnAccess]] // PermutationConstraint captures the essence of a permutation constraint at the // AIR level. Specifically, it represents a constraint that one (or more) // columns are a permutation of another. 
- PermutationConstraint = Air[constraint.PermutationConstraint] + PermutationConstraint = Air[permutation.Constraint] // RangeConstraint captures the essence of a range constraints at the AIR level. - RangeConstraint = Air[constraint.RangeConstraint[*ColumnAccess]] + RangeConstraint = Air[ranged.Constraint[*ColumnAccess]] // VanishingConstraint captures the essence of a vanishing constraint at the AIR level. - VanishingConstraint = Air[constraint.VanishingConstraint[LogicalTerm]] + VanishingConstraint = Air[vanishing.Constraint[LogicalTerm]] ) // Following types capture permitted expression forms at the AIR level. diff --git a/pkg/ir/mir/agnosticity.go b/pkg/ir/mir/agnosticity.go index 9aa5b7454..28a50c7e3 100644 --- a/pkg/ir/mir/agnosticity.go +++ b/pkg/ir/mir/agnosticity.go @@ -15,7 +15,7 @@ package mir import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" - "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/vanishing" ) // Subdivide implementation for the FieldAgnostic interface. @@ -55,7 +55,7 @@ func subdivideVanishing(p VanishingConstraint, mapping schema.RegisterLimbsMap) // FIXME: this is an insufficient solution because it does not address the // potential issues around bandwidth. Specifically, where additional carry // lines are needed, etc. - return constraint.NewVanishingConstraint(p.Handle, p.Context, p.Domain, c) + return vanishing.NewConstraint(p.Handle, p.Context, p.Domain, c) } func splitLogicalTerm(term LogicalTerm, mapping schema.RegisterLimbsMap) LogicalTerm { diff --git a/pkg/ir/mir/constraint.go b/pkg/ir/mir/constraint.go index f0babb9f9..4d0cccb78 100644 --- a/pkg/ir/mir/constraint.go +++ b/pkg/ir/mir/constraint.go @@ -17,6 +17,12 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/interleaving" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" + "github.com/consensys/go-corset/pkg/schema/constraint/permutation" + "github.com/consensys/go-corset/pkg/schema/constraint/ranged" + "github.com/consensys/go-corset/pkg/schema/constraint/sorted" + "github.com/consensys/go-corset/pkg/schema/constraint/vanishing" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" @@ -41,36 +47,36 @@ func NewAssertion(handle string, ctx schema.ModuleId, term LogicalTerm) Constrai func NewVanishingConstraint(handle string, ctx schema.ModuleId, domain util.Option[int], term LogicalTerm) Constraint { // - return Constraint{constraint.NewVanishingConstraint(handle, ctx, domain, term)} + return Constraint{vanishing.NewConstraint(handle, ctx, domain, term)} } // NewInterleavingConstraint creates a new interleaving constraint with a given handle. func NewInterleavingConstraint(handle string, targetContext schema.ModuleId, sourceContext schema.ModuleId, target Term, sources []Term) Constraint { - return Constraint{constraint.NewInterleavingConstraint(handle, targetContext, sourceContext, target, sources)} + return Constraint{interleaving.NewConstraint(handle, targetContext, sourceContext, target, sources)} } // NewLookupConstraint creates a new lookup constraint with a given handle. 
func NewLookupConstraint(handle string, targets []ir.Enclosed[[]Term], sources []ir.Enclosed[[]Term]) Constraint { - return Constraint{constraint.NewLookupConstraint(handle, targets, sources)} + return Constraint{lookup.NewConstraint(handle, targets, sources)} } // NewPermutationConstraint creates a new permutation func NewPermutationConstraint(handle string, context schema.ModuleId, targets []schema.RegisterId, sources []schema.RegisterId) Constraint { - return Constraint{constraint.NewPermutationConstraint(handle, context, targets, sources)} + return Constraint{permutation.NewPermutationConstraint(handle, context, targets, sources)} } // NewRangeConstraint constructs a new Range constraint! func NewRangeConstraint(handle string, ctx schema.ModuleId, expr Term, bitwidth uint) Constraint { - return Constraint{constraint.NewRangeConstraint(handle, ctx, expr, bitwidth)} + return Constraint{ranged.NewRangeConstraint(handle, ctx, expr, bitwidth)} } // NewSortedConstraint creates a new Sorted func NewSortedConstraint(handle string, context schema.ModuleId, bitwidth uint, selector util.Option[Term], sources []Term, signs []bool, strict bool) Constraint { // - return Constraint{constraint.NewSortedConstraint(handle, context, bitwidth, selector, sources, signs, strict)} + return Constraint{sorted.NewSortedConstraint(handle, context, bitwidth, selector, sources, signs, strict)} } // Accepts determines whether a given constraint accepts a given trace or diff --git a/pkg/ir/mir/lookup.go b/pkg/ir/mir/lookup.go index 96492e07d..7229a43d6 100644 --- a/pkg/ir/mir/lookup.go +++ b/pkg/ir/mir/lookup.go @@ -17,14 +17,14 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" - "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" ) // Subdivide implementation for the FieldAgnostic interface. func subdivideLookup(c LookupConstraint, mapping schema.LimbsMap) LookupConstraint { var ( // Determine overall geometry for this lookup. 
- geometry = constraint.NewLookupGeometry(c, mapping) + geometry = lookup.NewGeometry(c, mapping) // Split all registers in the source vectors sources = mapLookupVectors(c.Sources, mapping) // Split all registers in the target vectors @@ -40,7 +40,7 @@ func subdivideLookup(c LookupConstraint, mapping schema.LimbsMap) LookupConstrai targets = padLookupVectors(rawTargets, geometry) sources = padLookupVectors(rawSources, geometry) // - return constraint.NewLookupConstraint(c.Handle, targets, sources) + return lookup.NewConstraint(c.Handle, targets, sources) } // Mapping lookup vectors essentially means applying the limb mapping to all @@ -93,7 +93,7 @@ func splitLookupVector(terms []Term) [][]Term { return nterms } -func alignLookupVectors(vectors []ir.Enclosed[[][]Term], geometry constraint.LookupGeometry, +func alignLookupVectors(vectors []ir.Enclosed[[][]Term], geometry lookup.Geometry, mapping schema.LimbsMap) { // for _, vector := range vectors { @@ -101,7 +101,7 @@ func alignLookupVectors(vectors []ir.Enclosed[[][]Term], geometry constraint.Loo } } -func alignLookupVector(vector ir.Enclosed[[][]Term], geometry constraint.LookupGeometry, mapping schema.LimbsMap) { +func alignLookupVector(vector ir.Enclosed[[][]Term], geometry lookup.Geometry, mapping schema.LimbsMap) { var modmap = mapping.Module(vector.Module) // for i, limbs := range vector.Item { @@ -136,7 +136,7 @@ func alignLookupLimbs(selector bool, limbs []Term, geometry []uint, mapping sche } } -func padLookupVectors(vectors []ir.Enclosed[[][]Term], geometry constraint.LookupGeometry) []ir.Enclosed[[]Term] { +func padLookupVectors(vectors []ir.Enclosed[[][]Term], geometry lookup.Geometry) []ir.Enclosed[[]Term] { var nterms []ir.Enclosed[[]Term] = make([]ir.Enclosed[[]Term], len(vectors)) // for i, vector := range vectors { @@ -147,7 +147,7 @@ func padLookupVectors(vectors []ir.Enclosed[[][]Term], geometry constraint.Looku return nterms } -func padTerms(terms [][]Term, geometry constraint.LookupGeometry) []Term { +func padTerms(terms [][]Term, geometry lookup.Geometry) []Term { var nterms []Term for i, ith := range terms { diff --git a/pkg/ir/mir/schema.go b/pkg/ir/mir/schema.go index a64726ebc..ce94d3735 100644 --- a/pkg/ir/mir/schema.go +++ b/pkg/ir/mir/schema.go @@ -19,6 +19,12 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint" + "github.com/consensys/go-corset/pkg/schema/constraint/interleaving" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" + "github.com/consensys/go-corset/pkg/schema/constraint/permutation" + "github.com/consensys/go-corset/pkg/schema/constraint/ranged" + "github.com/consensys/go-corset/pkg/schema/constraint/sorted" + "github.com/consensys/go-corset/pkg/schema/constraint/vanishing" ) // Following types capture top-level abstractions at the MIR level. @@ -49,22 +55,22 @@ type ( Assertion = constraint.Assertion[LogicalTerm] // InterleavingConstraint captures the essence of an interleaving constraint // at the MIR level. - InterleavingConstraint = constraint.InterleavingConstraint[Term] + InterleavingConstraint = interleaving.Constraint[Term] // LookupConstraint captures the essence of a lookup constraint at the MIR // level. - LookupConstraint = constraint.LookupConstraint[Term] + LookupConstraint = lookup.Constraint[Term] // PermutationConstraint captures the essence of a permutation constraint at the // MIR level. 
- PermutationConstraint = constraint.PermutationConstraint + PermutationConstraint = permutation.Constraint // RangeConstraint captures the essence of a range constraints at the MIR level. - RangeConstraint = constraint.RangeConstraint[Term] + RangeConstraint = ranged.Constraint[Term] // SortedConstraint captures the essence of a sorted constraint at the MIR // level. - SortedConstraint = constraint.SortedConstraint[Term] + SortedConstraint = sorted.Constraint[Term] // VanishingConstraint captures the essence of a vanishing constraint at the MIR // level. A vanishing constraint is a row constraint which must evaluate to // zero. - VanishingConstraint = constraint.VanishingConstraint[LogicalTerm] + VanishingConstraint = vanishing.Constraint[LogicalTerm] ) // Following types capture permitted expression forms at the MIR level. diff --git a/pkg/schema/constraint/assertion.go b/pkg/schema/constraint/assertion.go index abb46694d..71dfa0721 100644 --- a/pkg/schema/constraint/assertion.go +++ b/pkg/schema/constraint/assertion.go @@ -84,7 +84,7 @@ func NewAssertion[T ir.Testable](handle string, ctx schema.ModuleId, property T) // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. func (p Assertion[T]) Consistent(schema schema.AnySchema) []error { - return checkConsistent(p.Context, schema, p.Property) + return CheckConsistent(p.Context, schema, p.Property) } // Name returns a unique name for a given constraint. This is useful diff --git a/pkg/schema/constraint/interleaving.go b/pkg/schema/constraint/interleaving/constraint.go similarity index 63% rename from pkg/schema/constraint/interleaving.go rename to pkg/schema/constraint/interleaving/constraint.go index 00acb72a2..7634243cc 100644 --- a/pkg/schema/constraint/interleaving.go +++ b/pkg/schema/constraint/interleaving/constraint.go @@ -10,60 +10,23 @@ // specific language governing permissions and limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -package constraint +package interleaving import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" - "github.com/consensys/go-corset/pkg/util/collection/set" "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// InterleavingFailure provides structural information about a failing lookup constraint. -type InterleavingFailure struct { - // Handle of the failing constraint - Handle string - // Relevant context for target expressions. - TargetContext schema.ModuleId - // Target expression involved - Target ir.Evaluable - // Relevant context for source expressions. - SourceContext schema.ModuleId - // Source expression which were missing - Source ir.Evaluable - // Target row on which constraint - Row uint -} - -// Message provides a suitable error message -func (p *InterleavingFailure) Message() string { - return fmt.Sprintf("interleaving \"%s\" failed (row %d)", p.Handle, p.Row) -} - -func (p *InterleavingFailure) String() string { - return p.Message() -} - -// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. 
-func (p *InterleavingFailure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { - var res = set.NewAnySortedSet[trace.CellRef]() - // - res.InsertSorted(p.Source.RequiredCells(int(p.Row), p.SourceContext)) - res.InsertSorted(p.Target.RequiredCells(int(p.Row), p.TargetContext)) - // - return res -} - -// InterleavingConstraint declares a constraint that one expression represents the +// Constraint declares a constraint that one expression represents the // interleaving of one or more expressions. For example, suppose X=[1,2] and // Y=[3,4]. Then Z=[1,3,2,4] is the interleaving of X and Y. -type InterleavingConstraint[E ir.Evaluable] struct { +type Constraint[E ir.Evaluable] struct { Handle string // Context in which all target columns are evaluated. TargetContext schema.ModuleId @@ -75,24 +38,24 @@ type InterleavingConstraint[E ir.Evaluable] struct { Sources []E } -// NewInterleavingConstraint creates a new Interleave -func NewInterleavingConstraint[E ir.Evaluable](handle string, targetContext schema.ModuleId, - sourceContext schema.ModuleId, target E, sources []E) InterleavingConstraint[E] { +// NewConstraint creates a new Interleave +func NewConstraint[E ir.Evaluable](handle string, targetContext schema.ModuleId, + sourceContext schema.ModuleId, target E, sources []E) Constraint[E] { // - return InterleavingConstraint[E]{handle, targetContext, sourceContext, target, sources} + return Constraint[E]{handle, targetContext, sourceContext, target, sources} } // Consistent applies a number of internal consistency checks. Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p InterleavingConstraint[E]) Consistent(schema schema.AnySchema) []error { +func (p Constraint[E]) Consistent(schema schema.AnySchema) []error { // TODO: check column access, and widths, etc. return nil } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. -func (p InterleavingConstraint[E]) Name() string { +func (p Constraint[E]) Name() string { return p.Handle } @@ -101,7 +64,7 @@ func (p InterleavingConstraint[E]) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). -func (p InterleavingConstraint[E]) Contexts() []schema.ModuleId { +func (p Constraint[E]) Contexts() []schema.ModuleId { return []schema.ModuleId{p.TargetContext, p.SourceContext} } @@ -110,13 +73,13 @@ func (p InterleavingConstraint[E]) Contexts() []schema.ModuleId { // expression such as "(shift X -1)". This is technically undefined for the // first row of any trace and, by association, any constraint evaluating this // expression on that first row is also undefined (and hence must pass). -func (p InterleavingConstraint[E]) Bounds(module uint) util.Bounds { +func (p Constraint[E]) Bounds(module uint) util.Bounds { return util.EMPTY_BOUND } // Accepts checks whether a Interleave holds between the source and // target columns. 
-func (p InterleavingConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { var ( coverage bit.Set srcTrMod = tr.Module(p.SourceContext) @@ -136,16 +99,24 @@ func (p InterleavingConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) s, s_err := p.Sources[row%n].EvalAt(row/n, srcTrMod, srcScMod) // Checks if t_err != nil { - return coverage, &InternalFailure{ - p.Handle, p.TargetContext, uint(row), p.Target, t_err.Error(), + return coverage, &constraint.InternalFailure{ + Handle: p.Handle, + Context: p.TargetContext, + Row: uint(row), + Term: p.Target, + Error: t_err.Error(), } } else if s_err != nil { - return coverage, &InternalFailure{ - p.Handle, p.SourceContext, uint(row / n), p.Sources[row%n], s_err.Error(), + return coverage, &constraint.InternalFailure{ + Handle: p.Handle, + Context: p.SourceContext, + Row: uint(row / n), + Term: p.Sources[row%n], + Error: s_err.Error(), } } else if t.Cmp(&s) != 0 { // Evaluation failure - return coverage, &InterleavingFailure{ + return coverage, &Failure{ p.Handle, p.TargetContext, p.Target, @@ -161,7 +132,7 @@ func (p InterleavingConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) // Lisp converts this schema element into a simple S-Expression, for example // so it can be printed. -func (p InterleavingConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { var ( sourceModule = schema.Module(p.SourceContext) targetModule = schema.Module(p.TargetContext) @@ -191,7 +162,7 @@ func (p InterleavingConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p InterleavingConstraint[E]) Substitute(mapping map[string]fr.Element) { +func (p Constraint[E]) Substitute(mapping map[string]fr.Element) { for _, s := range p.Sources { s.Substitute(mapping) } diff --git a/pkg/schema/constraint/interleaving/failure.go b/pkg/schema/constraint/interleaving/failure.go new file mode 100644 index 000000000..99440465f --- /dev/null +++ b/pkg/schema/constraint/interleaving/failure.go @@ -0,0 +1,57 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package interleaving + +import ( + "fmt" + + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" + "github.com/consensys/go-corset/pkg/util/collection/set" +) + +// Failure provides structural information about a failing lookup constraint. +type Failure struct { + // Handle of the failing constraint + Handle string + // Relevant context for target expressions. + TargetContext schema.ModuleId + // Target expression involved + Target ir.Evaluable + // Relevant context for source expressions. 
+ SourceContext schema.ModuleId + // Source expression which were missing + Source ir.Evaluable + // Target row on which constraint + Row uint +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + return fmt.Sprintf("interleaving \"%s\" failed (row %d)", p.Handle, p.Row) +} + +func (p *Failure) String() string { + return p.Message() +} + +// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. +func (p *Failure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { + var res = set.NewAnySortedSet[trace.CellRef]() + // + res.InsertSorted(p.Source.RequiredCells(int(p.Row), p.SourceContext)) + res.InsertSorted(p.Target.RequiredCells(int(p.Row), p.TargetContext)) + // + return res +} diff --git a/pkg/schema/constraint/lookup.go b/pkg/schema/constraint/lookup/constraint.go similarity index 74% rename from pkg/schema/constraint/lookup.go rename to pkg/schema/constraint/lookup/constraint.go index b0c77e4a7..bf1f78db7 100644 --- a/pkg/schema/constraint/lookup.go +++ b/pkg/schema/constraint/lookup/constraint.go @@ -10,7 +10,7 @@ // specific language governing permissions and limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -package constraint +package lookup import ( "encoding/binary" @@ -19,89 +19,17 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" - "github.com/consensys/go-corset/pkg/schema/agnostic" + "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" "github.com/consensys/go-corset/pkg/util/collection/hash" - "github.com/consensys/go-corset/pkg/util/collection/set" "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// LookupFailure provides structural information about a failing lookup constraint. -type LookupFailure struct { - // Handle of the failing constraint - Handle string - // Relevant context for source expressions. - Context schema.ModuleId - // Source expressions which were missing - Sources []ir.Evaluable - // Row on which the constraint failed - Row uint -} - -// Message provides a suitable error message -func (p *LookupFailure) Message() string { - return fmt.Sprintf("lookup \"%s\" failed (row %d)", p.Handle, p.Row) -} - -func (p *LookupFailure) String() string { - return p.Message() -} - -// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. -func (p *LookupFailure) RequiredCells(_ trace.Trace) *set.AnySortedSet[trace.CellRef] { - res := set.NewAnySortedSet[trace.CellRef]() - // - for i, e := range p.Sources { - if i != 0 || e.IsDefined() { - res.InsertSorted(e.RequiredCells(int(p.Row), p.Context)) - } - } - // - return res -} - -// LookupGeometry defines the "geometry" of a lookup. That is the maximum -// bitwidth for each source-target pairing in the lookup. For example, consider -// a lookup where (X Y) looksup into (A B). Suppose X is 16bit and Y is 32bit, -// whilst A is 64bit and B is 8bit. Then, the geometry of the lookup is [16,32]. -type LookupGeometry struct { - config schema.FieldConfig - // bitwidth for each source/target pairing - geometry []uint -} - -// NewLookupGeometry returns the calculated "geometry" for this lookup. That -// is, for each source/target pair, the maximum bitwidth of any source or target -// value. 
-func NewLookupGeometry[E ir.Evaluable, T schema.RegisterMap](c LookupConstraint[E], - mapping schema.ModuleMap[T]) LookupGeometry { - // - var geometry []uint = make([]uint, len(c.Sources[0].Item)) - // Include sources - for _, source := range c.Sources { - updateGeometry(geometry, source, mapping) - } - // Include targets - for _, target := range c.Targets { - updateGeometry(geometry, target, mapping) - } - // - return LookupGeometry{mapping.Field(), geometry} -} - -// LimbWidths returns the bitwidths for the required limbs for a given -// source/target pairing in the lookup. -func (p *LookupGeometry) LimbWidths(i uint) []uint { - if p.geometry[i] == 0 { - return nil - } - // - return agnostic.LimbWidths(p.config.RegisterWidth, p.geometry[i]) -} +var frZero = fr.NewElement(0) -// LookupConstraint (sometimes also called an inclusion constraint) constrains +// Constraint (sometimes also called an inclusion constraint) constrains // two sets of columns (potentially in different modules). Specifically, every // row in the source columns must match a row in the target columns (but not // vice-versa). As such, the number of source columns must be the same as the @@ -116,7 +44,7 @@ func (p *LookupGeometry) LimbWidths(i uint) []uint { // pairs (and perhaps other constraints to ensure the required relationship) and // the source module is just checking that a given set of input/output pairs // makes sense. -type LookupConstraint[E ir.Evaluable] struct { +type Constraint[E ir.Evaluable] struct { // Handle returns the handle for this lookup constraint which is simply an // identifier useful when debugging (i.e. to know which lookup failed, etc). Handle string @@ -130,9 +58,9 @@ type LookupConstraint[E ir.Evaluable] struct { Sources []ir.Enclosed[[]E] } -// NewLookupConstraint creates a new lookup constraint with a given handle. -func NewLookupConstraint[E ir.Evaluable](handle string, targets []ir.Enclosed[[]E], - sources []ir.Enclosed[[]E]) LookupConstraint[E] { +// NewConstraint creates a new lookup constraint with a given handle. +func NewConstraint[E ir.Evaluable](handle string, targets []ir.Enclosed[[]E], + sources []ir.Enclosed[[]E]) Constraint[E] { var width int // Check sources for i, ith := range sources { @@ -149,7 +77,7 @@ func NewLookupConstraint[E ir.Evaluable](handle string, targets []ir.Enclosed[[] } } - return LookupConstraint[E]{Handle: handle, + return Constraint[E]{Handle: handle, Targets: targets, Sources: sources, } @@ -158,13 +86,13 @@ func NewLookupConstraint[E ir.Evaluable](handle string, targets []ir.Enclosed[[] // Consistent applies a number of internal consistency checks. Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p LookupConstraint[E]) Consistent(_ schema.AnySchema) []error { +func (p Constraint[E]) Consistent(_ schema.AnySchema) []error { return nil } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. -func (p LookupConstraint[E]) Name() string { +func (p Constraint[E]) Name() string { return p.Handle } @@ -173,7 +101,7 @@ func (p LookupConstraint[E]) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). 
-func (p LookupConstraint[E]) Contexts() []schema.ModuleId { +func (p Constraint[E]) Contexts() []schema.ModuleId { var contexts []schema.ModuleId // source contexts for _, source := range p.Sources { @@ -194,7 +122,7 @@ func (p LookupConstraint[E]) Contexts() []schema.ModuleId { // expression on that first row is also undefined (and hence must pass). // //nolint:revive -func (p LookupConstraint[E]) Bounds(module uint) util.Bounds { +func (p Constraint[E]) Bounds(module uint) util.Bounds { var bound util.Bounds // sources for _, ith := range p.Sources { @@ -226,7 +154,7 @@ func (p LookupConstraint[E]) Bounds(module uint) util.Bounds { // all rows of the source columns. // //nolint:revive -func (p LookupConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { var ( coverage bit.Set // Determine width (in columns) of this lookup @@ -279,7 +207,7 @@ func (p LookupConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.S sources[i] = e } // Construct failures - return coverage, &LookupFailure{ + return coverage, &Failure{ p.Handle, ith.Module, sources, i, } } @@ -310,8 +238,12 @@ func evalExprsAsBytes[E ir.Evaluable](k int, selector bool, terms ir.Enclosed[[] ith, err := sources[i].EvalAt(k, trModule, scModule) // error check if err != nil { - return nil, &InternalFailure{ - handle, terms.Module, uint(i), sources[i], err.Error(), + return nil, &constraint.InternalFailure{ + Handle: handle, + Context: terms.Module, + Row: uint(i), + Term: sources[i], + Error: err.Error(), } } else if i == 0 { // Selector determines whether or not this row is enabled. If the @@ -338,7 +270,7 @@ func evalExprsAsBytes[E ir.Evaluable](k int, selector bool, terms ir.Enclosed[[] // so it can be printed. // //nolint:revive -func (p LookupConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { var ( sources = sexp.EmptyList() targets = sexp.EmptyList() @@ -387,7 +319,7 @@ func (p LookupConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p LookupConstraint[E]) Substitute(mapping map[string]fr.Element) { +func (p Constraint[E]) Substitute(mapping map[string]fr.Element) { // Sources for _, ith := range p.Sources { for _, s := range ith.Item { diff --git a/pkg/schema/constraint/lookup/failure.go b/pkg/schema/constraint/lookup/failure.go new file mode 100644 index 000000000..9ad48630c --- /dev/null +++ b/pkg/schema/constraint/lookup/failure.go @@ -0,0 +1,56 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +package lookup + +import ( + "fmt" + + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" + "github.com/consensys/go-corset/pkg/util/collection/set" +) + +// Failure provides structural information about a failing lookup constraint. +type Failure struct { + // Handle of the failing constraint + Handle string + // Relevant context for source expressions. + Context schema.ModuleId + // Source expressions which were missing + Sources []ir.Evaluable + // Row on which the constraint failed + Row uint +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + return fmt.Sprintf("lookup \"%s\" failed (row %d)", p.Handle, p.Row) +} + +func (p *Failure) String() string { + return p.Message() +} + +// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. +func (p *Failure) RequiredCells(_ trace.Trace) *set.AnySortedSet[trace.CellRef] { + res := set.NewAnySortedSet[trace.CellRef]() + // + for i, e := range p.Sources { + if i != 0 || e.IsDefined() { + res.InsertSorted(e.RequiredCells(int(p.Row), p.Context)) + } + } + // + return res +} diff --git a/pkg/schema/constraint/lookup/geometry.go b/pkg/schema/constraint/lookup/geometry.go new file mode 100644 index 000000000..2dce4b352 --- /dev/null +++ b/pkg/schema/constraint/lookup/geometry.go @@ -0,0 +1,58 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package lookup + +import ( + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/agnostic" +) + +// Geometry defines the "geometry" of a lookup. That is the maximum +// bitwidth for each source-target pairing in the lookup. For example, consider +// a lookup where (X Y) looksup into (A B). Suppose X is 16bit and Y is 32bit, +// whilst A is 64bit and B is 8bit. Then, the geometry of the lookup is [16,32]. +type Geometry struct { + config schema.FieldConfig + // bitwidth for each source/target pairing + geometry []uint +} + +// NewGeometry returns the calculated "geometry" for this lookup. That +// is, for each source/target pair, the maximum bitwidth of any source or target +// value. +func NewGeometry[E ir.Evaluable, T schema.RegisterMap](c Constraint[E], + mapping schema.ModuleMap[T]) Geometry { + // + var geometry []uint = make([]uint, len(c.Sources[0].Item)) + // Include sources + for _, source := range c.Sources { + updateGeometry(geometry, source, mapping) + } + // Include targets + for _, target := range c.Targets { + updateGeometry(geometry, target, mapping) + } + // + return Geometry{mapping.Field(), geometry} +} + +// LimbWidths returns the bitwidths for the required limbs for a given +// source/target pairing in the lookup. 
+func (p *Geometry) LimbWidths(i uint) []uint { + if p.geometry[i] == 0 { + return nil + } + // + return agnostic.LimbWidths(p.config.RegisterWidth, p.geometry[i]) +} diff --git a/pkg/schema/constraint/permutation.go b/pkg/schema/constraint/permutation/constraint.go similarity index 81% rename from pkg/schema/constraint/permutation.go rename to pkg/schema/constraint/permutation/constraint.go index e6cef05e5..02d8e2c86 100644 --- a/pkg/schema/constraint/permutation.go +++ b/pkg/schema/constraint/permutation/constraint.go @@ -10,7 +10,7 @@ // specific language governing permissions and limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -package constraint +package permutation import ( "fmt" @@ -25,23 +25,9 @@ import ( "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// PermutationFailure provides structural information about a failing permutation constraint. -type PermutationFailure struct { - Msg string -} - -// Message provides a suitable error message -func (p *PermutationFailure) Message() string { - return p.Msg -} - -func (p *PermutationFailure) String() string { - return p.Msg -} - -// PermutationConstraint declares a constraint that one (or more) columns are a permutation +// Constraint declares a constraint that one (or more) columns are a permutation // of another. -type PermutationConstraint struct { +type Constraint struct { Handle string // Evaluation Context for this constraint which must match that of the // source and target expressions. @@ -56,25 +42,25 @@ type PermutationConstraint struct { // NewPermutationConstraint creates a new permutation func NewPermutationConstraint(handle string, context schema.ModuleId, targets []schema.RegisterId, - sources []schema.RegisterId) PermutationConstraint { + sources []schema.RegisterId) Constraint { if len(targets) != len(sources) { panic("differeng number of target / source permutation columns") } - return PermutationConstraint{handle, context, targets, sources} + return Constraint{handle, context, targets, sources} } // Consistent applies a number of internal consistency checks. Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p PermutationConstraint) Consistent(schema schema.AnySchema) []error { +func (p Constraint) Consistent(schema schema.AnySchema) []error { // TODO: check column access, and widths, etc. return nil } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. -func (p PermutationConstraint) Name() string { +func (p Constraint) Name() string { return p.Handle } @@ -83,7 +69,7 @@ func (p PermutationConstraint) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). -func (p PermutationConstraint) Contexts() []schema.ModuleId { +func (p Constraint) Contexts() []schema.ModuleId { return []schema.ModuleId{p.Context} } @@ -92,13 +78,13 @@ func (p PermutationConstraint) Contexts() []schema.ModuleId { // expression such as "(shift X -1)". This is technically undefined for the // first row of any trace and, by association, any constraint evaluating this // expression on that first row is also undefined (and hence must pass). 
-func (p PermutationConstraint) Bounds(module uint) util.Bounds { +func (p Constraint) Bounds(module uint) util.Bounds { return util.EMPTY_BOUND } // Accepts checks whether a permutation holds between the source and // target columns. -func (p PermutationConstraint) Accepts(tr trace.Trace, _ schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint) Accepts(tr trace.Trace, _ schema.AnySchema) (bit.Set, schema.Failure) { var ( // Coverage currently always empty for permutation constraints. coverage bit.Set @@ -120,12 +106,12 @@ func (p PermutationConstraint) Accepts(tr trace.Trace, _ schema.AnySchema) (bit. msg := fmt.Sprintf("Target columns (%s) not permutation of source columns (%s)", dst_names, src_names) // Done - return coverage, &PermutationFailure{msg} + return coverage, &Failure{msg} } // Lisp converts this schema element into a simple S-Expression, for example // so it can be printed. -func (p PermutationConstraint) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint) Lisp(schema schema.AnySchema) sexp.SExp { var ( module = schema.Module(p.Context) targets = sexp.EmptyList() @@ -150,7 +136,7 @@ func (p PermutationConstraint) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p PermutationConstraint) Substitute(map[string]fr.Element) { +func (p Constraint) Substitute(map[string]fr.Element) { // nothing to do here } diff --git a/pkg/schema/constraint/permutation/failure.go b/pkg/schema/constraint/permutation/failure.go new file mode 100644 index 000000000..1e602d667 --- /dev/null +++ b/pkg/schema/constraint/permutation/failure.go @@ -0,0 +1,27 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package permutation + +// Failure provides structural information about a failing permutation constraint. +type Failure struct { + Msg string +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + return p.Msg +} + +func (p *Failure) String() string { + return p.Msg +} diff --git a/pkg/schema/constraint/range.go b/pkg/schema/constraint/ranged/constraint.go similarity index 68% rename from pkg/schema/constraint/range.go rename to pkg/schema/constraint/ranged/constraint.go index 0df920e46..f25b8373e 100644 --- a/pkg/schema/constraint/range.go +++ b/pkg/schema/constraint/ranged/constraint.go @@ -10,7 +10,7 @@ // specific language governing permissions and limitations under the License. 
// // SPDX-License-Identifier: Apache-2.0 -package constraint +package ranged import ( "fmt" @@ -19,46 +19,17 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" - "github.com/consensys/go-corset/pkg/util/collection/set" "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// RangeFailure provides structural information about a failing type constraint. -type RangeFailure struct { - // Handle of the failing constraint - Handle string - // Enclosing context - Context schema.ModuleId - // Constraint expression - Expr ir.Evaluable - // Range restriction - Bitwidth uint - // Row on which the constraint failed - Row uint -} - -// Message provides a suitable error message -func (p *RangeFailure) Message() string { - // Construct useful error message - return fmt.Sprintf("range \"%s\" is u%d does not hold (row %d)", p.Handle, p.Bitwidth, p.Row) -} - -func (p *RangeFailure) String() string { - return p.Message() -} - -// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. -func (p *RangeFailure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { - return p.Expr.RequiredCells(int(p.Row), p.Context) -} - -// RangeConstraint restricts all values for a given expression to be within a +// Constraint restricts all values for a given expression to be within a // range [0..n) for some bound n. Any bound is supported, and the system will // choose the best underlying implementation as needed. -type RangeConstraint[E ir.Evaluable] struct { +type Constraint[E ir.Evaluable] struct { // A unique identifier for this constraint. This is primarily useful for // debugging. Handle string @@ -75,20 +46,20 @@ type RangeConstraint[E ir.Evaluable] struct { // NewRangeConstraint constructs a new Range constraint! func NewRangeConstraint[E ir.Evaluable](handle string, context schema.ModuleId, - expr E, bitwidth uint) RangeConstraint[E] { - return RangeConstraint[E]{handle, context, expr, bitwidth} + expr E, bitwidth uint) Constraint[E] { + return Constraint[E]{handle, context, expr, bitwidth} } // Consistent applies a number of internal consistency checks. Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p RangeConstraint[E]) Consistent(schema schema.AnySchema) []error { - return checkConsistent(p.Context, schema, p.Expr) +func (p Constraint[E]) Consistent(schema schema.AnySchema) []error { + return constraint.CheckConsistent(p.Context, schema, p.Expr) } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. -func (p RangeConstraint[E]) Name() string { +func (p Constraint[E]) Name() string { return p.Handle } @@ -97,7 +68,7 @@ func (p RangeConstraint[E]) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). -func (p RangeConstraint[E]) Contexts() []schema.ModuleId { +func (p Constraint[E]) Contexts() []schema.ModuleId { return []schema.ModuleId{p.Context} } @@ -108,7 +79,7 @@ func (p RangeConstraint[E]) Contexts() []schema.ModuleId { // expression on that first row is also undefined (and hence must pass). 
// //nolint:revive -func (p RangeConstraint[E]) Bounds(module uint) util.Bounds { +func (p Constraint[E]) Bounds(module uint) util.Bounds { if p.Context == module { return p.Expr.Bounds() } @@ -120,12 +91,12 @@ func (p RangeConstraint[E]) Bounds(module uint) util.Bounds { // nil otherwise return an error. // //nolint:revive -func (p RangeConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { var ( coverage bit.Set trModule = tr.Module(p.Context) scModule = sc.Module(p.Context) - handle = determineHandle(p.Handle, p.Context, tr) + handle = constraint.DetermineHandle(p.Handle, p.Context, tr) bound = big.NewInt(2) frBound fr.Element ) @@ -141,12 +112,16 @@ func (p RangeConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Se kth, err := p.Expr.EvalAt(k, trModule, scModule) // Perform the range check if err != nil { - return coverage, &InternalFailure{ - p.Handle, p.Context, uint(k), p.Expr, err.Error(), + return coverage, &constraint.InternalFailure{ + Handle: p.Handle, + Context: p.Context, + Row: uint(k), + Term: p.Expr, + Error: err.Error(), } } else if kth.Cmp(&frBound) >= 0 { // Evaluation failure - return coverage, &RangeFailure{handle, p.Context, p.Expr, p.Bitwidth, uint(k)} + return coverage, &Failure{handle, p.Context, p.Expr, p.Bitwidth, uint(k)} } } // All good @@ -157,7 +132,7 @@ func (p RangeConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Se // it can be printed. // //nolint:revive -func (p RangeConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { module := schema.Module(p.Context) // return sexp.NewList([]sexp.SExp{ @@ -168,6 +143,6 @@ func (p RangeConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p RangeConstraint[E]) Substitute(mapping map[string]fr.Element) { +func (p Constraint[E]) Substitute(mapping map[string]fr.Element) { p.Expr.Substitute(mapping) } diff --git a/pkg/schema/constraint/ranged/failure.go b/pkg/schema/constraint/ranged/failure.go new file mode 100644 index 000000000..e5c1709fd --- /dev/null +++ b/pkg/schema/constraint/ranged/failure.go @@ -0,0 +1,51 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package ranged + +import ( + "fmt" + + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" + "github.com/consensys/go-corset/pkg/util/collection/set" +) + +// Failure provides structural information about a failing type constraint. 
+type Failure struct { + // Handle of the failing constraint + Handle string + // Enclosing context + Context schema.ModuleId + // Constraint expression + Expr ir.Evaluable + // Range restriction + Bitwidth uint + // Row on which the constraint failed + Row uint +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + // Construct useful error message + return fmt.Sprintf("range \"%s\" is u%d does not hold (row %d)", p.Handle, p.Bitwidth, p.Row) +} + +func (p *Failure) String() string { + return p.Message() +} + +// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. +func (p *Failure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { + return p.Expr.RequiredCells(int(p.Row), p.Context) +} diff --git a/pkg/schema/constraint/sorted.go b/pkg/schema/constraint/sorted/constraint.go similarity index 82% rename from pkg/schema/constraint/sorted.go rename to pkg/schema/constraint/sorted/constraint.go index d905a7e52..778e1d463 100644 --- a/pkg/schema/constraint/sorted.go +++ b/pkg/schema/constraint/sorted/constraint.go @@ -10,7 +10,7 @@ // specific language governing permissions and limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -package constraint +package sorted import ( "fmt" @@ -19,29 +19,16 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// SortedFailure provides structural information about a failing Sorted constraint. -type SortedFailure struct { - Msg string -} - -// Message provides a suitable error message -func (p *SortedFailure) Message() string { - return p.Msg -} - -func (p *SortedFailure) String() string { - return p.Msg -} - -// SortedConstraint declares a constraint that one (or more) columns are +// Constraint declares a constraint that one (or more) columns are // lexicographically sorted. -type SortedConstraint[E ir.Evaluable] struct { +type Constraint[E ir.Evaluable] struct { Handle string // Evaluation Context for this constraint which must match that of the // source expressions. @@ -63,22 +50,22 @@ type SortedConstraint[E ir.Evaluable] struct { // NewSortedConstraint creates a new Sorted func NewSortedConstraint[E ir.Evaluable](handle string, context schema.ModuleId, bitwidth uint, selector util.Option[E], - sources []E, signs []bool, strict bool) SortedConstraint[E] { + sources []E, signs []bool, strict bool) Constraint[E] { // - return SortedConstraint[E]{handle, context, bitwidth, selector, sources, signs, strict} + return Constraint[E]{handle, context, bitwidth, selector, sources, signs, strict} } // Consistent applies a number of internal consistency checks. Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p SortedConstraint[E]) Consistent(schema schema.AnySchema) []error { +func (p Constraint[E]) Consistent(schema schema.AnySchema) []error { // TODO: add more useful checks return nil } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. 
-func (p SortedConstraint[E]) Name() string { +func (p Constraint[E]) Name() string { return p.Handle } @@ -87,7 +74,7 @@ func (p SortedConstraint[E]) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). -func (p SortedConstraint[E]) Contexts() []schema.ModuleId { +func (p Constraint[E]) Contexts() []schema.ModuleId { return []schema.ModuleId{p.Context} } @@ -96,7 +83,7 @@ func (p SortedConstraint[E]) Contexts() []schema.ModuleId { // expression such as "(shift X -1)". This is technically undefined for the // first row of any trace and, by association, any constraint evaluating this // expression on that first row is also undefined (and hence must pass). -func (p SortedConstraint[E]) Bounds(module uint) util.Bounds { +func (p Constraint[E]) Bounds(module uint) util.Bounds { var bound util.Bounds // if module == p.Context { @@ -111,7 +98,7 @@ func (p SortedConstraint[E]) Bounds(module uint) util.Bounds { // Accepts checks whether a Sorted holds between the source and // target columns. -func (p SortedConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { var ( coverage bit.Set // Determine enclosing module @@ -139,16 +126,21 @@ func (p SortedConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.S val, err := selector.EvalAt(int(k), trModule, scModule) // Check whether active (or not) if err != nil { - return coverage, &InternalFailure{p.Handle, p.Context, k, selector, err.Error()} + return coverage, &constraint.InternalFailure{ + Handle: p.Handle, + Context: p.Context, + Row: k, + Term: selector, + Error: err.Error()} } else if val.IsZero() { continue } } // Check sorting between rows k-1 and k if ok, err := sorted(k-1, k, deltaBound, p.Sources, p.Signs, p.Strict, trModule, scModule, lhs, rhs); err != nil { - return coverage, &InternalFailure{Handle: p.Handle, Context: p.Context, Row: k, Error: err.Error()} + return coverage, &constraint.InternalFailure{Handle: p.Handle, Context: p.Context, Row: k, Error: err.Error()} } else if !ok { - return coverage, &SortedFailure{fmt.Sprintf("sorted constraint \"%s\" failed (rows %d ~ %d)", p.Handle, k-1, k)} + return coverage, &Failure{fmt.Sprintf("sorted constraint \"%s\" failed (rows %d ~ %d)", p.Handle, k-1, k)} } } } @@ -158,7 +150,7 @@ func (p SortedConstraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.S // Lisp converts this schema element into a simple S-Expression, for example // so it can be printed. 
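The row-by-row check above delegates to a helper which compares adjacent rows lexicographically, honouring the per-column Signs and the Strict flag. The following is a minimal, self-contained sketch of that comparison (plain uint64 values stand in for field elements; delta bounds and bitwidth handling are omitted), not the package's actual helper:

package main

import "fmt"

// lexSorted reports whether row 'prev' may precede row 'next', where signs[i]
// marks column i as ascending (true) or descending (false), and strict forbids
// the two rows being entirely equal.
func lexSorted(prev, next []uint64, signs []bool, strict bool) bool {
	for i := range prev {
		if prev[i] == next[i] {
			continue // tie: move to the next column
		}
		if signs[i] {
			return prev[i] < next[i] // ascending column decides
		}
		return prev[i] > next[i] // descending column decides
	}
	// All columns equal: acceptable only for non-strict orderings.
	return !strict
}

func main() {
	fmt.Println(lexSorted([]uint64{1, 9}, []uint64{1, 4}, []bool{true, false}, true)) // true: tie, then 9 > 4 on a descending column
	fmt.Println(lexSorted([]uint64{2, 0}, []uint64{1, 0}, []bool{true, true}, false)) // false: 2 > 1 on an ascending column
}
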
-func (p SortedConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { var ( module = schema.Module(p.Context) kind = "sorted" @@ -200,7 +192,7 @@ func (p SortedConstraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p SortedConstraint[E]) Substitute(mapping map[string]fr.Element) { +func (p Constraint[E]) Substitute(mapping map[string]fr.Element) { for _, s := range p.Sources { s.Substitute(mapping) } @@ -210,7 +202,7 @@ func (p SortedConstraint[E]) Substitute(mapping map[string]fr.Element) { } } -func (p SortedConstraint[E]) deltaBound() fr.Element { +func (p Constraint[E]) deltaBound() fr.Element { var ( two fr.Element = fr.NewElement(2) bound fr.Element diff --git a/pkg/schema/constraint/sorted/failure.go b/pkg/schema/constraint/sorted/failure.go new file mode 100644 index 000000000..f67ad47dd --- /dev/null +++ b/pkg/schema/constraint/sorted/failure.go @@ -0,0 +1,27 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package sorted + +// Failure provides structural information about a failing Sorted constraint. +type Failure struct { + Msg string +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + return p.Msg +} + +func (p *Failure) String() string { + return p.Msg +} diff --git a/pkg/schema/constraint/util.go b/pkg/schema/constraint/util.go index 7dc17bf0c..43a87fbbe 100644 --- a/pkg/schema/constraint/util.go +++ b/pkg/schema/constraint/util.go @@ -15,17 +15,15 @@ package constraint import ( "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" ) -var frZero = fr.NewElement(0) - -// A simple consistency check for terms in a given module. Specifically, to -// check that: (1) the module exists; (2) all used registers existing with then -// given module. -func checkConsistent[E ir.Contextual](module uint, schema schema.AnySchema, terms ...E) []error { +// CheckConsistent performs a simple consistency check for terms in a given +// module. Specifically, to check that: (1) the module exists; (2) all used +// registers existing with then given module. +func CheckConsistent[E ir.Contextual](module uint, schema schema.AnySchema, terms ...E) []error { var errs []error // Sanity check module if module >= schema.Width() { @@ -48,3 +46,11 @@ func checkConsistent[E ir.Contextual](module uint, schema schema.AnySchema, term // return errs } + +// DetermineHandle is a very simple helper which determines a suitable qualified +// name for the given constraint handle. 
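A small, self-contained sketch of what the helper defined next is intended to produce: a handle qualified by the name of its enclosing module. The "." separator and the module name "memory" are assumptions made purely for illustration; the real form is whatever trace.QualifiedColumnName yields.

package main

import "fmt"

// qualify mimics the intent of DetermineHandle: prefix a constraint handle
// with the name of its enclosing module. The "." separator is an assumption
// for illustration only.
func qualify(moduleName, handle string) string {
	if moduleName == "" {
		return handle
	}
	return moduleName + "." + handle
}

func main() {
	fmt.Println(qualify("memory", "in_range")) // memory.in_range
}
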
+func DetermineHandle(handle string, ctx schema.ModuleId, tr trace.Trace) string { + modName := tr.Module(ctx).Name() + // + return trace.QualifiedColumnName(modName, handle) +} diff --git a/pkg/schema/constraint/vanishing.go b/pkg/schema/constraint/vanishing/constraint.go similarity index 71% rename from pkg/schema/constraint/vanishing.go rename to pkg/schema/constraint/vanishing/constraint.go index bfd4fc330..df8303239 100644 --- a/pkg/schema/constraint/vanishing.go +++ b/pkg/schema/constraint/vanishing/constraint.go @@ -10,7 +10,7 @@ // specific language governing permissions and limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -package constraint +package vanishing import ( "fmt" @@ -18,47 +18,20 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/bit" - "github.com/consensys/go-corset/pkg/util/collection/set" "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// VanishingFailure provides structural information about a failing vanishing constraint. -type VanishingFailure struct { - // Handle of the failing constraint - Handle string - // Constraint expression - Constraint ir.Testable - // Module where constraint failed - Context schema.ModuleId - // Row on which the constraint failed - Row uint -} - -// Message provides a suitable error message -func (p *VanishingFailure) Message() string { - // Construct useful error message - return fmt.Sprintf("constraint \"%s\" does not hold (row %d)", p.Handle, p.Row) -} - -// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. -func (p *VanishingFailure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { - return p.Constraint.RequiredCells(int(p.Row), p.Context) -} - -func (p *VanishingFailure) String() string { - return p.Message() -} - -// VanishingConstraint specifies a constraint which should hold on every row of the +// Constraint specifies a constraint which should hold on every row of the // table. The only exception is when the constraint is undefined (e.g. because // it references a non-existent table cell). In such case, the constraint is // ignored. This is parameterised by the type of the constraint expression. // Thus, we can reuse this definition across the various intermediate // representations (e.g. Mid-Level IR, Arithmetic IR, etc). -type VanishingConstraint[T ir.Testable] struct { +type Constraint[T ir.Testable] struct { // A unique identifier for this constraint. This is primarily // useful for debugging. Handle string @@ -74,22 +47,22 @@ type VanishingConstraint[T ir.Testable] struct { Constraint T } -// NewVanishingConstraint constructs a new vanishing constraint! -func NewVanishingConstraint[T ir.Testable](handle string, context schema.ModuleId, - domain util.Option[int], constraint T) VanishingConstraint[T] { - return VanishingConstraint[T]{handle, context, domain, constraint} +// NewConstraint constructs a new vanishing constraint! +func NewConstraint[T ir.Testable](handle string, context schema.ModuleId, + domain util.Option[int], constraint T) Constraint[T] { + return Constraint[T]{handle, context, domain, constraint} } // Consistent applies a number of internal consistency checks. 
Whilst not // strictly necessary, these can highlight otherwise hidden problems as an aid // to debugging. -func (p VanishingConstraint[E]) Consistent(schema schema.AnySchema) []error { - return checkConsistent(p.Context, schema, p.Constraint) +func (p Constraint[E]) Consistent(schema schema.AnySchema) []error { + return constraint.CheckConsistent(p.Context, schema, p.Constraint) } // Name returns a unique name for a given constraint. This is useful // purely for identifying constraints in reports, etc. -func (p VanishingConstraint[E]) Name() string { +func (p Constraint[E]) Name() string { return p.Handle } @@ -98,7 +71,7 @@ func (p VanishingConstraint[E]) Name() string { // evaluation context, though some (e.g. lookups) have more. Note that all // constraints have at least one context (which we can call the "primary" // context). -func (p VanishingConstraint[E]) Contexts() []schema.ModuleId { +func (p Constraint[E]) Contexts() []schema.ModuleId { return []schema.ModuleId{p.Context} } @@ -109,7 +82,7 @@ func (p VanishingConstraint[E]) Contexts() []schema.ModuleId { // expression on that first row is also undefined (and hence must pass). // //nolint:revive -func (p VanishingConstraint[T]) Bounds(module uint) util.Bounds { +func (p Constraint[T]) Bounds(module uint) util.Bounds { if p.Context == module { return p.Constraint.Bounds() } @@ -121,10 +94,10 @@ func (p VanishingConstraint[T]) Bounds(module uint) util.Bounds { // of a table. If so, return nil otherwise return an error. // //nolint:revive -func (p VanishingConstraint[T]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { +func (p Constraint[T]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, schema.Failure) { var ( // Handle is used for error reporting. - handle = determineHandle(p.Handle, p.Context, tr) + handle = constraint.DetermineHandle(p.Handle, p.Context, tr) // Determine enclosing module trModule = tr.Module(p.Context) scModule = sc.Module(p.Context) @@ -187,16 +160,21 @@ func HoldsGlobally[T ir.Testable](handle string, ctx schema.ModuleId, constraint // HoldsLocally checks whether a given constraint holds (e.g. vanishes) on a // specific row of a trace. If not, report an appropriate error. -func HoldsLocally[T ir.Testable](k uint, handle string, constraint T, ctx schema.ModuleId, +func HoldsLocally[T ir.Testable](k uint, handle string, term T, ctx schema.ModuleId, trMod trace.Module, scMod schema.Module) (schema.Failure, uint) { // - ok, id, err := constraint.TestAt(int(k), trMod, scMod) + ok, id, err := term.TestAt(int(k), trMod, scMod) // Check for errors if err != nil { - return &InternalFailure{handle, ctx, k, constraint, err.Error()}, id + return &constraint.InternalFailure{ + Handle: handle, + Context: ctx, + Row: k, + Term: term, + Error: err.Error()}, id } else if !ok { // Evaluation failure - return &VanishingFailure{handle, constraint, ctx, k}, id + return &Failure{handle, term, ctx, k}, id } // Success return nil, id @@ -205,7 +183,7 @@ func HoldsLocally[T ir.Testable](k uint, handle string, constraint T, ctx schema // Lisp converts this constraint into an S-Expression. 
// //nolint:revive -func (p VanishingConstraint[T]) Lisp(schema schema.AnySchema) sexp.SExp { +func (p Constraint[T]) Lisp(schema schema.AnySchema) sexp.SExp { var ( module = schema.Module(p.Context) name string @@ -238,12 +216,6 @@ func (p VanishingConstraint[T]) Lisp(schema schema.AnySchema) sexp.SExp { } // Substitute any matchined labelled constants within this constraint -func (p VanishingConstraint[T]) Substitute(mapping map[string]fr.Element) { +func (p Constraint[T]) Substitute(mapping map[string]fr.Element) { p.Constraint.Substitute(mapping) } - -func determineHandle(handle string, ctx schema.ModuleId, tr trace.Trace) string { - modName := tr.Module(ctx).Name() - // - return trace.QualifiedColumnName(modName, handle) -} diff --git a/pkg/schema/constraint/vanishing/failure.go b/pkg/schema/constraint/vanishing/failure.go new file mode 100644 index 000000000..adb214335 --- /dev/null +++ b/pkg/schema/constraint/vanishing/failure.go @@ -0,0 +1,49 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package vanishing + +import ( + "fmt" + + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" + "github.com/consensys/go-corset/pkg/util/collection/set" +) + +// Failure provides structural information about a failing vanishing constraint. +type Failure struct { + // Handle of the failing constraint + Handle string + // Constraint expression + Constraint ir.Testable + // Module where constraint failed + Context schema.ModuleId + // Row on which the constraint failed + Row uint +} + +// Message provides a suitable error message +func (p *Failure) Message() string { + // Construct useful error message + return fmt.Sprintf("constraint \"%s\" does not hold (row %d)", p.Handle, p.Row) +} + +// RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. +func (p *Failure) RequiredCells(tr trace.Trace) *set.AnySortedSet[trace.CellRef] { + return p.Constraint.RequiredCells(int(p.Row), p.Context) +} + +func (p *Failure) String() string { + return p.Message() +} From d8a82b605076f7b959425b65a63dd5accdf557da Mon Sep 17 00:00:00 2001 From: DavePearce Date: Fri, 25 Jul 2025 17:06:50 +1200 Subject: [PATCH 2/8] add initial draft of lookup vector --- pkg/schema/constraint/lookup/vector.go | 110 +++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 pkg/schema/constraint/lookup/vector.go diff --git a/pkg/schema/constraint/lookup/vector.go b/pkg/schema/constraint/lookup/vector.go new file mode 100644 index 000000000..534d2f998 --- /dev/null +++ b/pkg/schema/constraint/lookup/vector.go @@ -0,0 +1,110 @@ +// Copyright Consensys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package lookup + +import ( + "github.com/consensys/go-corset/pkg/ir" + "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/util" + "github.com/consensys/go-corset/pkg/util/source/sexp" +) + +// Vector encapsulates all columns on one side of a lookup (i.e. it +// represents all source columns or all target columns). +type Vector[E ir.Evaluable] struct { + // Module in which all terms are evaluated. + Module schema.ModuleId + // Selector for this vector (optional) + Selector util.Option[E] + // Terms making up this vector. + Terms []E +} + +// UnfilteredLookupVector constructs a new vector in a given context which has no selector. +func UnfilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, terms ...E) Vector[E] { + return Vector[E]{ + mid, + util.None[E](), + terms, + } +} + +// FilteredLookupVector constructs a new vector in a given context which has a selector. +func FilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, selector E, terms ...E) Vector[E] { + return Vector[E]{ + mid, + util.Some(selector), + terms, + } +} + +// Bounds determines the well-definedness bounds for all terms within this vector. +// +//nolint:revive +func (p *Vector[E]) Bounds(module uint) util.Bounds { + var bound util.Bounds + // + if module == p.Module { + // Include bounds for selector (if applicable) + if p.HasSelector() { + sel := p.Selector.Unwrap().Bounds() + bound.Union(&sel) + } + // Include bounds for all terms + for _, e := range p.Terms { + eth := e.Bounds() + bound.Union(ð) + } + } + // + return bound +} + +// Context returns the conterxt in which all terms of this vector must be +// evaluated. +func (p *Vector[E]) Context() schema.ModuleId { + return p.Module +} + +// HasSelector determines whether or not this lookup vector has a selector or +// not. +func (p *Vector[E]) HasSelector() bool { + return p.Selector.HasValue() +} + +// Ith returns the ith term in this vector. +func (p *Vector[E]) Ith(index uint) E { + return p.Terms[index] +} + +// Len returns the number of items in this lookup vector. +func (p *Vector[E]) Len() uint { + return uint(len(p.Terms)) +} + +// Lisp returns a textual representation of this vector. +func (p *Vector[E]) Lisp(schema schema.RegisterMap) sexp.SExp { + terms := sexp.EmptyList() + // + if p.HasSelector() { + terms.Append(p.Selector.Unwrap().Lisp(schema)) + } else { + terms.Append(sexp.NewSymbol("_")) + } + // Iterate source expressions + for i := range p.Len() { + terms.Append(p.Ith(i).Lisp(schema)) + } + // Done + return terms +} From 5a8772a02d2c1af9c8ad18fa9b7f5cab687d36ea Mon Sep 17 00:00:00 2001 From: DavePearce Date: Mon, 28 Jul 2025 12:07:49 +1200 Subject: [PATCH 3/8] update lookups to use lookup vectors This refactors the existing lookup.Constraint to make use of lookup.Vector rather than a raw array of terms. This makes many things easier to understand. 
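To make the shape of the new abstraction concrete, here is a minimal, self-contained sketch of a lookup vector (stand-in types only, not the package's actual definitions): one side of a lookup carries a module id, an optional selector and its terms, mirroring UnfilteredLookupVector and FilteredLookupVector.

package main

import "fmt"

// vector models one side of a lookup: a module id, an optional selector and
// the terms themselves (strings stand in for expressions here).
type vector struct {
	module   uint
	selector *string // nil means "unfiltered"
	terms    []string
}

func unfiltered(module uint, terms ...string) vector {
	return vector{module: module, terms: terms}
}

func filtered(module uint, selector string, terms ...string) vector {
	return vector{module: module, selector: &selector, terms: terms}
}

func main() {
	src := filtered(0, "SEL", "X'0", "X'1")
	dst := unfiltered(1, "Y'0", "Y'1")
	fmt.Printf("lookup %v (selector=%v) into %v\n", src.terms, *src.selector, dst.terms)
}
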
--- pkg/asm/compiler/mir.go | 7 +- pkg/corset/compiler/translator.go | 21 +- pkg/ir/air/constraint.go | 4 +- pkg/ir/air/gadgets/bitwidth.go | 5 +- pkg/ir/mir/constraint.go | 3 +- pkg/ir/mir/encoding.go | 74 ++--- pkg/ir/mir/lookup.go | 200 +++++++----- pkg/ir/mir/lower.go | 20 +- pkg/ir/term.go | 13 - pkg/schema/constraint/lookup/constraint.go | 338 +++++++++++---------- pkg/schema/constraint/lookup/geometry.go | 38 ++- pkg/schema/constraint/lookup/vector.go | 35 ++- 12 files changed, 441 insertions(+), 317 deletions(-) diff --git a/pkg/asm/compiler/mir.go b/pkg/asm/compiler/mir.go index fbd4518f4..389d1a85f 100644 --- a/pkg/asm/compiler/mir.go +++ b/pkg/asm/compiler/mir.go @@ -19,6 +19,7 @@ import ( "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/ir/mir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/array" ) @@ -75,9 +76,9 @@ func (p MirModule) NewLookup(name string, from []MirExpr, targetMid uint, to []M // Preprend (unused) selectors. Eventually, we will most likely want to support selectors. sources = array.Prepend(unused, sources) targets = array.Prepend(unused, targets) - // - target := []ir.Enclosed[[]mir.Term]{ir.Enclose(p.Module.Id(), targets)} - source := []ir.Enclosed[[]mir.Term]{ir.Enclose(targetMid, sources)} + // FIXME: exploit conditional lookups + target := []lookup.Vector[mir.Term]{lookup.UnfilteredLookupVector(p.Module.Id(), targets...)} + source := []lookup.Vector[mir.Term]{lookup.UnfilteredLookupVector(targetMid, sources...)} p.Module.AddConstraint(mir.NewLookupConstraint(name, target, source)) } diff --git a/pkg/corset/compiler/translator.go b/pkg/corset/compiler/translator.go index d901ab0ca..232b6784a 100644 --- a/pkg/corset/compiler/translator.go +++ b/pkg/corset/compiler/translator.go @@ -25,8 +25,8 @@ import ( "github.com/consensys/go-corset/pkg/ir/assignment" "github.com/consensys/go-corset/pkg/ir/mir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" "github.com/consensys/go-corset/pkg/util" - "github.com/consensys/go-corset/pkg/util/collection/array" "github.com/consensys/go-corset/pkg/util/source" ) @@ -357,8 +357,8 @@ func (t *translator) translateDefLookup(decl *ast.DefLookup) []SyntaxError { var ( errors []SyntaxError context ast.Context - sources []ir.Enclosed[[]mir.Term] - targets []ir.Enclosed[[]mir.Term] + sources []lookup.Vector[mir.Term] + targets []lookup.Vector[mir.Term] ) // Translate sources for i, ith := range decl.Targets { @@ -387,7 +387,7 @@ func (t *translator) translateDefLookup(decl *ast.DefLookup) []SyntaxError { } func (t *translator) translateDefLookupSources(selector ast.Expr, - sources []ast.Expr) (ir.Enclosed[[]mir.Term], ast.Context, []SyntaxError) { + sources []ast.Expr) (lookup.Vector[mir.Term], ast.Context, []SyntaxError) { // Determine context of ith set of targets context, j := ast.ContextOfExpressions(sources...) // Include selector (when present) @@ -397,7 +397,7 @@ func (t *translator) translateDefLookupSources(selector ast.Expr, // Translate target expressions whilst again checking for a conflicting // context. 
if context.IsConflicted() { - return ir.Enclosed[[]mir.Term]{}, context, t.srcmap.SyntaxErrors(sources[j], "conflicting context") + return lookup.Vector[mir.Term]{}, context, t.srcmap.SyntaxErrors(sources[j], "conflicting context") } // Determine enclosing module module := t.moduleOf(context) @@ -407,14 +407,11 @@ func (t *translator) translateDefLookupSources(selector ast.Expr, if selector != nil { s, errs := t.translateExpression(selector, module, 0) errors = append(errors, errs...) - terms = array.Prepend(s, terms) - } else { - // Selector is unused - s := ir.NewRegisterAccess[mir.Term](schema.NewUnusedRegisterId(), 0) - terms = array.Prepend(s, terms) + + return lookup.FilteredLookupVector(module.Id(), s, terms...), context, errors } - // Return enclosed terms - return ir.Enclose(module.Id(), terms), context, errors + // + return lookup.UnfilteredLookupVector(module.Id(), terms...), context, errors } // Translate a "definrange" declaration. diff --git a/pkg/ir/air/constraint.go b/pkg/ir/air/constraint.go index 7cd773948..e50639335 100644 --- a/pkg/ir/air/constraint.go +++ b/pkg/ir/air/constraint.go @@ -73,8 +73,8 @@ func NewInterleavingConstraint(handle string, targetContext schema.ModuleId, } // NewLookupConstraint constructs a new AIR lookup constraint -func NewLookupConstraint(handle string, targets []ir.Enclosed[[]*ColumnAccess], - sources []ir.Enclosed[[]*ColumnAccess]) LookupConstraint { +func NewLookupConstraint(handle string, targets []lookup.Vector[*ColumnAccess], + sources []lookup.Vector[*ColumnAccess]) LookupConstraint { // return newAir(lookup.NewConstraint(handle, targets, sources)) } diff --git a/pkg/ir/air/gadgets/bitwidth.go b/pkg/ir/air/gadgets/bitwidth.go index 262c3bf6d..bb2a1fc57 100644 --- a/pkg/ir/air/gadgets/bitwidth.go +++ b/pkg/ir/air/gadgets/bitwidth.go @@ -22,6 +22,7 @@ import ( "github.com/consensys/go-corset/pkg/ir/assignment" "github.com/consensys/go-corset/pkg/schema" sc "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/field" @@ -176,8 +177,8 @@ func (p *BitwidthGadget) applyRecursiveBitwidthGadget(ref sc.RegisterRef, bitwid // Target Value ir.RawRegisterAccess[air.Term](sc.NewRegisterId(0), 0)} // - targets := []ir.Enclosed[[]*air.ColumnAccess]{ir.Enclose(mid, targetAccesses)} - sources := []ir.Enclosed[[]*air.ColumnAccess]{ir.Enclose(module.Id(), sourceAccesses)} + targets := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredLookupVector(mid, targetAccesses...)} + sources := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredLookupVector(module.Id(), sourceAccesses...)} // module.AddConstraint(air.NewLookupConstraint(lookupHandle, targets, sources)) // Add column to assignment so its proof is included diff --git a/pkg/ir/mir/constraint.go b/pkg/ir/mir/constraint.go index 4d0cccb78..5742761cf 100644 --- a/pkg/ir/mir/constraint.go +++ b/pkg/ir/mir/constraint.go @@ -14,7 +14,6 @@ package mir import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" - "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint" "github.com/consensys/go-corset/pkg/schema/constraint/interleaving" @@ -57,7 +56,7 @@ func NewInterleavingConstraint(handle string, targetContext schema.ModuleId, } // NewLookupConstraint creates a new lookup constraint with a given handle. 
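For context, the constraint constructed here requires every (selected) source row to occur somewhere among the target rows. A self-contained sketch of that membership check, using string keys as stand-ins for the hashed byte encodings employed later in this patch:

package main

import "fmt"

// lookupHolds reports whether every source row occurs among the target rows.
func lookupHolds(sources, targets [][]int) bool {
	seen := make(map[string]bool, len(targets))
	for _, row := range targets {
		seen[fmt.Sprint(row)] = true
	}
	for _, row := range sources {
		if !seen[fmt.Sprint(row)] {
			return false // this source row has no matching target row
		}
	}
	return true
}

func main() {
	targets := [][]int{{1, 2}, {3, 4}}
	fmt.Println(lookupHolds([][]int{{3, 4}}, targets)) // true
	fmt.Println(lookupHolds([][]int{{5, 6}}, targets)) // false
}
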
-func NewLookupConstraint(handle string, targets []ir.Enclosed[[]Term], sources []ir.Enclosed[[]Term]) Constraint { +func NewLookupConstraint(handle string, targets []lookup.Vector[Term], sources []lookup.Vector[Term]) Constraint { return Constraint{lookup.NewConstraint(handle, targets, sources)} } diff --git a/pkg/ir/mir/encoding.go b/pkg/ir/mir/encoding.go index 9931d3e57..ddf66fa92 100644 --- a/pkg/ir/mir/encoding.go +++ b/pkg/ir/mir/encoding.go @@ -22,6 +22,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" "github.com/consensys/go-corset/pkg/util" ) @@ -153,17 +154,33 @@ func encode_lookup(c LookupConstraint) ([]byte, error) { return nil, err } // Target terms - if err := encode_nary(encode_enclosed_terms, &buffer, c.Targets); err != nil { + if err := encode_nary(encode_lookup_vector, &buffer, c.Targets); err != nil { return nil, err } // Sources - if err := encode_nary(encode_enclosed_terms, &buffer, c.Sources); err != nil { + if err := encode_nary(encode_lookup_vector, &buffer, c.Sources); err != nil { return nil, err } // return buffer.Bytes(), nil } +func encode_lookup_vector(terms lookup.Vector[Term], buffer *bytes.Buffer) error { + var ( + gobEncoder = gob.NewEncoder(buffer) + ) + // Source Context + if err := gobEncoder.Encode(terms.Module); err != nil { + return err + } + // + if terms.HasSelector() { + panic("todo") + } + // Source terms + return encode_nary(encode_term, buffer, terms.Terms) +} + func encode_permutation(c PermutationConstraint) ([]byte, error) { var ( buffer bytes.Buffer @@ -294,18 +311,6 @@ func encode_range(c RangeConstraint) ([]byte, error) { return buffer.Bytes(), err } -func encode_enclosed_terms(terms ir.Enclosed[[]Term], buffer *bytes.Buffer) error { - var ( - gobEncoder = gob.NewEncoder(buffer) - ) - // Source Context - if err := gobEncoder.Encode(terms.Module); err != nil { - return err - } - // Source terms - return encode_nary(encode_term, buffer, terms.Item) -} - func decode_constraint(bytes []byte) (schema.Constraint, error) { switch bytes[0] { case assertionTag: @@ -393,17 +398,36 @@ func decode_lookup(data []byte) (schema.Constraint, error) { return lookup, err } // Targets - if lookup.Targets, err = decode_nary(decode_enclosed_terms, buffer); err != nil { + if lookup.Targets, err = decode_nary(decode_lookup_vector, buffer); err != nil { return lookup, err } // Sources - if lookup.Sources, err = decode_nary(decode_enclosed_terms, buffer); err != nil { + if lookup.Sources, err = decode_nary(decode_lookup_vector, buffer); err != nil { return lookup, err } // return lookup, nil } +func decode_lookup_vector(buf *bytes.Buffer) (lookup.Vector[Term], error) { + var ( + gobDecoder = gob.NewDecoder(buf) + terms lookup.Vector[Term] + err error + ) + // Context + if err = gobDecoder.Decode(&terms.Module); err != nil { + return terms, err + } + // Contents + if terms.Terms, err = decode_nary(decode_term, buf); err != nil { + return terms, err + } + // + //return terms, nil + panic("todo: handle selectors") +} + func decode_permutation(data []byte) (schema.Constraint, error) { var ( buffer = bytes.NewBuffer(data) @@ -524,24 +548,6 @@ func decode_vanishing(data []byte) (schema.Constraint, error) { return vanishing, err } -func decode_enclosed_terms(buf *bytes.Buffer) (ir.Enclosed[[]Term], error) { - var ( - gobDecoder = gob.NewDecoder(buf) - terms ir.Enclosed[[]Term] - err error - ) - // Context - 
if err = gobDecoder.Decode(&terms.Module); err != nil { - return terms, err - } - // Contents - if terms.Item, err = decode_nary(decode_term, buf); err != nil { - return terms, err - } - // - return terms, nil -} - // ============================================================================ // Logical Terms (encoding) // ============================================================================ diff --git a/pkg/ir/mir/lookup.go b/pkg/ir/mir/lookup.go index 7229a43d6..0d9f0d90a 100644 --- a/pkg/ir/mir/lookup.go +++ b/pkg/ir/mir/lookup.go @@ -31,14 +31,8 @@ func subdivideLookup(c LookupConstraint, mapping schema.LimbsMap) LookupConstrai targets = mapLookupVectors(c.Targets, mapping) ) // - rawTargets := splitLookupVectors(targets) - rawSources := splitLookupVectors(sources) - // - alignLookupVectors(rawTargets, geometry, mapping) - alignLookupVectors(rawSources, geometry, mapping) - // - targets = padLookupVectors(rawTargets, geometry) - sources = padLookupVectors(rawSources, geometry) + targets = splitLookupVectors(geometry, targets, mapping) + sources = splitLookupVectors(geometry, sources, mapping) // return lookup.NewConstraint(c.Handle, targets, sources) } @@ -50,68 +44,113 @@ func subdivideLookup(c LookupConstraint, mapping schema.LimbsMap) LookupConstrai // create more source/target pairings. Rather, it splits registers within the // existing pairings only. Later stages will subdivide and pad the // source/target pairings as necessary. -func mapLookupVectors(vectors []ir.Enclosed[[]Term], mapping schema.LimbsMap) []ir.Enclosed[[]Term] { - var nterms = make([]ir.Enclosed[[]Term], len(vectors)) +func mapLookupVectors(vectors []lookup.Vector[Term], mapping schema.LimbsMap) []lookup.Vector[Term] { + var nterms = make([]lookup.Vector[Term], len(vectors)) // for i, vector := range vectors { var ( modmap = mapping.Module(vector.Module) - terms = splitTerms(vector.Item, modmap) + terms = splitTerms(vector.Terms, modmap) ) - // - nterms[i] = ir.Enclose(vector.Module, terms) + // TODO: what about the selector itself? + nterms[i] = lookup.NewLookupVector(vector.Module, vector.Selector, terms...) } // return nterms } -func splitLookupVectors(vectors []ir.Enclosed[[]Term]) []ir.Enclosed[[][]Term] { - var nterms = make([]ir.Enclosed[[][]Term], len(vectors)) +// Splitting lookup vectors means splitting source/target pairings into one or +// more terms. For example, consider the lookup "lookup (X'1::X'0) (Y)" where +// X'1, X'0, and Y are all u16. The geometry of this lookup is [u32]. Assume a +// field bandwidth which cannot hold a u32 without overflow. Then, after +// splitting, we would expect "lookup (X'0, X'1) (Y, 0)". Here, the geometry has +// now changed to [u16,u16] to accommodate the field bandwidth. Furthermore, +// notice padding has been applied to ensure we have a matching number of +// columns on the left- and right-hand sides. 
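A self-contained numerical sketch of the example above, using plain unsigned integers as stand-ins for field elements: the wide side is split into u16 limbs (value = lo + hi*2^16) while the narrow side is padded with a zero so both sides end up with the same number of columns:

package main

import "fmt"

// splitU32 decomposes a u32 value into its low and high u16 limbs,
// i.e. value = lo + hi*2^16.
func splitU32(value uint32) (lo, hi uint16) {
	return uint16(value & 0xFFFF), uint16(value >> 16)
}

// padTo appends zeros until the limb slice has exactly n entries.
func padTo(limbs []uint16, n int) []uint16 {
	for len(limbs) < n {
		limbs = append(limbs, 0)
	}
	return limbs
}

func main() {
	xLo, xHi := splitU32(0x0001_0002) // source side: X split into limbs
	source := []uint16{xLo, xHi}
	target := padTo([]uint16{0x0002}, len(source)) // target side: Y padded with 0
	fmt.Println(source, target)                    // [2 1] [2 0]
}
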
+func splitLookupVectors(geometry lookup.Geometry, vectors []lookup.Vector[Term], + mapping schema.LimbsMap) []lookup.Vector[Term] { + // + var nterms = make([]lookup.Vector[Term], len(vectors)) // for i, vector := range vectors { - var terms = splitLookupVector(vector.Item) - // - nterms[i] = ir.Enclose(vector.Module, terms) + nterms[i] = splitLookupVector(geometry, vector, mapping) } // return nterms } -func splitLookupVector(terms []Term) [][]Term { - var nterms [][]Term = make([][]Term, len(terms)) +func splitLookupVector(geometry lookup.Geometry, vector lookup.Vector[Term], + mapping schema.LimbsMap) lookup.Vector[Term] { // - for i, t := range terms { - if va, ok := t.(*VectorAccess); ok { - for _, v := range va.Vars { - nterms[i] = append(nterms[i], v) + var ( + limbs [][]Term = make([][]Term, vector.Len()) + modmap = mapping.Module(vector.Module) + ) + // Initial split + for i, t := range vector.Terms { + // Determine value range of ith term + valrange := t.ValueRange(modmap) + // Determine bitwidth for that range + bitwidth, signed := valrange.BitWidth() + // Sanity check signed lookups + if signed { + panic(fmt.Sprintf("signed lookup encountered (%s)", t.Lisp(modmap).String(true))) + } + // Check whether value range exceeds available bandwidth + if bitwidth > geometry.BandWidth() { + // Yes, therefore need to split + //nolint + if va, ok := t.(*VectorAccess); ok { + for _, v := range va.Vars { + limbs[i] = append(limbs[i], v) + } + } else { + // TODO: fix this + panic("cannot (yet) split lookup term") } } else { - nterms[i] = append(nterms[i], t) + // bandwidth is not exceeded, therefore don't split. + limbs[i] = append(limbs[i], t) } } - // - return nterms -} - -func alignLookupVectors(vectors []ir.Enclosed[[][]Term], geometry lookup.Geometry, - mapping schema.LimbsMap) { - // - for _, vector := range vectors { - alignLookupVector(vector, geometry, mapping) + // Alignment + for i, limbs := range limbs { + alignLookupLimbs(limbs, geometry.LimbWidths(uint(i)), modmap) } + // Padding + nlimbs := padLookupLimbs(limbs, geometry) + // Done + return lookup.NewLookupVector(vector.Module, vector.Selector, nlimbs...) } -func alignLookupVector(vector ir.Enclosed[[][]Term], geometry lookup.Geometry, mapping schema.LimbsMap) { - var modmap = mapping.Module(vector.Module) - // - for i, limbs := range vector.Item { - // FIXME: somewhere we should check that the selector fits into a single - // column! - alignLookupLimbs(i == 0, limbs, geometry.LimbWidths(uint(i)), modmap) - } -} - -func alignLookupLimbs(selector bool, limbs []Term, geometry []uint, mapping schema.RegisterLimbsMap) { +// Alignment is related to the potential for so-called "irregular lookups". +// These only arise in relatively unlikely scenarios. However, since the +// problem is dependent upon the particular field configuration used, it is +// important to support them in order to be truly field agnostic. To understand +// the issue of alignment, consider this lookup (where X is u160 and Y is u128): +// +// (lookup (X) (Y)) +// +// Let's assume we want to split this lookup for a maximum register width of +// u160. Then, without alignment, we would get this: +// +// (lookup (X 0) (Y'0 Y'1)) +// +// This translation is clearly unsound, because the most significant 32 bits of +// X are ignored. 
The problem is that, since Y exceed the maximum register +// width it was split accordingly (and recall that splitting attempts to ensure +// a balanced division between limbs); however, since X did not exceed the +// maximum register width, it was not split. +// +// Irregular looks can be resolved only by introducing temporary registers to +// divide X into u128 limbs. Something like this would be a valid translation +// of the above (where T'0,T'1 are u128 temporaries): +// +// (lookup (T'0 T'1) (Y'0 Y'1)) ; (vanish (X == (T'1 * 2^128) + T'0)) +// +// NOTE: For now, this function only checks that limbs are aligned and panics +// otherwise. +func alignLookupLimbs(limbs []Term, geometry []uint, mapping schema.RegisterLimbsMap) { var ( n = len(geometry) - 1 m = len(limbs) - 1 @@ -119,50 +158,51 @@ func alignLookupLimbs(selector bool, limbs []Term, geometry []uint, mapping sche ) // For now, this is just a check that we have proper alignment. for i, limb := range limbs { - if !selector || limb.IsDefined() { - // Determine value range of limb - valrange := limb.ValueRange(limbMap) - // Determine bitwidth for that range - bitwidth, signed := valrange.BitWidth() - // Sanity check for irregular lookups - if signed { - panic(fmt.Sprintf("signed lookup encountered (%s)", limb.Lisp(limbMap).String(true))) - } else if i != n && bitwidth > geometry[i] { - panic(fmt.Sprintf("irregular lookup detected (u%d v u%d)", bitwidth, geometry[i])) - } else if i != m && bitwidth != geometry[i] { - panic(fmt.Sprintf("irregular lookup detected (u%d v u%d)", bitwidth, geometry[i])) - } + // Determine value range of limb + valrange := limb.ValueRange(limbMap) + // Determine bitwidth for that range + bitwidth, _ := valrange.BitWidth() + // Sanity check for irregular lookups + if i != n && bitwidth > geometry[i] { + panic(fmt.Sprintf("irregular lookup detected (u%d v u%d)", bitwidth, geometry[i])) + } else if i != m && bitwidth != geometry[i] { + panic(fmt.Sprintf("irregular lookup detected (u%d v u%d)", bitwidth, geometry[i])) } } } -func padLookupVectors(vectors []ir.Enclosed[[][]Term], geometry lookup.Geometry) []ir.Enclosed[[]Term] { - var nterms []ir.Enclosed[[]Term] = make([]ir.Enclosed[[]Term], len(vectors)) - // - for i, vector := range vectors { - ith := padTerms(vector.Item, geometry) - nterms[i] = ir.Enclose(vector.Module, ith) - } - // - return nterms -} - -func padTerms(terms [][]Term, geometry lookup.Geometry) []Term { +// Padding is about ensuring a matching number of columns for each source/target +// pairing in the lookup. To understand the purpose of padding, consider this +// lookup (where X is u256 and Y is u128): +// +// (lookup (X) (Y)) +// +// Let's assume we want to split this lookup for a maximum register width of +// u128. Then, without padding, we would end up with this: +// +// (lookup (X'0 X'1) (Y)) +// +// Here, we have a mismatched number of columns because Y did not need to be +// split. To resolve this, we need to pad the translation of Y as follows: +// +// (lookup (X'0 X'1) (Y 0)) +// +// Here, 0 has been appended to the translation of Y to match the number of +// columns required for X. +func padLookupLimbs(terms [][]Term, geometry lookup.Geometry) []Term { var nterms []Term for i, ith := range terms { + // Determine expected geometry (i.e. number of columns) at this + // position. n := len(geometry.LimbWidths(uint(i))) - nterms = append(nterms, padTerm(ith, n)...) + // Append available terms + nterms = append(nterms, ith...) 
+ // Pad out with zeros to match geometry + for len(terms) < n { + nterms = append(nterms, ir.Const64[Term](0)) + } } return nterms } - -func padTerm(terms []Term, geometry int) []Term { - for len(terms) < geometry { - // Pad out with zeros - terms = append(terms, ir.Const64[Term](0)) - } - // - return terms -} diff --git a/pkg/ir/mir/lower.go b/pkg/ir/mir/lower.go index 4ebb6f74b..e473503e7 100644 --- a/pkg/ir/mir/lower.go +++ b/pkg/ir/mir/lower.go @@ -22,6 +22,7 @@ import ( "github.com/consensys/go-corset/pkg/ir/air" air_gadgets "github.com/consensys/go-corset/pkg/ir/air/gadgets" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/schema/constraint/lookup" "github.com/consensys/go-corset/pkg/util/collection/array" "github.com/consensys/go-corset/pkg/util/field" util_math "github.com/consensys/go-corset/pkg/util/math" @@ -242,21 +243,27 @@ func (p *AirLowering) lowerInterleavingConstraintToAir(c InterleavingConstraint, // expected value. func (p *AirLowering) lowerLookupConstraintToAir(c LookupConstraint, airModule *air.ModuleBuilder) { var ( - sources = make([]ir.Enclosed[[]*air.ColumnAccess], len(c.Sources)) - targets = make([]ir.Enclosed[[]*air.ColumnAccess], len(c.Targets)) + sources = make([]lookup.Vector[*air.ColumnAccess], len(c.Sources)) + targets = make([]lookup.Vector[*air.ColumnAccess], len(c.Targets)) ) // Lower sources for i, ith := range c.Sources { - sources[i] = p.expandEnclosedLookupTerms(ith) + sources[i] = p.expandLookupVectorToAir(ith) } // Lower targets for i, ith := range c.Targets { - targets[i] = p.expandEnclosedLookupTerms(ith) + targets[i] = p.expandLookupVectorToAir(ith) } // Add constraint airModule.AddConstraint(air.NewLookupConstraint(c.Handle, targets, sources)) } +func (p *AirLowering) expandLookupVectorToAir(terms lookup.Vector[Term]) lookup.Vector[*air.ColumnAccess] { + // accesses := p.expandTermsInner(terms.Module, true, terms.Item...) + // return ir.Enclose(terms.Module, accesses) + panic("todo") +} + // Lower a sorted constraint to the AIR level. The challenge here is that there // is not concept of sorting constraints at the AIR level. Instead, we have to // generate the necessary machinery to enforce the sorting constraint. @@ -311,11 +318,6 @@ func (p *AirLowering) lowerSortedConstraintToAir(c SortedConstraint, airModule * } } -func (p *AirLowering) expandEnclosedLookupTerms(terms ir.Enclosed[[]Term]) ir.Enclosed[[]*air.ColumnAccess] { - accesses := p.expandTermsInner(terms.Module, true, terms.Item...) - return ir.Enclose(terms.Module, accesses) -} - func (p *AirLowering) expandTerms(context schema.ModuleId, terms ...Term) []*air.ColumnAccess { return p.expandTermsInner(context, false, terms...) } diff --git a/pkg/ir/term.go b/pkg/ir/term.go index 2d334bc3d..15e4ce6ad 100644 --- a/pkg/ir/term.go +++ b/pkg/ir/term.go @@ -22,19 +22,6 @@ import ( "github.com/consensys/go-corset/pkg/util/source/sexp" ) -// Enclosed represents anything which, ultimately, must be associated with a -// module (for example a term). Furthermore, it indicates the given item has -// been associated with the given module. -type Enclosed[T any] struct { - Module schema.ModuleId - Item T -} - -// Enclose a given item in a given module. -func Enclose[T any](mid schema.ModuleId, item T) Enclosed[T] { - return Enclosed[T]{mid, item} -} - // Contextual captures something which requires an evaluation context (i.e. a // single enclosing module) in order to make sense. For example, expressions // require a single context. 
This interface is separated from Evaluable (and diff --git a/pkg/schema/constraint/lookup/constraint.go b/pkg/schema/constraint/lookup/constraint.go index bf1f78db7..c89127f68 100644 --- a/pkg/schema/constraint/lookup/constraint.go +++ b/pkg/schema/constraint/lookup/constraint.go @@ -14,7 +14,6 @@ package lookup import ( "encoding/binary" - "fmt" "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" @@ -27,8 +26,6 @@ import ( "github.com/consensys/go-corset/pkg/util/source/sexp" ) -var frZero = fr.NewElement(0) - // Constraint (sometimes also called an inclusion constraint) constrains // two sets of columns (potentially in different modules). Specifically, every // row in the source columns must match a row in the target columns (but not @@ -51,28 +48,28 @@ type Constraint[E ir.Evaluable] struct { // Targets returns the target expressions which are used to lookup into the // target expressions. NOTE: the first element here is *always* the target // selector. - Targets []ir.Enclosed[[]E] + Targets []Vector[E] // Sources returns the source expressions which are used to lookup into the // target expressions. NOTE: the first element here is *always* the source // selector. - Sources []ir.Enclosed[[]E] + Sources []Vector[E] } // NewConstraint creates a new lookup constraint with a given handle. -func NewConstraint[E ir.Evaluable](handle string, targets []ir.Enclosed[[]E], - sources []ir.Enclosed[[]E]) Constraint[E] { - var width int +func NewConstraint[E ir.Evaluable](handle string, targets []Vector[E], + sources []Vector[E]) Constraint[E] { + var width uint // Check sources for i, ith := range sources { - if i != 0 && len(ith.Item) != width { + if i != 0 && ith.Len() != width { panic("inconsistent number of source lookup columns") } - width = len(ith.Item) + width = ith.Len() } // Check targets for _, ith := range targets { - if len(ith.Item) != width { + if ith.Len() != width { panic("inconsistent number of target lookup columns") } } @@ -126,25 +123,13 @@ func (p Constraint[E]) Bounds(module uint) util.Bounds { var bound util.Bounds // sources for _, ith := range p.Sources { - if module == ith.Module { - for i, e := range ith.Item { - if i != 0 || e.IsDefined() { - eth := e.Bounds() - bound.Union(ð) - } - } - } + eth := ith.Bounds(module) + bound.Union(ð) } // targets for _, ith := range p.Targets { - if module == ith.Module { - for i, e := range ith.Item { - if i != 0 || e.IsDefined() { - eth := e.Bounds() - bound.Union(ð) - } - } - } + eth := ith.Bounds(module) + bound.Union(ð) } // return bound @@ -158,100 +143,204 @@ func (p Constraint[E]) Accepts(tr trace.Trace, sc schema.AnySchema) (bit.Set, sc var ( coverage bit.Set // Determine width (in columns) of this lookup - width int = len(p.Sources[0].Item) - 1 + width uint = p.Sources[0].Len() // - rows = hash.NewSet[hash.BytesKey](128) + rows *hash.Set[hash.BytesKey] // Construct reusable buffer - buffer = make([]byte, 32*width) + bytes = make([]byte, 32*width) + err schema.Failure ) - // Add all target columns to the set - for _, ith := range p.Targets { + // Insert all active target vectors + if rows, err = p.insertTargetVectors(tr, sc, bytes); err != nil { + return coverage, err + } + // Check against all active source vectors + if err = p.checkSourceVectors(rows, tr, sc, bytes); err != nil { + return coverage, err + } + // + return coverage, nil +} + +func (p *Constraint[E]) insertTargetVectors(tr trace.Trace, sc schema.AnySchema, + bytes []byte) (*hash.Set[hash.BytesKey], schema.Failure) { + // + 
var ( + rows = hash.NewSet[hash.BytesKey](tr.Module(p.Targets[0].Module).Height()) + ) + // Choose optimised loop + for _, target := range p.Targets { var ( - tgtTrMod = tr.Module(ith.Module) - tgtScMod = sc.Module(ith.Module) - selector = ith.Item[0].IsDefined() + trModule = tr.Module(target.Module) + scModule = sc.Module(target.Module) + height = trModule.Height() ) - // Add each row in the target module - for i := range tgtTrMod.Height() { - ith_bytes, err := evalExprsAsBytes(int(i), selector, ith, p.Handle, tgtTrMod, tgtScMod, buffer[:]) - // error check - if err != nil { - return coverage, err - } else if ith_bytes != nil { - // Insert item, whilst checking whether the buffer was consumed or not. - if !rows.Insert(hash.NewBytesKey(ith_bytes)) { - // Yes, buffer consumed. Therefore, construct a fresh buffer. - buffer = make([]byte, 32*width) + // + if target.HasSelector() { + // unfiltered + for i := range int(height) { + if err := insertFilteredTargetVector(i, target, p.Handle, rows, trModule, scModule, bytes); err != nil { + return nil, err + } + } + } else { + // unfiltered + for i := range int(height) { + if err := insertTargetVector(i, target, p.Handle, rows, trModule, scModule, bytes); err != nil { + return nil, err } } } } - // Check all source columns are contained - for _, ith := range p.Sources { + // + return rows, nil +} + +func (p *Constraint[E]) checkSourceVectors(rows *hash.Set[hash.BytesKey], tr trace.Trace, sc schema.AnySchema, + bytes []byte) schema.Failure { + // Choose optimised loop + for _, source := range p.Sources { var ( - srcTrMod = tr.Module(ith.Module) - srcScMod = sc.Module(ith.Module) - selector = ith.Item[0].IsDefined() + trModule = tr.Module(source.Module) + scModule = sc.Module(source.Module) + height = trModule.Height() ) // - for i := range srcTrMod.Height() { - ith_bytes, err := evalExprsAsBytes(int(i), selector, ith, p.Handle, srcTrMod, srcScMod, buffer[:]) - // error check - if err != nil { - return coverage, err - } - // Check whether contained. - if ith_bytes != nil && !rows.Contains(hash.NewBytesKey(ith_bytes)) { - sources := make([]ir.Evaluable, width) - for i, e := range ith.Item[1:] { - sources[i] = e + if source.HasSelector() { + // filtered + for i := range int(height) { + if err := checkFilteredSourceVector(i, source, p.Handle, rows, trModule, scModule, bytes); err != nil { + return err } - // Construct failures - return coverage, &Failure{ - p.Handle, ith.Module, sources, i, + } + } else { + // unfiltered + for i := range int(height) { + if err := checkSourceVector(i, source, p.Handle, rows, trModule, scModule, bytes); err != nil { + return err } } } } + // success + return nil +} + +func insertFilteredTargetVector[E ir.Evaluable](k int, vec Vector[E], handle string, rows *hash.Set[hash.BytesKey], + trModule trace.Module, scModule schema.Module, bytes []byte) schema.Failure { + // If no selector, then always selected + var selected bool = !vec.HasSelector() // - return coverage, nil + if vec.HasSelector() { + // Otherwise, check whether selector enabled (or not). + var ( + selector = vec.Selector.Unwrap() + ith, err = selector.EvalAt(k, trModule, scModule) + ) + // + if err != nil { + return &constraint.InternalFailure{ + Handle: handle, + Context: vec.Module, + Row: uint(k), + Term: vec.Selector.Unwrap(), + Error: err.Error(), + } + } + // Selected when non-zero + selected = !ith.IsZero() + } + // If row selected, then insert contents! 
+ if selected { + return insertTargetVector(k, vec, handle, rows, trModule, scModule, bytes) + } + // + return nil +} + +func insertTargetVector[E ir.Evaluable](k int, vec Vector[E], handle string, rows *hash.Set[hash.BytesKey], + trModule trace.Module, scModule schema.Module, bytes []byte) schema.Failure { + // + // Check each source is included + if err := evalExprsAsBytes(k, vec, handle, trModule, scModule, bytes); err != nil { + return err + } + // + rows.Insert(hash.NewBytesKey(bytes)) + // + return nil +} + +func checkFilteredSourceVector[E ir.Evaluable](k int, vec Vector[E], handle string, rows *hash.Set[hash.BytesKey], + trModule trace.Module, scModule schema.Module, bytes []byte) schema.Failure { + // If no selector, then always selected + var selected bool = !vec.HasSelector() + // + if vec.HasSelector() { + // Otherwise, check whether selector enabled (or not). + var ( + selector = vec.Selector.Unwrap() + ith, err = selector.EvalAt(k, trModule, scModule) + ) + // + if err != nil { + return &constraint.InternalFailure{ + Handle: handle, + Context: vec.Module, + Row: uint(k), + Term: vec.Selector.Unwrap(), + Error: err.Error(), + } + } + // Selected when non-zero + selected = !ith.IsZero() + } + // If row selected, then check contents! + if selected { + return checkSourceVector(k, vec, handle, rows, trModule, scModule, bytes) + } + // + return nil } -func evalExprsAsBytes[E ir.Evaluable](k int, selector bool, terms ir.Enclosed[[]E], handle string, - trModule trace.Module, scModule schema.Module, bytes []byte) ([]byte, schema.Failure) { +func checkSourceVector[E ir.Evaluable](k int, vec Vector[E], handle string, rows *hash.Set[hash.BytesKey], + trModule trace.Module, scModule schema.Module, bytes []byte) schema.Failure { + // Check each source is included + if err := evalExprsAsBytes(k, vec, handle, trModule, scModule, bytes); err != nil { + return err + } + // Check whether contained. + if !rows.Contains(hash.NewBytesKey(bytes)) { + sources := make([]ir.Evaluable, vec.Len()) + for i, e := range vec.Terms { + sources[i] = e + } + // Construct failures + return &Failure{handle, vec.Module, sources, uint(k)} + } + // success + return nil +} + +func evalExprsAsBytes[E ir.Evaluable](k int, vec Vector[E], handle string, trModule trace.Module, + scModule schema.Module, bytes []byte) schema.Failure { var ( // Slice provides an access window for writing slice = bytes - // - sources = terms.Item - i = 0 ) - // Check whether selector is defined. If no selector exists, then it will - // not be defined. - if !selector { - // If no selector, then skip that column. - i = 1 - } // Evaluate each expression in turn (remembering that the first element is // the selector) - for ; i < len(sources); i++ { - ith, err := sources[i].EvalAt(k, trModule, scModule) + for i := uint(0); i < vec.Len(); i++ { + ith, err := vec.Ith(i).EvalAt(k, trModule, scModule) // error check if err != nil { - return nil, &constraint.InternalFailure{ + return &constraint.InternalFailure{ Handle: handle, - Context: terms.Module, - Row: uint(i), - Term: sources[i], + Context: vec.Module, + Row: i, + Term: vec.Ith(i), Error: err.Error(), } - } else if i == 0 { - // Selector determines whether or not this row is enabled. If the - // selector is 0 then this row is not enabled. 
- if ith.Cmp(&frZero) == 0 { - // Row is not enabled to ignore - return nil, nil - } } else { // Copy over each element binary.BigEndian.PutUint64(slice, ith[0]) @@ -263,7 +352,7 @@ func evalExprsAsBytes[E ir.Evaluable](k int, selector bool, terms ir.Enclosed[[] } } // Done - return bytes, nil + return nil } // Lisp converts this schema element into a simple S-Expression, for example @@ -277,37 +366,11 @@ func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { ) // Iterate source expressions for _, ith := range p.Sources { - var ( - source = sexp.EmptyList() - module = schema.Module(ith.Module) - ) - // - for i, item := range ith.Item { - if i == 0 && !item.IsDefined() { - source.Append(sexp.NewSymbol("_")) - } else { - source.Append(item.Lisp(module)) - } - } - // - sources.Append(source) + sources.Append(ith.Lisp(schema)) } // Iterate target expressions for _, ith := range p.Targets { - var ( - target = sexp.EmptyList() - module = schema.Module(ith.Module) - ) - // - for i, item := range ith.Item { - if i == 0 && !item.IsDefined() { - target.Append(sexp.NewSymbol("_")) - } else { - target.Append(item.Lisp(module)) - } - } - // - targets.Append(target) + targets.Append(ith.Lisp(schema)) } // Done return sexp.NewList([]sexp.SExp{ @@ -322,43 +385,10 @@ func (p Constraint[E]) Lisp(schema schema.AnySchema) sexp.SExp { func (p Constraint[E]) Substitute(mapping map[string]fr.Element) { // Sources for _, ith := range p.Sources { - for _, s := range ith.Item { - s.Substitute(mapping) - } + ith.Substitute(mapping) } // Targets for _, ith := range p.Targets { - for _, s := range ith.Item { - s.Substitute(mapping) - } - } -} - -func updateGeometry[E ir.Evaluable, T schema.RegisterMap](geometry []uint, source ir.Enclosed[[]E], - mapping schema.ModuleMap[T]) { - // - var ( - terms = source.Item - regmap = mapping.Module(source.Module) - ) - // Sanity check - if len(terms) != len(geometry) { - // Unreachable, as should be caught earlier in the pipeline. - panic("misaligned lookup") - } - // - for i, ith := range terms { - // Since first column is always the selector column, it may not be - // defined. - if i != 0 || ith.IsDefined() { - ithRange := ith.ValueRange(regmap) - bitwidth, signed := ithRange.BitWidth() - // Sanity check - if signed { - panic(fmt.Sprintf("signed lookup encountered (%s)", ith.Lisp(regmap).String(true))) - } - // - geometry[i] = max(geometry[i], bitwidth) - } + ith.Substitute(mapping) } } diff --git a/pkg/schema/constraint/lookup/geometry.go b/pkg/schema/constraint/lookup/geometry.go index 2dce4b352..7353f28e7 100644 --- a/pkg/schema/constraint/lookup/geometry.go +++ b/pkg/schema/constraint/lookup/geometry.go @@ -13,6 +13,8 @@ package lookup import ( + "fmt" + "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/agnostic" @@ -34,7 +36,7 @@ type Geometry struct { func NewGeometry[E ir.Evaluable, T schema.RegisterMap](c Constraint[E], mapping schema.ModuleMap[T]) Geometry { // - var geometry []uint = make([]uint, len(c.Sources[0].Item)) + var geometry []uint = make([]uint, c.Sources[0].Len()) // Include sources for _, source := range c.Sources { updateGeometry(geometry, source, mapping) @@ -47,6 +49,16 @@ func NewGeometry[E ir.Evaluable, T schema.RegisterMap](c Constraint[E], return Geometry{mapping.Field(), geometry} } +// BandWidth returns maximum field bandwidth available in the field. 
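The limb widths exposed by the accessors below ultimately come from agnostic.LimbWidths which, per the earlier comments, aims for a balanced split of a bitwidth into chunks no wider than the register width. The exact balancing strategy is the library's own; this self-contained sketch only illustrates the idea:

package main

import "fmt"

// limbWidths splits bitwidth into ceil(bitwidth/maxWidth) limbs whose widths
// differ by at most one, each no wider than maxWidth. A sketch only; the real
// computation lives in agnostic.LimbWidths.
func limbWidths(maxWidth, bitwidth uint) []uint {
	n := (bitwidth + maxWidth - 1) / maxWidth // number of limbs required
	widths := make([]uint, n)
	for i := range widths {
		widths[i] = bitwidth / n
	}
	// Distribute any remaining bits, one per limb.
	for i := uint(0); i < bitwidth%n; i++ {
		widths[i]++
	}
	return widths
}

func main() {
	fmt.Println(limbWidths(16, 32))  // [16 16]
	fmt.Println(limbWidths(64, 200)) // [50 50 50 50]
}
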
+func (p *Geometry) BandWidth() uint { + return p.config.FieldBandWidth +} + +// RegisterWidth returns maximum permitted register width for the field. +func (p *Geometry) RegisterWidth() uint { + return p.config.RegisterWidth +} + // LimbWidths returns the bitwidths for the required limbs for a given // source/target pairing in the lookup. func (p *Geometry) LimbWidths(i uint) []uint { @@ -56,3 +68,27 @@ func (p *Geometry) LimbWidths(i uint) []uint { // return agnostic.LimbWidths(p.config.RegisterWidth, p.geometry[i]) } + +func updateGeometry[E ir.Evaluable, T schema.RegisterMap](geometry []uint, source Vector[E], + mapping schema.ModuleMap[T]) { + // + var ( + regmap = mapping.Module(source.Module) + ) + // Sanity check + if source.Len() != uint(len(geometry)) { + // Unreachable, as should be caught earlier in the pipeline. + panic("misaligned lookup") + } + // + for i, ith := range source.Terms { + ithRange := ith.ValueRange(regmap) + bitwidth, signed := ithRange.BitWidth() + // Sanity check + if signed { + panic(fmt.Sprintf("signed lookup encountered (%s)", ith.Lisp(regmap).String(true))) + } + // + geometry[i] = max(geometry[i], bitwidth) + } +} diff --git a/pkg/schema/constraint/lookup/vector.go b/pkg/schema/constraint/lookup/vector.go index 534d2f998..a77b886d5 100644 --- a/pkg/schema/constraint/lookup/vector.go +++ b/pkg/schema/constraint/lookup/vector.go @@ -13,6 +13,7 @@ package lookup import ( + "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" "github.com/consensys/go-corset/pkg/ir" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/util" @@ -30,6 +31,15 @@ type Vector[E ir.Evaluable] struct { Terms []E } +// NewLookupVector constructs a new vector in a given context with an optional selector. +func NewLookupVector[E ir.Evaluable](mid schema.ModuleId, selector util.Option[E], terms ...E) Vector[E] { + if selector.HasValue() { + return FilteredLookupVector(mid, selector.Unwrap(), terms...) + } + // + return UnfilteredLookupVector(mid, terms...) +} + // UnfilteredLookupVector constructs a new vector in a given context which has no selector. func UnfilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, terms ...E) Vector[E] { return Vector[E]{ @@ -87,24 +97,39 @@ func (p *Vector[E]) Ith(index uint) E { return p.Terms[index] } -// Len returns the number of items in this lookup vector. +// Len returns the number of items in this lookup vector. Note this doesn't +// include the selector (since this is optional anyway). func (p *Vector[E]) Len() uint { return uint(len(p.Terms)) } // Lisp returns a textual representation of this vector. 
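The textual form produced by the method below lists the selector slot first (rendered as "_" when the vector is unfiltered) followed by the terms. A rough, self-contained sketch of the same convention, with strings in place of sexp nodes:

package main

import (
	"fmt"
	"strings"
)

// render roughly mirrors the convention used by Vector.Lisp: the selector slot
// comes first ("_" when the vector is unfiltered), followed by the terms.
func render(selector string, terms []string) string {
	if selector == "" {
		selector = "_"
	}
	return "(" + strings.Join(append([]string{selector}, terms...), " ") + ")"
}

func main() {
	fmt.Println(render("", []string{"X", "Y"}))    // (_ X Y)
	fmt.Println(render("SEL", []string{"X", "Y"})) // (SEL X Y)
}
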
-func (p *Vector[E]) Lisp(schema schema.RegisterMap) sexp.SExp {
-	terms := sexp.EmptyList()
+func (p *Vector[E]) Lisp(schema schema.AnySchema) sexp.SExp {
+	var (
+		module = schema.Module(p.Module)
+		terms  = sexp.EmptyList()
+	)
 	//
 	if p.HasSelector() {
-		terms.Append(p.Selector.Unwrap().Lisp(schema))
+		terms.Append(p.Selector.Unwrap().Lisp(module))
 	} else {
 		terms.Append(sexp.NewSymbol("_"))
 	}
 	// Iterate source expressions
 	for i := range p.Len() {
-		terms.Append(p.Ith(i).Lisp(schema))
+		terms.Append(p.Ith(i).Lisp(module))
 	}
 	// Done
 	return terms
 }
+
+// Substitute any matching labelled constants within this vector.
+func (p *Vector[E]) Substitute(mapping map[string]fr.Element) {
+	for _, ith := range p.Terms {
+		ith.Substitute(mapping)
+	}
+	// Substitute through selector (if applicable)
+	if p.HasSelector() {
+		p.Selector.Unwrap().Substitute(mapping)
+	}
+}

From 6656e57c909a2fa6d62d22fb40bd76ec9b9e4a56 Mon Sep 17 00:00:00 2001
From: DavePearce
Date: Mon, 28 Jul 2025 13:13:29 +1200
Subject: [PATCH 4/8] fix encoding / decoding; handle panics in tests

This fixes encoding / decoding of lookup vectors, and additionally adds
some improved handling of panics during constraint checking.
---
 pkg/asm/compiler/mir.go | 4 +-
 pkg/binfile/binfile.go | 2 +-
 pkg/corset/compiler/translator.go | 4 +-
 pkg/ir/air/gadgets/bitwidth.go | 4 +-
 pkg/ir/mir/encoding.go | 76 ++++++++++++++++---------
 pkg/ir/mir/lookup.go | 4 +-
 pkg/ir/mir/lower.go | 64 +++++++++++----------
 pkg/ir/term.go | 2 -
 pkg/schema/constraint/lookup/failure.go | 8 +--
 pkg/schema/constraint/lookup/vector.go | 16 +++---
 pkg/schema/schemas.go | 36 +++++++++++-
 11 files changed, 134 insertions(+), 86 deletions(-)

diff --git a/pkg/asm/compiler/mir.go b/pkg/asm/compiler/mir.go
index 389d1a85f..8290a496b 100644
--- a/pkg/asm/compiler/mir.go
+++ b/pkg/asm/compiler/mir.go
@@ -77,8 +77,8 @@ func (p MirModule) NewLookup(name string, from []MirExpr, targetMid uint, to []M
 	sources = array.Prepend(unused, sources)
 	targets = array.Prepend(unused, targets)
 	// FIXME: exploit conditional lookups
-	target := []lookup.Vector[mir.Term]{lookup.UnfilteredLookupVector(p.Module.Id(), targets...)}
-	source := []lookup.Vector[mir.Term]{lookup.UnfilteredLookupVector(targetMid, sources...)}
+	target := []lookup.Vector[mir.Term]{lookup.UnfilteredVector(p.Module.Id(), targets...)}
+	source := []lookup.Vector[mir.Term]{lookup.UnfilteredVector(targetMid, sources...)}
 	p.Module.AddConstraint(mir.NewLookupConstraint(name, target, source))
 }

diff --git a/pkg/binfile/binfile.go b/pkg/binfile/binfile.go
index 7b0737ef9..197aeae13 100644
--- a/pkg/binfile/binfile.go
+++ b/pkg/binfile/binfile.go
@@ -200,7 +200,7 @@ func (p *Header) IsCompatible() bool {
 // matter what version, we should always have the ZKBINARY identifier first,
 // followed by a GOB encoding of the header. What follows after that, however,
 // is determined by the major version.
-const BINFILE_MAJOR_VERSION uint16 = 5
+const BINFILE_MAJOR_VERSION uint16 = 6

 // BINFILE_MINOR_VERSION gives the minor version of the binary file format. The
 // expected interpretation is that older versions are compatible with newer
diff --git a/pkg/corset/compiler/translator.go b/pkg/corset/compiler/translator.go
index 232b6784a..f0a7a447e 100644
--- a/pkg/corset/compiler/translator.go
+++ b/pkg/corset/compiler/translator.go
@@ -408,10 +408,10 @@ func (t *translator) translateDefLookupSources(selector ast.Expr,
 		s, errs := t.translateExpression(selector, module, 0)
 		errors = append(errors, errs...)
- return lookup.FilteredLookupVector(module.Id(), s, terms...), context, errors + return lookup.FilteredVector(module.Id(), s, terms...), context, errors } // - return lookup.UnfilteredLookupVector(module.Id(), terms...), context, errors + return lookup.UnfilteredVector(module.Id(), terms...), context, errors } // Translate a "definrange" declaration. diff --git a/pkg/ir/air/gadgets/bitwidth.go b/pkg/ir/air/gadgets/bitwidth.go index bb2a1fc57..f6574ca1e 100644 --- a/pkg/ir/air/gadgets/bitwidth.go +++ b/pkg/ir/air/gadgets/bitwidth.go @@ -177,8 +177,8 @@ func (p *BitwidthGadget) applyRecursiveBitwidthGadget(ref sc.RegisterRef, bitwid // Target Value ir.RawRegisterAccess[air.Term](sc.NewRegisterId(0), 0)} // - targets := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredLookupVector(mid, targetAccesses...)} - sources := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredLookupVector(module.Id(), sourceAccesses...)} + targets := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredVector(mid, targetAccesses...)} + sources := []lookup.Vector[*air.ColumnAccess]{lookup.UnfilteredVector(module.Id(), sourceAccesses...)} // module.AddConstraint(air.NewLookupConstraint(lookupHandle, targets, sources)) // Add column to assignment so its proof is included diff --git a/pkg/ir/mir/encoding.go b/pkg/ir/mir/encoding.go index ddf66fa92..fee90c31b 100644 --- a/pkg/ir/mir/encoding.go +++ b/pkg/ir/mir/encoding.go @@ -28,14 +28,14 @@ import ( const ( // Constraints - assertionTag = byte(0) - interleavingTag = byte(1) - lookupTag = byte(2) - permutationTag = byte(3) - rangeTag = byte(4) - sortedTag = byte(5) - sortedSelectionTag = byte(6) - vanishingTag = byte(7) + assertionTag = byte(0) + interleavingTag = byte(1) + lookupTag = byte(2) + permutationTag = byte(3) + rangeTag = byte(4) + sortedUnfilteredTag = byte(5) + sortedFilteredTag = byte(6) + vanishingTag = byte(7) // Logicals conjunctTag = byte(10) disjunctTag = byte(11) @@ -165,20 +165,27 @@ func encode_lookup(c LookupConstraint) ([]byte, error) { return buffer.Bytes(), nil } -func encode_lookup_vector(terms lookup.Vector[Term], buffer *bytes.Buffer) error { +func encode_lookup_vector(vector lookup.Vector[Term], buffer *bytes.Buffer) error { var ( gobEncoder = gob.NewEncoder(buffer) + selector = vector.HasSelector() ) // Source Context - if err := gobEncoder.Encode(terms.Module); err != nil { + if err := gobEncoder.Encode(vector.Module); err != nil { return err } - // - if terms.HasSelector() { - panic("todo") + // HasSelector flag + if err := gobEncoder.Encode(selector); err != nil { + return err + } + // Selector itself (if applicable) + if selector { + if err := encode_term(vector.Selector.Unwrap(), buffer); err != nil { + return err + } } // Source terms - return encode_nary(encode_term, buffer, terms.Terms) + return encode_nary(encode_term, buffer, vector.Terms) } func encode_permutation(c PermutationConstraint) ([]byte, error) { @@ -218,9 +225,9 @@ func encode_sorted(c SortedConstraint) ([]byte, error) { ) // if c.Selector.HasValue() { - tag = sortedSelectionTag + tag = sortedFilteredTag } else { - tag = sortedTag + tag = sortedUnfilteredTag } // Tag if _, err := buffer.Write([]byte{tag}); err != nil { @@ -323,9 +330,9 @@ func decode_constraint(bytes []byte) (schema.Constraint, error) { return decode_permutation(bytes[1:]) case rangeTag: return decode_range(bytes[1:]) - case sortedTag: + case sortedUnfilteredTag: return decode_sorted(false, bytes[1:]) - case sortedSelectionTag: + case sortedFilteredTag: return decode_sorted(true, bytes[1:]) case 
vanishingTag: return decode_vanishing(bytes[1:]) @@ -411,21 +418,34 @@ func decode_lookup(data []byte) (schema.Constraint, error) { func decode_lookup_vector(buf *bytes.Buffer) (lookup.Vector[Term], error) { var ( - gobDecoder = gob.NewDecoder(buf) - terms lookup.Vector[Term] - err error + gobDecoder = gob.NewDecoder(buf) + vector lookup.Vector[Term] + hasSelector bool + selector Term + err error ) // Context - if err = gobDecoder.Decode(&terms.Module); err != nil { - return terms, err + if err = gobDecoder.Decode(&vector.Module); err != nil { + return vector, err + } + // HasSelector + if err = gobDecoder.Decode(&hasSelector); err != nil { + return vector, err + } + // Selector (if applicable) + if hasSelector { + if selector, err = decode_term(buf); err != nil { + return vector, err + } + // Wrap selector + vector.Selector = util.Some(selector) } // Contents - if terms.Terms, err = decode_nary(decode_term, buf); err != nil { - return terms, err + if vector.Terms, err = decode_nary(decode_term, buf); err != nil { + return vector, err } - // - //return terms, nil - panic("todo: handle selectors") + // Done + return vector, nil } func decode_permutation(data []byte) (schema.Constraint, error) { diff --git a/pkg/ir/mir/lookup.go b/pkg/ir/mir/lookup.go index 0d9f0d90a..e1877c216 100644 --- a/pkg/ir/mir/lookup.go +++ b/pkg/ir/mir/lookup.go @@ -53,7 +53,7 @@ func mapLookupVectors(vectors []lookup.Vector[Term], mapping schema.LimbsMap) [] terms = splitTerms(vector.Terms, modmap) ) // TODO: what about the selector itself? - nterms[i] = lookup.NewLookupVector(vector.Module, vector.Selector, terms...) + nterms[i] = lookup.NewVector(vector.Module, vector.Selector, terms...) } // return nterms @@ -120,7 +120,7 @@ func splitLookupVector(geometry lookup.Geometry, vector lookup.Vector[Term], // Padding nlimbs := padLookupLimbs(limbs, geometry) // Done - return lookup.NewLookupVector(vector.Module, vector.Selector, nlimbs...) + return lookup.NewVector(vector.Module, vector.Selector, nlimbs...) } // Alignment is related to the potential for so-called "irregular lookups". diff --git a/pkg/ir/mir/lower.go b/pkg/ir/mir/lower.go index e473503e7..fa627eb45 100644 --- a/pkg/ir/mir/lower.go +++ b/pkg/ir/mir/lower.go @@ -23,6 +23,7 @@ import ( air_gadgets "github.com/consensys/go-corset/pkg/ir/air/gadgets" "github.com/consensys/go-corset/pkg/schema" "github.com/consensys/go-corset/pkg/schema/constraint/lookup" + "github.com/consensys/go-corset/pkg/util" "github.com/consensys/go-corset/pkg/util/collection/array" "github.com/consensys/go-corset/pkg/util/field" util_math "github.com/consensys/go-corset/pkg/util/math" @@ -258,10 +259,18 @@ func (p *AirLowering) lowerLookupConstraintToAir(c LookupConstraint, airModule * airModule.AddConstraint(air.NewLookupConstraint(c.Handle, targets, sources)) } -func (p *AirLowering) expandLookupVectorToAir(terms lookup.Vector[Term]) lookup.Vector[*air.ColumnAccess] { - // accesses := p.expandTermsInner(terms.Module, true, terms.Item...) - // return ir.Enclose(terms.Module, accesses) - panic("todo") +func (p *AirLowering) expandLookupVectorToAir(vector lookup.Vector[Term]) lookup.Vector[*air.ColumnAccess] { + var ( + terms = p.expandTerms(vector.Module, vector.Terms...) + selector util.Option[*air.ColumnAccess] + ) + // + if vector.HasSelector() { + sel := p.expandTerm(vector.Module, vector.Selector.Unwrap()) + selector = util.Some(sel) + } + // + return lookup.NewVector(vector.Module, selector, terms...) } // Lower a sorted constraint to the AIR level. 
The challenge here is that there @@ -319,41 +328,34 @@ func (p *AirLowering) lowerSortedConstraintToAir(c SortedConstraint, airModule * } func (p *AirLowering) expandTerms(context schema.ModuleId, terms ...Term) []*air.ColumnAccess { - return p.expandTermsInner(context, false, terms...) + var nterms = make([]*air.ColumnAccess, len(terms)) + // + for i, ith := range terms { + nterms[i] = p.expandTerm(context, ith) + } + // + return nterms } -// Lower a set of zero or more MIR expressions. -func (p *AirLowering) expandTermsInner(context schema.ModuleId, lookup bool, terms ...Term) []*air.ColumnAccess { +func (p *AirLowering) expandTerm(context schema.ModuleId, term Term) *air.ColumnAccess { var ( - nterms = make([]*air.ColumnAccess, len(terms)) airModule = p.airSchema.Module(context) ) // - for i, ith := range terms { - var source_register schema.RegisterId - // - if lookup && i == 0 && !ith.IsDefined() { - // NOTE: this is a special case for lookups to handle the absence of - // a selector. What we are doing here is communicating that no - // selector exists. - source_register = schema.NewUnusedRegisterId() - } else { - sourceRange := ith.ValueRange(airModule) - sourceBitwidth, signed := sourceRange.BitWidth() - // - if signed { - panic(fmt.Sprintf("signed expansion encountered (%s)", ith.Lisp(airModule).String(true))) - } - // Lower source expressions - source := p.lowerAndSimplifyTermTo(ith, airModule) - // Expand them - source_register = air_gadgets.Expand(sourceBitwidth, source, airModule) - } - // - nterms[i] = ir.RawRegisterAccess[air.Term](source_register, 0) + var source_register schema.RegisterId + // + sourceRange := term.ValueRange(airModule) + sourceBitwidth, signed := sourceRange.BitWidth() + // + if signed { + panic(fmt.Sprintf("signed expansion encountered (%s)", term.Lisp(airModule).String(true))) } + // Lower source expressions + source := p.lowerAndSimplifyTermTo(term, airModule) + // Expand them + source_register = air_gadgets.Expand(sourceBitwidth, source, airModule) // - return nterms + return ir.RawRegisterAccess[air.Term](source_register, 0) } func (p *AirLowering) lowerAndSimplifyLogicalTo(term LogicalTerm, diff --git a/pkg/ir/term.go b/pkg/ir/term.go index 15e4ce6ad..861a92ae6 100644 --- a/pkg/ir/term.go +++ b/pkg/ir/term.go @@ -54,8 +54,6 @@ type Evaluable interface { // Lisp converts this schema element into a simple S-Expression, for example // so it can be printed. Lisp(schema.RegisterMap) sexp.SExp - // IsDefined checks whether a given evaluable expression is defined, or not. - IsDefined() bool // ValueRange returns the interval of values that this term can evaluate to. // For terms accessing registers, this is determined by the declared width of // the register. diff --git a/pkg/schema/constraint/lookup/failure.go b/pkg/schema/constraint/lookup/failure.go index 9ad48630c..5e5873461 100644 --- a/pkg/schema/constraint/lookup/failure.go +++ b/pkg/schema/constraint/lookup/failure.go @@ -45,11 +45,9 @@ func (p *Failure) String() string { // RequiredCells identifies the cells required to evaluate the failing constraint at the failing row. 
func (p *Failure) RequiredCells(_ trace.Trace) *set.AnySortedSet[trace.CellRef] { res := set.NewAnySortedSet[trace.CellRef]() - // - for i, e := range p.Sources { - if i != 0 || e.IsDefined() { - res.InsertSorted(e.RequiredCells(int(p.Row), p.Context)) - } + // Handle terms + for _, e := range p.Sources { + res.InsertSorted(e.RequiredCells(int(p.Row), p.Context)) } // return res diff --git a/pkg/schema/constraint/lookup/vector.go b/pkg/schema/constraint/lookup/vector.go index a77b886d5..bd71c4c7c 100644 --- a/pkg/schema/constraint/lookup/vector.go +++ b/pkg/schema/constraint/lookup/vector.go @@ -31,17 +31,17 @@ type Vector[E ir.Evaluable] struct { Terms []E } -// NewLookupVector constructs a new vector in a given context with an optional selector. -func NewLookupVector[E ir.Evaluable](mid schema.ModuleId, selector util.Option[E], terms ...E) Vector[E] { +// NewVector constructs a new vector in a given context with an optional selector. +func NewVector[E ir.Evaluable](mid schema.ModuleId, selector util.Option[E], terms ...E) Vector[E] { if selector.HasValue() { - return FilteredLookupVector(mid, selector.Unwrap(), terms...) + return FilteredVector(mid, selector.Unwrap(), terms...) } // - return UnfilteredLookupVector(mid, terms...) + return UnfilteredVector(mid, terms...) } -// UnfilteredLookupVector constructs a new vector in a given context which has no selector. -func UnfilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, terms ...E) Vector[E] { +// UnfilteredVector constructs a new vector in a given context which has no selector. +func UnfilteredVector[E ir.Evaluable](mid schema.ModuleId, terms ...E) Vector[E] { return Vector[E]{ mid, util.None[E](), @@ -49,8 +49,8 @@ func UnfilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, terms ...E) Vec } } -// FilteredLookupVector constructs a new vector in a given context which has a selector. -func FilteredLookupVector[E ir.Evaluable](mid schema.ModuleId, selector E, terms ...E) Vector[E] { +// FilteredVector constructs a new vector in a given context which has a selector. 
+func FilteredVector[E ir.Evaluable](mid schema.ModuleId, selector E, terms ...E) Vector[E] { return Vector[E]{ mid, util.Some(selector), diff --git a/pkg/schema/schemas.go b/pkg/schema/schemas.go index 9c05440c2..e704fdff7 100644 --- a/pkg/schema/schemas.go +++ b/pkg/schema/schemas.go @@ -14,6 +14,7 @@ package schema import ( "fmt" + "runtime" tr "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" @@ -145,10 +146,26 @@ func processConstraintBatch[C Constraint](logtitle string, batch uint, batchsize ith := iter.Next() // Launch checker for constraint go func() { - // Send outcome back + var ( + context = ith.Contexts()[0] + name = ith.Name() + cov bit.Set + ) + // Setup panic intercept + defer func() { + var ( + err = recover() + buf = make([]byte, 2048) + ) + // + if msg, ok := err.(string); ok { + n := runtime.Stack(buf, false) + c <- batchOutcome{context, name, cov, &panicFailure{msg, buf[:n]}} + } + }() + // Check and send outcome back cov, err := ith.Accepts(trace, Any(schema)) - context := ith.Contexts()[0] - name := ith.Name() + // c <- batchOutcome{context, name, cov, err} }() } @@ -172,3 +189,16 @@ type batchOutcome struct { data bit.Set error Failure } + +type panicFailure struct { + message string + stackTrace []byte +} + +func (p *panicFailure) Message() string { + return p.String() +} + +func (p *panicFailure) String() string { + return fmt.Sprintf("%s\n\n%s", p.message, string(p.stackTrace)) +} From fbb3d29d573c1a2145c2b7e00d5bff196e49c585 Mon Sep 17 00:00:00 2001 From: DavePearce Date: Mon, 28 Jul 2025 14:12:26 +1200 Subject: [PATCH 5/8] more tweaks --- pkg/ir/air/gadgets/bitwidth.go | 4 ---- pkg/ir/mir/agnosticity.go | 15 +++++++-------- pkg/ir/mir/encoding.go | 19 ++++++------------- 3 files changed, 13 insertions(+), 25 deletions(-) diff --git a/pkg/ir/air/gadgets/bitwidth.go b/pkg/ir/air/gadgets/bitwidth.go index f6574ca1e..4f559dc74 100644 --- a/pkg/ir/air/gadgets/bitwidth.go +++ b/pkg/ir/air/gadgets/bitwidth.go @@ -165,15 +165,11 @@ func (p *BitwidthGadget) applyRecursiveBitwidthGadget(ref sc.RegisterRef, bitwid } // Add lookup constraint for register into proof sourceAccesses := []*air.ColumnAccess{ - // Source Selector (unused) - ir.RawRegisterAccess[air.Term](sc.NewUnusedRegisterId(), 0), // Source Value ir.RawRegisterAccess[air.Term](ref.Register(), 0)} // NOTE: 0th column always assumed to hold full value, with others // representing limbs, etc. targetAccesses := []*air.ColumnAccess{ - // Target Selector (unused) - ir.RawRegisterAccess[air.Term](sc.NewUnusedRegisterId(), 0), // Target Value ir.RawRegisterAccess[air.Term](sc.NewRegisterId(0), 0)} // diff --git a/pkg/ir/mir/agnosticity.go b/pkg/ir/mir/agnosticity.go index 28a50c7e3..2d2932371 100644 --- a/pkg/ir/mir/agnosticity.go +++ b/pkg/ir/mir/agnosticity.go @@ -122,14 +122,7 @@ func splitTerm(term Term, mapping schema.RegisterLimbsMap) Term { case *LabelledConst: return t case *RegisterAccess: - if t.Register.IsUsed() { - return splitRegisterAccess(t, mapping) - } - // NOTE: this indicates an unused register access. Currently, this can - // only occur for the selector column of a lookup. This behaviour maybe - // deprecated in the future, and that would make this check - // unnecessary. 
- return t + return splitRegisterAccess(t, mapping) case *Exp: return ir.Exponent(splitTerm(t.Arg, mapping), t.Pow) case *Mul: @@ -166,6 +159,12 @@ func splitRegisterAccess(term *RegisterAccess, mapping schema.RegisterLimbsMap) for i, limb := range limbs { terms[i] = &ir.RegisterAccess[Term]{Register: limb, Shift: term.Shift} } + // Check whether vector required, or not + if len(limbs) == 1 { + // NOTE: we cannot return the original term directly, as its index may + // differ under the limb mapping. + return terms[0] + } // return ir.NewVectorAccess(terms) } diff --git a/pkg/ir/mir/encoding.go b/pkg/ir/mir/encoding.go index fee90c31b..5e079a85d 100644 --- a/pkg/ir/mir/encoding.go +++ b/pkg/ir/mir/encoding.go @@ -54,13 +54,11 @@ const ( ifZeroTag = byte(33) labelledConstantTag = byte(34) registerAccessTag = byte(35) - // NOTE: unused registers only required for optional lookup selectors. - unusedRegisterAccessTag = byte(36) - expTag = byte(37) - mulTag = byte(38) - normTag = byte(39) - subTag = byte(40) - vectorAccessTag = byte(41) + expTag = byte(36) + mulTag = byte(37) + normTag = byte(38) + subTag = byte(39) + vectorAccessTag = byte(40) ) func encode_constraint(constraint schema.Constraint) ([]byte, error) { @@ -817,9 +815,7 @@ func encode_exponent(term Exp, buf *bytes.Buffer) error { func encode_reg_access(term RegisterAccess, buf *bytes.Buffer) error { // Write (appropriate) tag - if !term.Register.IsUsed() { - return buf.WriteByte(unusedRegisterAccessTag) - } else if err := buf.WriteByte(registerAccessTag); err != nil { + if err := buf.WriteByte(registerAccessTag); err != nil { return err } // @@ -875,9 +871,6 @@ func decode_term(buf *bytes.Buffer) (Term, error) { return decode_labelled_constant(buf) case registerAccessTag: return decode_reg_access(buf) - case unusedRegisterAccessTag: - rid := schema.NewUnusedRegisterId() - return ir.NewRegisterAccess[Term](rid, 0), nil case mulTag: return decode_nary_terms(mulConstructor, buf) case normTag: From 34dc68595301a437ec01fd336ba389125e50c1fb Mon Sep 17 00:00:00 2001 From: DavePearce Date: Mon, 28 Jul 2025 14:21:53 +1200 Subject: [PATCH 6/8] fix for agnostic lookup splitting --- pkg/ir/mir/lookup.go | 4 ++-- pkg/schema/schemas.go | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/ir/mir/lookup.go b/pkg/ir/mir/lookup.go index e1877c216..c4ffff680 100644 --- a/pkg/ir/mir/lookup.go +++ b/pkg/ir/mir/lookup.go @@ -89,7 +89,7 @@ func splitLookupVector(geometry lookup.Geometry, vector lookup.Vector[Term], // Initial split for i, t := range vector.Terms { // Determine value range of ith term - valrange := t.ValueRange(modmap) + valrange := t.ValueRange(modmap.LimbsMap()) // Determine bitwidth for that range bitwidth, signed := valrange.BitWidth() // Sanity check signed lookups @@ -199,7 +199,7 @@ func padLookupLimbs(terms [][]Term, geometry lookup.Geometry) []Term { // Append available terms nterms = append(nterms, ith...) 
// Pad out with zeros to match geometry - for len(terms) < n { + for m := n - len(nterms); m > 0; m-- { nterms = append(nterms, ir.Const64[Term](0)) } } diff --git a/pkg/schema/schemas.go b/pkg/schema/schemas.go index e704fdff7..8daef4745 100644 --- a/pkg/schema/schemas.go +++ b/pkg/schema/schemas.go @@ -158,9 +158,11 @@ func processConstraintBatch[C Constraint](logtitle string, batch uint, batchsize buf = make([]byte, 2048) ) // - if msg, ok := err.(string); ok { + if err != nil { n := runtime.Stack(buf, false) - c <- batchOutcome{context, name, cov, &panicFailure{msg, buf[:n]}} + c <- batchOutcome{context, name, cov, &panicFailure{ + fmt.Sprintf("%v", err), buf[:n], + }} } }() // Check and send outcome back From e03280c02c88408f378d7fc94c36b625e6f5d5f5 Mon Sep 17 00:00:00 2001 From: DavePearce Date: Mon, 28 Jul 2025 14:34:47 +1200 Subject: [PATCH 7/8] tweak for handling panic within tests This just makes the error handling code a little more robust. --- pkg/schema/schemas.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/schema/schemas.go b/pkg/schema/schemas.go index 8daef4745..21259f1f5 100644 --- a/pkg/schema/schemas.go +++ b/pkg/schema/schemas.go @@ -158,6 +158,7 @@ func processConstraintBatch[C Constraint](logtitle string, batch uint, batchsize buf = make([]byte, 2048) ) // + //if msg, ok := err.(string); ok { if err != nil { n := runtime.Stack(buf, false) c <- batchOutcome{context, name, cov, &panicFailure{ From 557261ecfa8c067e096baa45da79ab4f5eaf410d Mon Sep 17 00:00:00 2001 From: DavePearce Date: Mon, 28 Jul 2025 14:49:04 +1200 Subject: [PATCH 8/8] minor fixes --- pkg/ir/mir/lookup.go | 2 +- pkg/schema/constraint/lookup/constraint.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/ir/mir/lookup.go b/pkg/ir/mir/lookup.go index c4ffff680..0772deec3 100644 --- a/pkg/ir/mir/lookup.go +++ b/pkg/ir/mir/lookup.go @@ -199,7 +199,7 @@ func padLookupLimbs(terms [][]Term, geometry lookup.Geometry) []Term { // Append available terms nterms = append(nterms, ith...) // Pad out with zeros to match geometry - for m := n - len(nterms); m > 0; m-- { + for m := n - len(ith); m > 0; m-- { nterms = append(nterms, ir.Const64[Term](0)) } } diff --git a/pkg/schema/constraint/lookup/constraint.go b/pkg/schema/constraint/lookup/constraint.go index c89127f68..38532bf5e 100644 --- a/pkg/schema/constraint/lookup/constraint.go +++ b/pkg/schema/constraint/lookup/constraint.go @@ -337,7 +337,7 @@ func evalExprsAsBytes[E ir.Evaluable](k int, vec Vector[E], handle string, trMod return &constraint.InternalFailure{ Handle: handle, Context: vec.Module, - Row: i, + Row: uint(k), Term: vec.Ith(i), Error: err.Error(), }
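
The optional-selector wire format introduced for lookup vectors in patch 4 is a flag-then-payload scheme: a boolean records whether a selector is present, the selector bytes follow only when that flag is set, and the terms come last. The following self-contained Go sketch illustrates that round trip using encoding/gob, with plain strings standing in for encoded terms; the type and function names here are illustrative only and are not part of the go-corset API.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// vector mirrors the shape of a lookup vector: an optional selector plus a
// list of terms. Strings stand in for whatever term encoding is used.
type vector struct {
	Selector *string
	Terms    []string
}

// encode writes: hasSelector flag, the selector (only if present), then terms.
func encode(v vector, buf *bytes.Buffer) error {
	enc := gob.NewEncoder(buf)
	hasSelector := v.Selector != nil
	if err := enc.Encode(hasSelector); err != nil {
		return err
	}
	if hasSelector {
		if err := enc.Encode(*v.Selector); err != nil {
			return err
		}
	}
	return enc.Encode(v.Terms)
}

// decode reverses encode: it reads the flag first and only attempts to read a
// selector when the flag was set.
func decode(buf *bytes.Buffer) (vector, error) {
	var (
		dec         = gob.NewDecoder(buf)
		v           vector
		hasSelector bool
	)
	if err := dec.Decode(&hasSelector); err != nil {
		return v, err
	}
	if hasSelector {
		var s string
		if err := dec.Decode(&s); err != nil {
			return v, err
		}
		v.Selector = &s
	}
	err := dec.Decode(&v.Terms)
	return v, err
}

func main() {
	var (
		buf bytes.Buffer
		sel = "selector"
	)
	in := vector{Selector: &sel, Terms: []string{"a", "b"}}
	if err := encode(in, &buf); err != nil {
		panic(err)
	}
	out, err := decode(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(*out.Selector, out.Terms) // selector [a b]
}

Because the presence flag is read before any selector bytes, the decoder never has to guess whether a selector exists, which is what lets the new decode_lookup_vector replace the earlier panic("todo: handle selectors") path.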