Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "nova-snark"
version = "0.65.0"
version = "0.66.0"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2021"
description = "High-speed recursive arguments from folding schemes"
Expand Down
38 changes: 20 additions & 18 deletions src/frontend/gadgets/poseidon/circuit2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,30 +36,32 @@ impl<Scalar: PrimeField> Elt<Scalar> {
Self::Num(num.add_bool_with_coeff(CS::one(), &Boolean::Constant(true), fr))
}

/// Ensure Elt is allocated.
/// Ensure Elt is allocated as a fresh variable with an equality constraint.
/// The `enforce` parameter must always be `true` to maintain circuit soundness;
/// passing `false` would produce an unconstrained variable.
pub fn ensure_allocated<CS: ConstraintSystem<Scalar>>(
&self,
cs: &mut CS,
enforce: bool,
) -> Result<AllocatedNum<Scalar>, SynthesisError> {
match self {
Self::Allocated(v) => Ok(v.clone()),
Self::Num(num) => {
let v = AllocatedNum::alloc(cs.namespace(|| "allocate for Elt::Num"), || {
num.get_value().ok_or(SynthesisError::AssignmentMissing)
})?;

if enforce {
cs.enforce(
|| "enforce num allocation preserves lc".to_string(),
|_| num.lc(Scalar::ONE),
|lc| lc + CS::one(),
|lc| lc + v.get_variable(),
);
}
Ok(v)
}
debug_assert!(enforce, "ensure_allocated must always enforce equality");
// Always allocate a fresh variable to guarantee consistent R1CS variable
// count regardless of whether `self` is `Allocated` or `Num`.
// Without this, compact-mode Poseidon produces different variable counts
// for different inputs, breaking IVC schemes with fixed R1CS shapes.
let v = AllocatedNum::alloc(cs.namespace(|| "ensure_allocated"), || {
self.val().ok_or(SynthesisError::AssignmentMissing)
})?;

if enforce {
cs.enforce(
|| "enforce allocation preserves value".to_string(),
|_| self.lc(),
|lc| lc + CS::one(),
|lc| lc + v.get_variable(),
);
}
Ok(v)
Comment on lines +48 to +64
}

/// Get the value of the Elt.
Expand Down
20 changes: 20 additions & 0 deletions src/frontend/util_cs/witness_cs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,26 @@ impl<Scalar> WitnessCS<Scalar>
where
Scalar: PrimeField,
{
/// Create a new WitnessCS with pre-allocated capacity for aux and input variables.
/// This avoids repeated reallocations during synthesis for large circuits.
pub fn with_capacity(aux_capacity: usize, input_capacity: usize) -> Self {
    // Reserve one extra input slot: slot 0 always holds the constant ONE.
    let mut inputs = Vec::with_capacity(input_capacity + 1);
    inputs.push(Scalar::ONE);
    Self {
        input_assignment: inputs,
        aux_assignment: Vec::with_capacity(aux_capacity),
    }
}

/// Clear the assignments while retaining allocated capacity, so the same
/// WitnessCS can be reused across multiple synthesis calls without
/// reallocating.
pub fn clear(&mut self) {
    self.aux_assignment.clear();
    self.input_assignment.clear();
    // Slot 0 of the inputs always holds the constant ONE.
    self.input_assignment.push(Scalar::ONE);
}

/// Get input assignment
pub fn input_assignment(&self) -> &[Scalar] {
&self.input_assignment
Expand Down
39 changes: 32 additions & 7 deletions src/gadgets/nonnative/bignat.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,38 @@ pub fn nat_to_limbs<Scalar: PrimeField>(
limb_width: usize,
n_limbs: usize,
) -> Result<Vec<Scalar>, SynthesisError> {
let mask = int_with_n_ones(limb_width);
let mut nat = nat.clone();
if nat.bits() as usize <= n_limbs * limb_width {
if nat.sign() == num_bigint::Sign::Minus {
return Err(SynthesisError::Unsatisfiable(format!(
"nat_to_limbs called with negative value {nat}"
)));
}
if nat.bits() as usize > n_limbs * limb_width {
return Err(SynthesisError::Unsatisfiable(format!(
"nat {nat} does not fit in {n_limbs} limbs of width {limb_width}"
)));
}
let bytes_per_limb = limb_width / 8;
if limb_width % 8 == 0 && bytes_per_limb <= 8 {
// Fast path: extract limbs directly from bytes
let (_, bytes) = nat.to_bytes_le();
Ok(
Comment on lines +48 to +57
(0..n_limbs)
.map(|i| {
let start = i * bytes_per_limb;
let mut limb_val: u64 = 0;
for j in 0..bytes_per_limb {
if start + j < bytes.len() {
limb_val |= (bytes[start + j] as u64) << (j * 8);
}
}
Scalar::from(limb_val)
})
.collect(),
)
} else {
// Fallback for non-byte-aligned limb widths
let mask = int_with_n_ones(limb_width);
let mut nat = nat.clone();
Ok(
(0..n_limbs)
.map(|_| {
Expand All @@ -52,10 +81,6 @@ pub fn nat_to_limbs<Scalar: PrimeField>(
})
.collect(),
)
} else {
Err(SynthesisError::Unsatisfiable(format!(
"nat {nat} does not fit in {n_limbs} limbs of width {limb_width}"
)))
}
}

Expand Down
18 changes: 0 additions & 18 deletions src/gadgets/nonnative/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
//! Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licensed under MIT

use crate::frontend::SynthesisError;
use ff::PrimeField;

trait OptionExt<T> {
fn grab(&self) -> Result<&T, SynthesisError>;
Expand All @@ -14,23 +13,6 @@ impl<T> OptionExt<T> for Option<T> {
}
}

trait BitAccess {
fn get_bit(&self, i: usize) -> Option<bool>;
}

impl<Scalar: PrimeField> BitAccess for Scalar {
fn get_bit(&self, i: usize) -> Option<bool> {
if i as u32 >= Scalar::NUM_BITS {
return None;
}

let (byte_pos, bit_pos) = (i / 8, i % 8);
let byte = self.to_repr().as_ref()[byte_pos];
let bit = (byte >> bit_pos) & 1;
Some(bit == 1)
}
}

/// Module providing big natural number arithmetic in circuits.
pub mod bignat;

Expand Down
53 changes: 48 additions & 5 deletions src/gadgets/nonnative/util.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use super::{BitAccess, OptionExt};
use super::OptionExt;
use crate::frontend::{
num::AllocatedNum, ConstraintSystem, LinearCombination, SynthesisError, Variable,
};
Expand Down Expand Up @@ -100,15 +100,37 @@ impl<Scalar: PrimeField> Num<Scalar> {
mut cs: CS,
n_bits: usize,
) -> Result<(), SynthesisError> {
assert!(
n_bits as u32 <= Scalar::NUM_BITS,
"n_bits ({n_bits}) exceeds field capacity ({})",
Scalar::NUM_BITS
);
let v = self.value;

// Pre-compute all bit values from the field element's byte representation
// to avoid calling to_repr() per bit (which does Montgomery reduction each time).
let bit_values: Option<Vec<bool>> = v.map(|val| {
let repr = val.to_repr();
let bytes = repr.as_ref();
(0..n_bits)
.map(|i| {
let (byte_pos, bit_pos) = (i / 8, i % 8);
if byte_pos < bytes.len() {
(bytes[byte_pos] >> bit_pos) & 1 == 1
} else {
false
}
})
.collect()
});
Comment on lines +110 to +125

// Allocate all but the first bit.
let bits: Vec<Variable> = (1..n_bits)
.map(|i| {
cs.alloc(
|| format!("bit {i}"),
|| {
let r = if *v.grab()?.get_bit(i).grab()? {
let r = if bit_values.as_ref().ok_or(SynthesisError::AssignmentMissing)?[i] {
Scalar::ONE
} else {
Scalar::ZERO
Expand Down Expand Up @@ -179,9 +201,20 @@ impl<Scalar: PrimeField> Num<Scalar> {
mut cs: CS,
n_bits: usize,
) -> Result<Bitvector<Scalar>, SynthesisError> {
// Pre-compute all bit values with a single to_repr() call
let values: Option<Vec<bool>> = self.value.as_ref().map(|v| {
let num = *v;
(0..n_bits).map(|i| num.get_bit(i).unwrap()).collect()
let repr = v.to_repr();
let bytes = repr.as_ref();
(0..n_bits)
.map(|i| {
let (byte_pos, bit_pos) = (i / 8, i % 8);
if byte_pos < bytes.len() {
(bytes[byte_pos] >> bit_pos) & 1 == 1
} else {
false
}
})
.collect()
});
let allocations: Vec<Bit<Scalar>> = (0..n_bits)
.map(|bit_i| {
Expand Down Expand Up @@ -243,7 +276,17 @@ pub fn f_to_nat<Scalar: PrimeField>(f: &Scalar) -> BigInt {
/// Convert a natural number to a field element.
/// Returns `None` if the number is negative, has more bytes than the field's
/// representation can hold, or is not a canonical field element (>= modulus).
pub fn nat_to_f<Scalar: PrimeField>(n: &BigInt) -> Option<Scalar> {
    let (sign, bytes) = n.to_bytes_le();
    if sign == Sign::Minus {
        // Field elements represent naturals only; reject negative input.
        return None;
    }
    let mut repr = Scalar::Repr::default();
    let repr_bytes = repr.as_mut();
    if bytes.len() > repr_bytes.len() {
        // Value is wider than the field's byte representation.
        return None;
    }
    repr_bytes[..bytes.len()].copy_from_slice(&bytes);
    // `from_repr` rejects non-canonical encodings; CtOption converts to Option.
    Scalar::from_repr(repr).into()
}

use super::bignat::BigNat;
Expand Down
Loading
Loading