Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
221 changes: 194 additions & 27 deletions crates/lambda-rs/src/math/matrix.rs
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,63 @@ pub fn identity_matrix<
return result;
}

/// Compute determinant via Gaussian elimination on fixed-size stack storage.
///
/// This avoids heap allocations in hot fixed-size paths (for example 4x4
/// transform matrices).
///
/// # Arguments
///
/// - `data`: Square matrix values stored in stack-allocated fixed-size arrays.
///
/// # Returns
///
/// - Determinant of `data` (`1.0` for the degenerate `N == 0` case, matching
///   the empty-product convention).
/// - `0.0` when elimination detects an exact zero pivot after pivoting
///   (singular matrix).
fn determinant_gaussian_stack<const N: usize>(mut data: [[f32; N]; N]) -> f32 {
  let mut sign = 1.0_f32;
  for pivot in 0..N {
    // Partial pivoting: select the row with the largest absolute value in the
    // pivot column for numerical stability.
    let mut pivot_row = pivot;
    let mut pivot_abs = data[pivot][pivot].abs();
    for (candidate, row) in data.iter().enumerate().skip(pivot + 1) {
      let candidate_abs = row[pivot].abs();
      if candidate_abs > pivot_abs {
        pivot_abs = candidate_abs;
        pivot_row = candidate;
      }
    }

    // An exactly zero pivot after pivoting means the matrix is singular.
    if pivot_abs == 0.0 {
      return 0.0;
    }

    if pivot_row != pivot {
      data.swap(pivot, pivot_row);
      // Each row exchange flips the determinant's sign.
      sign = -sign;
    }

    let pivot_value = data[pivot][pivot];
    // Copy the pivot row so the remaining rows can be borrowed mutably.
    let pivot_tail = data[pivot];
    for row in data.iter_mut().skip(pivot + 1) {
      let factor = row[pivot] / pivot_value;
      if factor == 0.0 {
        // Column entry is already zero; skip the inner update entirely.
        continue;
      }
      // Only columns from `pivot` onward can change; earlier ones are zero.
      for (dst, src) in row[pivot..].iter_mut().zip(pivot_tail[pivot..].iter())
      {
        *dst -= factor * *src;
      }
    }
  }

  // The matrix is now upper triangular: its determinant is the diagonal
  // product, adjusted by the sign accumulated from row swaps.
  let diagonal_product: f32 =
    data.iter().enumerate().map(|(i, row)| row[i]).product();
  return sign * diagonal_product;
}

// -------------------------- ARRAY IMPLEMENTATION -----------------------------

/// Matrix implementations for arrays of f32 arrays. Including the trait Matrix into
Expand Down Expand Up @@ -325,7 +382,15 @@ where
todo!()
}

/// Computes the determinant of any square matrix using Laplace expansion.
/// Computes the determinant of any square matrix using Gaussian elimination
/// with partial pivoting.
///
/// # Behavior
///
/// - Uses stack-allocated elimination for 3x3 and 4x4 matrices to avoid heap
/// allocation on common transform sizes.
/// - Uses a generic dense fallback for larger matrices.
/// - Overall asymptotic runtime is `O(n^3)` for square `n x n` matrices.
fn determinant(&self) -> Result<f32, MathError> {
let rows = self.as_ref().len();
if rows == 0 {
Expand All @@ -341,34 +406,88 @@ where
return Err(MathError::NonSquareMatrix { rows, cols });
}

return match rows {
1 => Ok(self.as_ref()[0].as_ref()[0]),
2 => {
let a = self.at(0, 0);
let b = self.at(0, 1);
let c = self.at(1, 0);
let d = self.at(1, 1);
return Ok(a * d - b * c);
if rows == 1 {
return Ok(self.as_ref()[0].as_ref()[0]);
}

if rows == 2 {
let a = self.at(0, 0);
let b = self.at(0, 1);
let c = self.at(1, 0);
let d = self.at(1, 1);
return Ok(a * d - b * c);
}

if rows == 3 {
let mut data = [[0.0_f32; 3]; 3];
for (i, row) in data.iter_mut().enumerate() {
for (j, value) in row.iter_mut().enumerate() {
*value = self.at(i, j);
}
}
return Ok(determinant_gaussian_stack::<3>(data));
}

if rows == 4 {
let mut data = [[0.0_f32; 4]; 4];
for (i, row) in data.iter_mut().enumerate() {
for (j, value) in row.iter_mut().enumerate() {
*value = self.at(i, j);
}
}
return Ok(determinant_gaussian_stack::<4>(data));
}
Comment on lines +431 to +439
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

New 4x4 stack fast-path and the >4 generic elimination path aren’t covered by tests in this module (current determinant tests only exercise 2x2 and 3x3). Please add tests for at least one 4x4 determinant and one larger matrix (e.g., 5x5) to validate correctness and pivoting/sign behavior.

Copilot uses AI. Check for mistakes.

// Use a contiguous row-major buffer for larger matrices to keep the
// fallback cache-friendlier than `Vec<Vec<f32>>`.
let mut working = vec![0.0_f32; rows * rows];
for i in 0..rows {
for j in 0..rows {
working[i * rows + j] = self.at(i, j);
}
_ => {
let mut result = 0.0;
for i in 0..rows {
let mut submatrix: Vec<Vec<f32>> = Vec::with_capacity(rows - 1);
for j in 1..rows {
let mut row = Vec::new();
for k in 0..rows {
if k != i {
row.push(self.at(j, k));
}
}
submatrix.push(row);
}
let sub_determinant = submatrix.determinant()?;
result += self.at(0, i) * sub_determinant * (-1.0_f32).powi(i as i32);
}

let mut sign = 1.0_f32;
for pivot in 0..rows {
// Partial pivoting improves numerical stability and avoids division by 0.
let mut pivot_row = pivot;
let mut pivot_abs = working[pivot * rows + pivot].abs();
for candidate in (pivot + 1)..rows {
let candidate_abs = working[candidate * rows + pivot].abs();
if candidate_abs > pivot_abs {
pivot_abs = candidate_abs;
pivot_row = candidate;
}
return Ok(result);
}
};

if pivot_abs == 0.0 {
return Ok(0.0);
}

if pivot_row != pivot {
for column in 0..rows {
working.swap(pivot * rows + column, pivot_row * rows + column);
}
sign = -sign;
}

let pivot_value = working[pivot * rows + pivot];
for row in (pivot + 1)..rows {
let factor = working[row * rows + pivot] / pivot_value;
if factor == 0.0 {
continue;
}
for column in pivot..rows {
let row_idx = row * rows + column;
let pivot_idx = pivot * rows + column;
working[row_idx] -= factor * working[pivot_idx];
}
}
}

let diagonal_product =
(0..rows).map(|i| working[i * rows + i]).product::<f32>();
return Ok(sign * diagonal_product);
}

/// Return the size as a (rows, columns).
Expand All @@ -394,7 +513,6 @@ where

#[cfg(test)]
mod tests {

use super::{
filled_matrix,
identity_matrix,
Expand Down Expand Up @@ -454,6 +572,55 @@ mod tests {
assert_eq!(m2.determinant(), Ok(-306.0));
}

#[test]
fn determinant_4x4_stack_path_handles_row_swap_sign() {
  // Diagonal matrix 1 * 2 * 3 * 4 with its first two rows exchanged, so the
  // expected determinant is -(1 * 2 * 3 * 4) = -24.
  let permuted_diagonal = [
    [0.0, 2.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 3.0, 0.0],
    [0.0, 0.0, 0.0, 4.0],
  ];
  let determinant = permuted_diagonal.determinant();
  assert_eq!(determinant, Ok(-24.0));
}

#[test]
fn determinant_5x5_dynamic_path_handles_row_swap_sign() {
  // A single row exchange of a diagonal matrix negates the determinant:
  // -(1 * 2 * 3 * 4 * 5) = -120. Size 5 exercises the non-stack fallback.
  let permuted_diagonal = [
    [0.0, 2.0, 0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 3.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 4.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 5.0],
  ];
  let determinant = permuted_diagonal.determinant();
  assert_eq!(determinant, Ok(-120.0));
}

#[test]
fn determinant_preserves_small_non_zero_pivot_in_stack_path() {
  // A tiny-but-nonzero leading pivot must not be misclassified as singular.
  let nearly_singular = [
    [1.0e-8, 0.0, 0.0],
    [0.0, 2.0, 0.0],
    [0.0, 0.0, 3.0],
  ];
  let determinant = nearly_singular
    .determinant()
    .expect("determinant should succeed");
  crate::assert_approximately_equal!(determinant, 6.0e-8, 1.0e-12);
}

#[test]
fn determinant_preserves_small_non_zero_pivot_in_dynamic_path() {
  // Same tiny-pivot scenario as the stack-path test, sized at 5x5 so the
  // non-stack fallback path performs the elimination.
  let nearly_singular = [
    [1.0e-8, 0.0, 0.0, 0.0, 0.0],
    [0.0, 2.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 3.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 4.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 5.0],
  ];
  let determinant = nearly_singular
    .determinant()
    .expect("determinant should succeed");
  crate::assert_approximately_equal!(determinant, 1.2e-6, 1.0e-10);
}

#[test]
fn non_square_matrix_determinant() {
let m = [[3.0, 8.0], [4.0, 6.0], [0.0, 1.0]];
Expand Down
Loading