SAH BVH, degenerate mesh filtering, robust signed distance + resolve cordic merge
This commit is contained in:
commit
207198f1cc
|
|
@ -1,6 +1,7 @@
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use cord_trig::{TrigGraph, TrigOp};
|
use cord_trig::{TrigGraph, TrigOp};
|
||||||
use crate::config::{CordicConfig, CordicOp};
|
use crate::config::{CordicConfig, CordicOp};
|
||||||
|
use crate::lut::CordicTable;
|
||||||
use crate::ops::*;
|
use crate::ops::*;
|
||||||
|
|
||||||
/// A compiled CORDIC program ready for binary serialization or execution.
|
/// A compiled CORDIC program ready for binary serialization or execution.
|
||||||
|
|
@ -17,7 +18,7 @@ pub struct CORDICProgram {
|
||||||
pub word_bits: u8,
|
pub word_bits: u8,
|
||||||
pub instructions: Vec<CORDICInstr>,
|
pub instructions: Vec<CORDICInstr>,
|
||||||
pub output: u32,
|
pub output: u32,
|
||||||
/// One entry per unique iteration count.
|
/// One entry per unique iteration count (Arc-deduplicated via LutRegistry).
|
||||||
pub tables: Vec<CordicTable>,
|
pub tables: Vec<CordicTable>,
|
||||||
/// Maps instruction index → table index. Only meaningful for CORDIC ops;
|
/// Maps instruction index → table index. Only meaningful for CORDIC ops;
|
||||||
/// non-CORDIC instructions have 0 here (ignored at eval time).
|
/// non-CORDIC instructions have 0 here (ignored at eval time).
|
||||||
|
|
@ -27,15 +28,6 @@ pub struct CORDICProgram {
|
||||||
pub gain: i64,
|
pub gain: i64,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Precomputed atan table + gain for a specific iteration count.
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct CordicTable {
|
|
||||||
pub iterations: u8,
|
|
||||||
pub atan_table: Vec<i64>,
|
|
||||||
pub gain: i64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct CompileConfig {
|
pub struct CompileConfig {
|
||||||
pub word_bits: u8,
|
pub word_bits: u8,
|
||||||
pub cordic: Option<CordicConfig>,
|
pub cordic: Option<CordicConfig>,
|
||||||
|
|
@ -68,30 +60,21 @@ impl CORDICProgram {
|
||||||
c
|
c
|
||||||
});
|
});
|
||||||
|
|
||||||
// Build tables for each unique iteration count
|
// Build tables for each unique iteration count, using LutRegistry for dedup
|
||||||
let unique_counts = cordic_cfg.unique_iteration_counts();
|
let unique_counts = cordic_cfg.unique_iteration_counts();
|
||||||
let mut count_to_table_idx: BTreeMap<u8, u8> = BTreeMap::new();
|
let mut count_to_table_idx: BTreeMap<u8, u8> = BTreeMap::new();
|
||||||
let mut tables = Vec::new();
|
let mut tables = Vec::new();
|
||||||
for &iters in &unique_counts {
|
for &iters in &unique_counts {
|
||||||
count_to_table_idx.insert(iters, tables.len() as u8);
|
count_to_table_idx.insert(iters, tables.len() as u8);
|
||||||
tables.push(CordicTable {
|
tables.push(CordicTable::get(iters, frac_bits));
|
||||||
iterations: iters,
|
|
||||||
atan_table: atan_table(iters),
|
|
||||||
gain: cordic_gain(iters, frac_bits),
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let default_table_idx = count_to_table_idx
|
let default_table_idx = count_to_table_idx
|
||||||
.get(&cordic_cfg.default_f)
|
.get(&cordic_cfg.default_f)
|
||||||
.copied()
|
.copied()
|
||||||
.unwrap_or_else(|| {
|
.unwrap_or_else(|| {
|
||||||
// Ensure default has a table entry
|
|
||||||
let idx = tables.len() as u8;
|
let idx = tables.len() as u8;
|
||||||
tables.push(CordicTable {
|
tables.push(CordicTable::get(cordic_cfg.default_f, frac_bits));
|
||||||
iterations: cordic_cfg.default_f,
|
|
||||||
atan_table: atan_table(cordic_cfg.default_f),
|
|
||||||
gain: cordic_gain(cordic_cfg.default_f, frac_bits),
|
|
||||||
});
|
|
||||||
idx
|
idx
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -163,12 +146,8 @@ impl CORDICProgram {
|
||||||
}
|
}
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
// Legacy single-table fields use the first table (or default)
|
let primary = tables.first().cloned()
|
||||||
let primary = tables.first().cloned().unwrap_or_else(|| CordicTable {
|
.unwrap_or_else(|| CordicTable::for_word_bits(config.word_bits));
|
||||||
iterations: config.word_bits,
|
|
||||||
atan_table: atan_table(config.word_bits),
|
|
||||||
gain: cordic_gain(config.word_bits, frac_bits),
|
|
||||||
});
|
|
||||||
|
|
||||||
CORDICProgram {
|
CORDICProgram {
|
||||||
word_bits: config.word_bits,
|
word_bits: config.word_bits,
|
||||||
|
|
@ -176,7 +155,7 @@ impl CORDICProgram {
|
||||||
output: graph.output,
|
output: graph.output,
|
||||||
tables,
|
tables,
|
||||||
instr_table_idx,
|
instr_table_idx,
|
||||||
atan_table: primary.atan_table,
|
atan_table: primary.atan.to_vec(),
|
||||||
gain: primary.gain,
|
gain: primary.gain,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -248,11 +227,7 @@ impl CORDICProgram {
|
||||||
pos += consumed;
|
pos += consumed;
|
||||||
}
|
}
|
||||||
|
|
||||||
let tables = vec![CordicTable {
|
let tables = vec![CordicTable::get(word_bits, word_bits - 1)];
|
||||||
iterations: word_bits,
|
|
||||||
atan_table: atan_table.clone(),
|
|
||||||
gain,
|
|
||||||
}];
|
|
||||||
let instr_table_idx = vec![0u8; instructions.len()];
|
let instr_table_idx = vec![0u8; instructions.len()];
|
||||||
|
|
||||||
Some(CORDICProgram {
|
Some(CORDICProgram {
|
||||||
|
|
|
||||||
|
|
@ -1,12 +1,12 @@
|
||||||
use cord_trig::{TrigGraph, TrigOp};
|
use cord_trig::{TrigGraph, TrigOp};
|
||||||
use crate::compiler::{CORDICProgram, CordicTable};
|
use crate::compiler::CORDICProgram;
|
||||||
use crate::ops::{atan_table, cordic_gain};
|
use crate::lut::CordicTable;
|
||||||
|
|
||||||
/// CORDIC evaluator: evaluates a TrigGraph using only integer
|
/// CORDIC evaluator: evaluates a TrigGraph using only integer
|
||||||
/// shifts, adds, and comparisons. No floating point trig.
|
/// shifts, adds, and comparisons. No floating point trig.
|
||||||
///
|
///
|
||||||
/// Proof that the entire pipeline compiles down to
|
/// Proof that the entire pipeline compiles down to
|
||||||
/// binary arithmetic — shift, add, compare, repeat.
|
/// binary arithmetic -- shift, add, compare, repeat.
|
||||||
pub struct CORDICEvaluator {
|
pub struct CORDICEvaluator {
|
||||||
frac_bits: u8,
|
frac_bits: u8,
|
||||||
instr_table_idx: Vec<u8>,
|
instr_table_idx: Vec<u8>,
|
||||||
|
|
@ -19,11 +19,7 @@ impl CORDICEvaluator {
|
||||||
CORDICEvaluator {
|
CORDICEvaluator {
|
||||||
frac_bits,
|
frac_bits,
|
||||||
instr_table_idx: Vec::new(),
|
instr_table_idx: Vec::new(),
|
||||||
tables: vec![CordicTable {
|
tables: vec![CordicTable::for_word_bits(word_bits)],
|
||||||
iterations: word_bits,
|
|
||||||
atan_table: atan_table(word_bits),
|
|
||||||
gain: cordic_gain(word_bits, frac_bits),
|
|
||||||
}],
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -76,8 +72,6 @@ impl CORDICEvaluator {
|
||||||
/// Resolve the table for a given instruction index.
|
/// Resolve the table for a given instruction index.
|
||||||
fn table_for(&self, instr_idx: usize) -> &CordicTable {
|
fn table_for(&self, instr_idx: usize) -> &CordicTable {
|
||||||
if self.instr_table_idx.is_empty() || self.tables.is_empty() {
|
if self.instr_table_idx.is_empty() || self.tables.is_empty() {
|
||||||
// Fallback: use legacy single table via a static-lifetime trick
|
|
||||||
// won't work — build a table reference from self fields instead
|
|
||||||
return &self.tables[0];
|
return &self.tables[0];
|
||||||
}
|
}
|
||||||
let tidx = self.instr_table_idx.get(instr_idx).copied().unwrap_or(0) as usize;
|
let tidx = self.instr_table_idx.get(instr_idx).copied().unwrap_or(0) as usize;
|
||||||
|
|
@ -94,7 +88,7 @@ impl CORDICEvaluator {
|
||||||
let d = if z >= 0 { 1i64 } else { -1 };
|
let d = if z >= 0 { 1i64 } else { -1 };
|
||||||
let x_new = x - d * (y >> i);
|
let x_new = x - d * (y >> i);
|
||||||
let y_new = y + d * (x >> i);
|
let y_new = y + d * (x >> i);
|
||||||
z -= d * table.atan_table[i];
|
z -= d * table.atan[i];
|
||||||
x = x_new;
|
x = x_new;
|
||||||
y = y_new;
|
y = y_new;
|
||||||
}
|
}
|
||||||
|
|
@ -118,7 +112,7 @@ impl CORDICEvaluator {
|
||||||
let d = if y < 0 { 1i64 } else { -1 };
|
let d = if y < 0 { 1i64 } else { -1 };
|
||||||
let x_new = x - d * (y >> i);
|
let x_new = x - d * (y >> i);
|
||||||
let y_new = y + d * (x >> i);
|
let y_new = y + d * (x >> i);
|
||||||
z -= d * table.atan_table[i];
|
z -= d * table.atan[i];
|
||||||
x = x_new;
|
x = x_new;
|
||||||
y = y_new;
|
y = y_new;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,9 @@ pub mod compiler;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
pub mod ops;
|
pub mod ops;
|
||||||
pub mod eval;
|
pub mod eval;
|
||||||
|
pub mod lut;
|
||||||
|
|
||||||
pub use compiler::CORDICProgram;
|
pub use compiler::CORDICProgram;
|
||||||
pub use config::CordicConfig;
|
pub use config::CordicConfig;
|
||||||
pub use eval::CORDICEvaluator;
|
pub use eval::CORDICEvaluator;
|
||||||
|
pub use lut::CordicTable;
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,114 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::{Arc, Mutex, OnceLock};
|
||||||
|
|
||||||
|
/// Pre-computed CORDIC table: atan values + gain for a given iteration count.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct CordicTable {
|
||||||
|
pub atan: Arc<[i64]>,
|
||||||
|
pub gain: i64,
|
||||||
|
pub iterations: u8,
|
||||||
|
pub frac_bits: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Global table registry. Keyed by (iterations, frac_bits) so any
|
||||||
|
/// combination of word width and iteration count shares a single
|
||||||
|
/// allocation. Thread-safe via interior Mutex on the map only;
|
||||||
|
/// once an Arc is cloned out, no lock is held during evaluation.
|
||||||
|
struct LutRegistry {
|
||||||
|
tables: Mutex<HashMap<(u8, u8), CordicTable>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
static REGISTRY: OnceLock<LutRegistry> = OnceLock::new();
|
||||||
|
|
||||||
|
fn registry() -> &'static LutRegistry {
|
||||||
|
REGISTRY.get_or_init(|| LutRegistry {
|
||||||
|
tables: Mutex::new(HashMap::new()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CordicTable {
|
||||||
|
/// Retrieve or compute the CORDIC table for the given parameters.
|
||||||
|
/// Repeated calls with the same (iterations, frac_bits) return
|
||||||
|
/// a clone of the same Arc'd data -- no recomputation.
|
||||||
|
pub fn get(iterations: u8, frac_bits: u8) -> Self {
|
||||||
|
let reg = registry();
|
||||||
|
let key = (iterations, frac_bits);
|
||||||
|
|
||||||
|
let mut map = reg.tables.lock().unwrap();
|
||||||
|
if let Some(cached) = map.get(&key) {
|
||||||
|
return cached.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
let table = Self::compute(iterations, frac_bits);
|
||||||
|
map.insert(key, table.clone());
|
||||||
|
table
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience: derive iterations and frac_bits from word_bits
|
||||||
|
/// using the standard CORDIC convention (iterations = word_bits,
|
||||||
|
/// frac_bits = word_bits - 1).
|
||||||
|
pub fn for_word_bits(word_bits: u8) -> Self {
|
||||||
|
Self::get(word_bits, word_bits - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compute(iterations: u8, frac_bits: u8) -> Self {
|
||||||
|
let scale = (1i64 << frac_bits) as f64;
|
||||||
|
|
||||||
|
let atan: Arc<[i64]> = (0..iterations)
|
||||||
|
.map(|i| {
|
||||||
|
let angle = (2.0f64).powi(-(i as i32)).atan();
|
||||||
|
(angle * scale).round() as i64
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.into();
|
||||||
|
|
||||||
|
let mut k = 1.0f64;
|
||||||
|
for i in 0..iterations {
|
||||||
|
k *= 1.0 / (1.0 + (2.0f64).powi(-2 * i as i32)).sqrt();
|
||||||
|
}
|
||||||
|
let gain = (k * scale).round() as i64;
|
||||||
|
|
||||||
|
CordicTable { atan, gain, iterations, frac_bits }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use crate::ops;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn cached_matches_raw() {
|
||||||
|
for bits in [8u8, 16, 32, 48, 64] {
|
||||||
|
let table = CordicTable::for_word_bits(bits);
|
||||||
|
let raw_atan = ops::atan_table(bits);
|
||||||
|
let raw_gain = ops::cordic_gain(bits, bits - 1);
|
||||||
|
|
||||||
|
assert_eq!(table.atan.len(), raw_atan.len(), "len mismatch at {bits}");
|
||||||
|
assert_eq!(&*table.atan, &raw_atan[..], "atan mismatch at {bits}");
|
||||||
|
assert_eq!(table.gain, raw_gain, "gain mismatch at {bits}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn deduplication() {
|
||||||
|
let a = CordicTable::for_word_bits(32);
|
||||||
|
let b = CordicTable::for_word_bits(32);
|
||||||
|
assert!(Arc::ptr_eq(&a.atan, &b.atan));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn different_widths_distinct() {
|
||||||
|
let a = CordicTable::for_word_bits(16);
|
||||||
|
let b = CordicTable::for_word_bits(32);
|
||||||
|
assert!(!Arc::ptr_eq(&a.atan, &b.atan));
|
||||||
|
assert_ne!(a.atan.len(), b.atan.len());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn custom_iteration_count() {
|
||||||
|
let t = CordicTable::get(16, 31);
|
||||||
|
assert_eq!(t.atan.len(), 16);
|
||||||
|
assert_eq!(t.frac_bits, 31);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -39,12 +39,54 @@ impl BVH {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Find the signed distance from point p to the mesh.
|
/// Find the signed distance from point p to the mesh.
|
||||||
/// Sign is determined by the normal of the nearest triangle.
|
///
|
||||||
|
/// Sign is determined by angle-weighted pseudonormal voting across all
|
||||||
|
/// triangles within a tolerance of the nearest distance. This handles
|
||||||
|
/// edge and vertex proximity correctly, where a single triangle's face
|
||||||
|
/// normal would give ambiguous or wrong sign.
|
||||||
pub fn signed_distance(&self, mesh: &TriangleMesh, p: Vec3) -> f64 {
|
pub fn signed_distance(&self, mesh: &TriangleMesh, p: Vec3) -> f64 {
|
||||||
let mut best_dist_sq = f64::INFINITY;
|
let mut best_dist_sq = f64::INFINITY;
|
||||||
let mut best_sign = 1.0f64;
|
self.query_nearest_dist(mesh, p, 0, &mut best_dist_sq);
|
||||||
self.query_nearest(mesh, p, 0, &mut best_dist_sq, &mut best_sign);
|
|
||||||
best_dist_sq.sqrt() * best_sign
|
if best_dist_sq == f64::INFINITY {
|
||||||
|
return f64::INFINITY;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all triangles within a relative tolerance of the best distance.
|
||||||
|
// This catches co-nearest triangles sharing an edge or vertex.
|
||||||
|
let eps_factor = 1.0 + 1e-4;
|
||||||
|
let threshold_sq = best_dist_sq * eps_factor * eps_factor + 1e-20;
|
||||||
|
let mut neighbors: Vec<(usize, Vec3)> = Vec::new(); // (tri_idx, closest_point)
|
||||||
|
self.collect_near(mesh, p, 0, threshold_sq, &mut neighbors);
|
||||||
|
|
||||||
|
let sign = if neighbors.len() <= 1 {
|
||||||
|
// Single closest triangle: use its face normal directly
|
||||||
|
if let Some(&(tri_idx, closest)) = neighbors.first() {
|
||||||
|
let to_point = p - closest;
|
||||||
|
let normal = mesh.triangles[tri_idx].normal();
|
||||||
|
if to_point.dot(normal) >= 0.0 { 1.0 } else { -1.0 }
|
||||||
|
} else {
|
||||||
|
1.0
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Multiple co-nearest triangles: angle-weighted normal vote.
|
||||||
|
// Weight each triangle's contribution by the solid angle it subtends
|
||||||
|
// at p, approximated by the triangle area divided by distance squared.
|
||||||
|
let mut weighted_dot = 0.0f64;
|
||||||
|
for &(tri_idx, closest) in &neighbors {
|
||||||
|
let tri = &mesh.triangles[tri_idx];
|
||||||
|
let to_point = p - closest;
|
||||||
|
let normal = tri.normal();
|
||||||
|
let area = tri.area();
|
||||||
|
let d_sq = to_point.dot(to_point);
|
||||||
|
// Weight: area / (distance^2 + epsilon) — larger, closer faces dominate
|
||||||
|
let weight = area / (d_sq + 1e-20);
|
||||||
|
weighted_dot += to_point.dot(normal) * weight;
|
||||||
|
}
|
||||||
|
if weighted_dot >= 0.0 { 1.0 } else { -1.0 }
|
||||||
|
};
|
||||||
|
|
||||||
|
best_dist_sq.sqrt() * sign
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Find the nearest triangle index and unsigned distance.
|
/// Find the nearest triangle index and unsigned distance.
|
||||||
|
|
@ -55,13 +97,13 @@ impl BVH {
|
||||||
(best_idx, best_dist_sq.sqrt())
|
(best_idx, best_dist_sq.sqrt())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn query_nearest(
|
/// Find the minimum squared distance from p to any triangle.
|
||||||
|
fn query_nearest_dist(
|
||||||
&self,
|
&self,
|
||||||
mesh: &TriangleMesh,
|
mesh: &TriangleMesh,
|
||||||
p: Vec3,
|
p: Vec3,
|
||||||
node_idx: usize,
|
node_idx: usize,
|
||||||
best_dist_sq: &mut f64,
|
best_dist_sq: &mut f64,
|
||||||
best_sign: &mut f64,
|
|
||||||
) {
|
) {
|
||||||
match &self.nodes[node_idx] {
|
match &self.nodes[node_idx] {
|
||||||
BVHNode::Leaf { bounds, first, count } => {
|
BVHNode::Leaf { bounds, first, count } => {
|
||||||
|
|
@ -71,13 +113,10 @@ impl BVH {
|
||||||
for i in *first..(*first + *count) {
|
for i in *first..(*first + *count) {
|
||||||
let tri_idx = self.tri_indices[i];
|
let tri_idx = self.tri_indices[i];
|
||||||
let tri = &mesh.triangles[tri_idx];
|
let tri = &mesh.triangles[tri_idx];
|
||||||
let (closest, dist) = tri.closest_point(p);
|
let (_, dist) = tri.closest_point(p);
|
||||||
let dist_sq = dist * dist;
|
let dist_sq = dist * dist;
|
||||||
if dist_sq < *best_dist_sq {
|
if dist_sq < *best_dist_sq {
|
||||||
*best_dist_sq = dist_sq;
|
*best_dist_sq = dist_sq;
|
||||||
let to_point = p - closest;
|
|
||||||
let normal = tri.normal();
|
|
||||||
*best_sign = if to_point.dot(normal) >= 0.0 { 1.0 } else { -1.0 };
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -91,16 +130,49 @@ impl BVH {
|
||||||
let dr = right_bounds.distance_to_point(p);
|
let dr = right_bounds.distance_to_point(p);
|
||||||
|
|
||||||
if dl < dr {
|
if dl < dr {
|
||||||
self.query_nearest(mesh, p, *left, best_dist_sq, best_sign);
|
self.query_nearest_dist(mesh, p, *left, best_dist_sq);
|
||||||
self.query_nearest(mesh, p, *right, best_dist_sq, best_sign);
|
self.query_nearest_dist(mesh, p, *right, best_dist_sq);
|
||||||
} else {
|
} else {
|
||||||
self.query_nearest(mesh, p, *right, best_dist_sq, best_sign);
|
self.query_nearest_dist(mesh, p, *right, best_dist_sq);
|
||||||
self.query_nearest(mesh, p, *left, best_dist_sq, best_sign);
|
self.query_nearest_dist(mesh, p, *left, best_dist_sq);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Collect all (triangle_index, closest_point) pairs within threshold_sq of p.
|
||||||
|
fn collect_near(
|
||||||
|
&self,
|
||||||
|
mesh: &TriangleMesh,
|
||||||
|
p: Vec3,
|
||||||
|
node_idx: usize,
|
||||||
|
threshold_sq: f64,
|
||||||
|
out: &mut Vec<(usize, Vec3)>,
|
||||||
|
) {
|
||||||
|
match &self.nodes[node_idx] {
|
||||||
|
BVHNode::Leaf { bounds, first, count } => {
|
||||||
|
if bounds.distance_to_point(p).powi(2) > threshold_sq {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for i in *first..(*first + *count) {
|
||||||
|
let tri_idx = self.tri_indices[i];
|
||||||
|
let tri = &mesh.triangles[tri_idx];
|
||||||
|
let (closest, dist) = tri.closest_point(p);
|
||||||
|
if dist * dist <= threshold_sq {
|
||||||
|
out.push((tri_idx, closest));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
BVHNode::Internal { bounds, left, right } => {
|
||||||
|
if bounds.distance_to_point(p).powi(2) > threshold_sq {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
self.collect_near(mesh, p, *left, threshold_sq, out);
|
||||||
|
self.collect_near(mesh, p, *right, threshold_sq, out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn query_nearest_idx(
|
fn query_nearest_idx(
|
||||||
&self,
|
&self,
|
||||||
mesh: &TriangleMesh,
|
mesh: &TriangleMesh,
|
||||||
|
|
@ -195,6 +267,120 @@ fn point_in_aabb(p: Vec3, b: &AABB) -> bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
const MAX_LEAF_SIZE: usize = 8;
|
const MAX_LEAF_SIZE: usize = 8;
|
||||||
|
const SAH_NUM_BINS: usize = 16;
|
||||||
|
const SAH_TRAVERSAL_COST: f64 = 1.0;
|
||||||
|
const SAH_INTERSECT_COST: f64 = 1.5;
|
||||||
|
|
||||||
|
/// Surface area of an AABB (used as probability proxy in SAH).
|
||||||
|
fn aabb_surface_area(b: &AABB) -> f64 {
|
||||||
|
let e = b.extent();
|
||||||
|
2.0 * (e.x * e.y + e.y * e.z + e.z * e.x)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// SAH bin for accumulating triangle bounds during split evaluation.
|
||||||
|
struct SAHBin {
|
||||||
|
bounds: AABB,
|
||||||
|
count: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SAHBin {
|
||||||
|
fn empty() -> Self {
|
||||||
|
Self { bounds: AABB::empty(), count: 0 }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Evaluate SAH cost for splitting at each bin boundary along the given axis.
|
||||||
|
/// Returns (best_split_bin, best_cost). Split puts bins [0..split] left, [split..N] right.
|
||||||
|
fn sah_best_split(
|
||||||
|
triangles: &[Triangle],
|
||||||
|
indices: &[usize],
|
||||||
|
centroids: &[Vec3],
|
||||||
|
start: usize,
|
||||||
|
end: usize,
|
||||||
|
parent_bounds: &AABB,
|
||||||
|
axis: usize,
|
||||||
|
) -> (usize, f64) {
|
||||||
|
let get_axis = |v: Vec3| match axis {
|
||||||
|
0 => v.x,
|
||||||
|
1 => v.y,
|
||||||
|
_ => v.z,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Centroid bounds (tighter than triangle bounds) for bin mapping
|
||||||
|
let mut centroid_min = f64::INFINITY;
|
||||||
|
let mut centroid_max = f64::NEG_INFINITY;
|
||||||
|
for &idx in &indices[start..end] {
|
||||||
|
let c = get_axis(centroids[idx]);
|
||||||
|
centroid_min = centroid_min.min(c);
|
||||||
|
centroid_max = centroid_max.max(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
let span = centroid_max - centroid_min;
|
||||||
|
if span < 1e-12 {
|
||||||
|
// All centroids coincide on this axis; can't split here
|
||||||
|
return (SAH_NUM_BINS / 2, f64::INFINITY);
|
||||||
|
}
|
||||||
|
let inv_span = SAH_NUM_BINS as f64 / span;
|
||||||
|
|
||||||
|
// Bin triangles
|
||||||
|
let mut bins: Vec<SAHBin> = (0..SAH_NUM_BINS).map(|_| SAHBin::empty()).collect();
|
||||||
|
|
||||||
|
for &idx in &indices[start..end] {
|
||||||
|
let c = get_axis(centroids[idx]);
|
||||||
|
let b = ((c - centroid_min) * inv_span) as usize;
|
||||||
|
let b = b.min(SAH_NUM_BINS - 1);
|
||||||
|
bins[b].bounds = bins[b].bounds.union(&AABB::from_triangle(&triangles[idx]));
|
||||||
|
bins[b].count += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sweep from left: accumulate bounds and counts for splits [0..i+1] | [i+1..N]
|
||||||
|
let mut left_area = vec![0.0f64; SAH_NUM_BINS - 1];
|
||||||
|
let mut left_count = vec![0usize; SAH_NUM_BINS - 1];
|
||||||
|
let mut running_bounds = AABB::empty();
|
||||||
|
let mut running_count = 0usize;
|
||||||
|
|
||||||
|
for i in 0..(SAH_NUM_BINS - 1) {
|
||||||
|
running_bounds = running_bounds.union(&bins[i].bounds);
|
||||||
|
running_count += bins[i].count;
|
||||||
|
left_area[i] = aabb_surface_area(&running_bounds);
|
||||||
|
left_count[i] = running_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sweep from right
|
||||||
|
let mut right_area = vec![0.0f64; SAH_NUM_BINS - 1];
|
||||||
|
let mut right_count = vec![0usize; SAH_NUM_BINS - 1];
|
||||||
|
running_bounds = AABB::empty();
|
||||||
|
running_count = 0;
|
||||||
|
|
||||||
|
for i in (0..(SAH_NUM_BINS - 1)).rev() {
|
||||||
|
running_bounds = running_bounds.union(&bins[i + 1].bounds);
|
||||||
|
running_count += bins[i + 1].count;
|
||||||
|
right_area[i] = aabb_surface_area(&running_bounds);
|
||||||
|
right_count[i] = running_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
let parent_area = aabb_surface_area(parent_bounds);
|
||||||
|
let inv_parent = if parent_area > 1e-12 { 1.0 / parent_area } else { 1.0 };
|
||||||
|
|
||||||
|
let mut best_cost = f64::INFINITY;
|
||||||
|
let mut best_bin = SAH_NUM_BINS / 2;
|
||||||
|
|
||||||
|
for i in 0..(SAH_NUM_BINS - 1) {
|
||||||
|
if left_count[i] == 0 || right_count[i] == 0 {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let cost = SAH_TRAVERSAL_COST
|
||||||
|
+ SAH_INTERSECT_COST * inv_parent
|
||||||
|
* (left_area[i] * left_count[i] as f64
|
||||||
|
+ right_area[i] * right_count[i] as f64);
|
||||||
|
if cost < best_cost {
|
||||||
|
best_cost = cost;
|
||||||
|
best_bin = i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(best_bin, best_cost)
|
||||||
|
}
|
||||||
|
|
||||||
fn build_recursive(
|
fn build_recursive(
|
||||||
triangles: &[Triangle],
|
triangles: &[Triangle],
|
||||||
|
|
@ -206,7 +392,6 @@ fn build_recursive(
|
||||||
) -> usize {
|
) -> usize {
|
||||||
let count = end - start;
|
let count = end - start;
|
||||||
|
|
||||||
// Compute bounds
|
|
||||||
let mut bounds = AABB::empty();
|
let mut bounds = AABB::empty();
|
||||||
for &idx in &indices[start..end] {
|
for &idx in &indices[start..end] {
|
||||||
bounds = bounds.union(&AABB::from_triangle(&triangles[idx]));
|
bounds = bounds.union(&AABB::from_triangle(&triangles[idx]));
|
||||||
|
|
@ -218,24 +403,82 @@ fn build_recursive(
|
||||||
return node_idx;
|
return node_idx;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Split along longest axis at centroid median
|
// SAH split: evaluate all three axes, pick lowest cost
|
||||||
let axis = bounds.longest_axis();
|
let leaf_cost = SAH_INTERSECT_COST * count as f64;
|
||||||
let mid = start + count / 2;
|
let mut best_axis = 0usize;
|
||||||
|
let mut best_bin = SAH_NUM_BINS / 2;
|
||||||
|
let mut best_cost = f64::INFINITY;
|
||||||
|
|
||||||
// Partial sort: partition around the median centroid on the chosen axis
|
for axis in 0..3 {
|
||||||
|
let (bin, cost) = sah_best_split(
|
||||||
|
triangles, indices, centroids, start, end, &bounds, axis,
|
||||||
|
);
|
||||||
|
if cost < best_cost {
|
||||||
|
best_cost = cost;
|
||||||
|
best_bin = bin;
|
||||||
|
best_axis = axis;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If SAH says a leaf is cheaper, or no valid split found, fall back to median
|
||||||
|
let use_median = best_cost >= leaf_cost || best_cost == f64::INFINITY;
|
||||||
|
|
||||||
|
let mid = if use_median {
|
||||||
|
// Median split along longest axis
|
||||||
|
let axis = bounds.longest_axis();
|
||||||
let get_axis = |v: Vec3| match axis {
|
let get_axis = |v: Vec3| match axis {
|
||||||
0 => v.x,
|
0 => v.x,
|
||||||
1 => v.y,
|
1 => v.y,
|
||||||
_ => v.z,
|
_ => v.z,
|
||||||
};
|
};
|
||||||
|
indices[start..end].sort_unstable_by(|&a, &b| {
|
||||||
// Simple partition: sort the range by centroid along axis
|
get_axis(centroids[a])
|
||||||
let index_slice = &mut indices[start..end];
|
.partial_cmp(&get_axis(centroids[b]))
|
||||||
index_slice.sort_unstable_by(|&a, &b| {
|
.unwrap_or(std::cmp::Ordering::Equal)
|
||||||
let ca = get_axis(centroids[a]);
|
|
||||||
let cb = get_axis(centroids[b]);
|
|
||||||
ca.partial_cmp(&cb).unwrap_or(std::cmp::Ordering::Equal)
|
|
||||||
});
|
});
|
||||||
|
start + count / 2
|
||||||
|
} else {
|
||||||
|
// Partition around the SAH split bin
|
||||||
|
let get_axis = |v: Vec3| match best_axis {
|
||||||
|
0 => v.x,
|
||||||
|
1 => v.y,
|
||||||
|
_ => v.z,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut centroid_min = f64::INFINITY;
|
||||||
|
let mut centroid_max = f64::NEG_INFINITY;
|
||||||
|
for &idx in &indices[start..end] {
|
||||||
|
let c = get_axis(centroids[idx]);
|
||||||
|
centroid_min = centroid_min.min(c);
|
||||||
|
centroid_max = centroid_max.max(c);
|
||||||
|
}
|
||||||
|
let span = centroid_max - centroid_min;
|
||||||
|
let inv_span = SAH_NUM_BINS as f64 / span;
|
||||||
|
|
||||||
|
// Partition: triangles in bins [0..=best_bin] go left
|
||||||
|
let slice = &mut indices[start..end];
|
||||||
|
let mut lo = 0usize;
|
||||||
|
let mut hi = slice.len();
|
||||||
|
while lo < hi {
|
||||||
|
let c = get_axis(centroids[slice[lo]]);
|
||||||
|
let b = ((c - centroid_min) * inv_span) as usize;
|
||||||
|
let b = b.min(SAH_NUM_BINS - 1);
|
||||||
|
if b <= best_bin {
|
||||||
|
lo += 1;
|
||||||
|
} else {
|
||||||
|
hi -= 1;
|
||||||
|
slice.swap(lo, hi);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mid = start + lo;
|
||||||
|
// Guard: if partition degenerates, fall back to midpoint
|
||||||
|
if mid == start || mid == end {
|
||||||
|
start + count / 2
|
||||||
|
} else {
|
||||||
|
mid
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let node_idx = nodes.len();
|
let node_idx = nodes.len();
|
||||||
nodes.push(BVHNode::Leaf { bounds: AABB::empty(), first: 0, count: 0 }); // placeholder
|
nodes.push(BVHNode::Leaf { bounds: AABB::empty(), first: 0, count: 0 }); // placeholder
|
||||||
|
|
@ -246,3 +489,93 @@ fn build_recursive(
|
||||||
nodes[node_idx] = BVHNode::Internal { bounds, left, right };
|
nodes[node_idx] = BVHNode::Internal { bounds, left, right };
|
||||||
node_idx
|
node_idx
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
fn make_unit_cube_mesh() -> TriangleMesh {
|
||||||
|
// 12 triangles forming a unit cube centered at origin
|
||||||
|
let h = 0.5;
|
||||||
|
let corners = [
|
||||||
|
Vec3::new(-h, -h, -h), Vec3::new( h, -h, -h),
|
||||||
|
Vec3::new( h, h, -h), Vec3::new(-h, h, -h),
|
||||||
|
Vec3::new(-h, -h, h), Vec3::new( h, -h, h),
|
||||||
|
Vec3::new( h, h, h), Vec3::new(-h, h, h),
|
||||||
|
];
|
||||||
|
// CCW winding viewed from outside each face
|
||||||
|
let faces: [(usize, usize, usize); 12] = [
|
||||||
|
(0,2,1), (0,3,2), // -Z face: normal points -Z
|
||||||
|
(4,5,6), (4,6,7), // +Z face: normal points +Z
|
||||||
|
(0,1,5), (0,5,4), // -Y face: normal points -Y
|
||||||
|
(2,3,7), (2,7,6), // +Y face: normal points +Y
|
||||||
|
(0,4,7), (0,7,3), // -X face: normal points -X
|
||||||
|
(1,2,6), (1,6,5), // +X face: normal points +X
|
||||||
|
];
|
||||||
|
let mut triangles = Vec::new();
|
||||||
|
let mut bounds = AABB::empty();
|
||||||
|
for &(a, b, c) in &faces {
|
||||||
|
let tri = Triangle { v: [corners[a], corners[b], corners[c]] };
|
||||||
|
for v in &tri.v {
|
||||||
|
bounds.min = bounds.min.component_min(*v);
|
||||||
|
bounds.max = bounds.max.component_max(*v);
|
||||||
|
}
|
||||||
|
triangles.push(tri);
|
||||||
|
}
|
||||||
|
TriangleMesh { triangles, bounds }
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn bvh_builds_for_cube() {
|
||||||
|
let mesh = make_unit_cube_mesh();
|
||||||
|
let bvh = BVH::build(&mesh);
|
||||||
|
assert!(!bvh.nodes.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn signed_distance_inside_cube() {
|
||||||
|
let mesh = make_unit_cube_mesh();
|
||||||
|
let bvh = BVH::build(&mesh);
|
||||||
|
let d = bvh.signed_distance(&mesh, Vec3::new(0.0, 0.0, 0.0));
|
||||||
|
assert!(d < 0.0, "origin should be inside cube, got {d}");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn signed_distance_outside_cube() {
|
||||||
|
let mesh = make_unit_cube_mesh();
|
||||||
|
let bvh = BVH::build(&mesh);
|
||||||
|
let d = bvh.signed_distance(&mesh, Vec3::new(2.0, 0.0, 0.0));
|
||||||
|
assert!(d > 0.0, "point far from cube should be outside, got {d}");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn signed_distance_on_edge_correct_sign() {
|
||||||
|
// Point near a cube edge: the multi-triangle voting should give
|
||||||
|
// consistent sign even though two face normals are involved.
|
||||||
|
let mesh = make_unit_cube_mesh();
|
||||||
|
let bvh = BVH::build(&mesh);
|
||||||
|
// Slightly outside the cube near the +X/+Y edge
|
||||||
|
let d = bvh.signed_distance(&mesh, Vec3::new(0.6, 0.6, 0.0));
|
||||||
|
assert!(d > 0.0, "point outside near edge should be positive, got {d}");
|
||||||
|
// Slightly inside near the same edge
|
||||||
|
let d2 = bvh.signed_distance(&mesh, Vec3::new(0.4, 0.4, 0.0));
|
||||||
|
assert!(d2 < 0.0, "point inside near edge should be negative, got {d2}");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
fn signed_distance_on_vertex_correct_sign() {
    // Near a cube corner three face normals participate in the vote;
    // an exterior point must still come out positive.
    let cube = make_unit_cube_mesh();
    let tree = BVH::build(&cube);
    let d = tree.signed_distance(&cube, Vec3::new(0.7, 0.7, 0.7));
    assert!(d > 0.0, "point outside near vertex should be positive, got {d}");
}
|
||||||
|
|
||||||
|
#[test]
fn nearest_triangle_finds_closest() {
    let cube = make_unit_cube_mesh();
    let tree = BVH::build(&cube);
    // From (1.5, 0, 0) the closest surface point lies on the +X face,
    // so the reported distance should be about 1.0.
    let (_, dist) = tree.nearest_triangle(&cube, Vec3::new(1.5, 0.0, 0.0));
    assert!((dist - 1.0).abs() < 0.01, "distance to +X face should be ~1.0, got {dist}");
}
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -237,6 +237,53 @@ pub struct TriangleMesh {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TriangleMesh {
|
impl TriangleMesh {
|
||||||
|
/// Remove degenerate triangles: zero/near-zero area, NaN/Inf vertices,
|
||||||
|
/// and collapsed edges. Threshold is relative to the mesh diagonal.
|
||||||
|
fn sanitize(&mut self) {
|
||||||
|
if self.triangles.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let diag = self.bounds.diagonal();
|
||||||
|
// Minimum area: square of 1e-7 times diagonal — catches slivers from
|
||||||
|
// CAD exports without rejecting intentionally thin geometry.
|
||||||
|
let min_area = (diag * 1e-7).powi(2) * 0.5;
|
||||||
|
// Minimum edge length squared
|
||||||
|
let min_edge_sq = (diag * 1e-8).powi(2);
|
||||||
|
|
||||||
|
self.triangles.retain(|tri| {
|
||||||
|
// Reject NaN/Inf vertices
|
||||||
|
for v in &tri.v {
|
||||||
|
if !v.x.is_finite() || !v.y.is_finite() || !v.z.is_finite() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Reject degenerate area
|
||||||
|
if tri.area() < min_area {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Reject collapsed edges (two vertices coincident)
|
||||||
|
let e0 = tri.v[1] - tri.v[0];
|
||||||
|
let e1 = tri.v[2] - tri.v[1];
|
||||||
|
let e2 = tri.v[0] - tri.v[2];
|
||||||
|
if e0.dot(e0) < min_edge_sq
|
||||||
|
|| e1.dot(e1) < min_edge_sq
|
||||||
|
|| e2.dot(e2) < min_edge_sq
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Recompute bounds after filtering
|
||||||
|
self.bounds = AABB::empty();
|
||||||
|
for tri in &self.triangles {
|
||||||
|
for v in &tri.v {
|
||||||
|
self.bounds.min = self.bounds.min.component_min(*v);
|
||||||
|
self.bounds.max = self.bounds.max.component_max(*v);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn from_stl(path: &Path) -> Result<Self> {
|
pub fn from_stl(path: &Path) -> Result<Self> {
|
||||||
let data = std::fs::read(path)
|
let data = std::fs::read(path)
|
||||||
.with_context(|| format!("reading {}", path.display()))?;
|
.with_context(|| format!("reading {}", path.display()))?;
|
||||||
|
|
@ -389,12 +436,102 @@ impl TriangleMesh {
|
||||||
let ext = path.extension()
|
let ext = path.extension()
|
||||||
.and_then(|e| e.to_str())
|
.and_then(|e| e.to_str())
|
||||||
.unwrap_or("");
|
.unwrap_or("");
|
||||||
match ext.to_ascii_lowercase().as_str() {
|
let mut mesh = match ext.to_ascii_lowercase().as_str() {
|
||||||
"stl" => Self::from_stl(path),
|
"stl" => Self::from_stl(path),
|
||||||
"obj" => Self::from_obj(path),
|
"obj" => Self::from_obj(path),
|
||||||
"3mf" => Self::from_3mf(path),
|
"3mf" => Self::from_3mf(path),
|
||||||
_ if ext.is_empty() => anyhow::bail!("no file extension"),
|
_ if ext.is_empty() => anyhow::bail!("no file extension"),
|
||||||
_ => anyhow::bail!("unsupported mesh format: .{ext}"),
|
_ => anyhow::bail!("unsupported mesh format: .{ext}"),
|
||||||
}
|
}?;
|
||||||
|
mesh.sanitize();
|
||||||
|
anyhow::ensure!(!mesh.triangles.is_empty(), "mesh contains no valid triangles after sanitization");
|
||||||
|
Ok(mesh)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for building a triangle from three corner points.
    fn tri(a: Vec3, b: Vec3, c: Vec3) -> Triangle {
        Triangle { v: [a, b, c] }
    }

    #[test]
    fn sanitize_removes_degenerate_triangles() {
        let good = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        // All three vertices collinear → zero area.
        let flat = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0),
        );
        // Two vertices coincident → collapsed edge.
        let pinched = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        let mut mesh = TriangleMesh {
            triangles: vec![good, flat, pinched],
            bounds: AABB {
                min: Vec3::new(0.0, 0.0, 0.0),
                max: Vec3::new(2.0, 1.0, 0.0),
            },
        };
        mesh.sanitize();
        assert_eq!(mesh.triangles.len(), 1, "only the good triangle should survive");
    }

    #[test]
    fn sanitize_removes_nan_vertices() {
        let good = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        // One NaN coordinate is enough to disqualify the whole triangle.
        let poisoned = tri(
            Vec3::new(f64::NAN, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        let mut mesh = TriangleMesh {
            triangles: vec![good, poisoned],
            bounds: AABB {
                min: Vec3::new(0.0, 0.0, 0.0),
                max: Vec3::new(1.0, 1.0, 0.0),
            },
        };
        mesh.sanitize();
        assert_eq!(mesh.triangles.len(), 1);
    }

    #[test]
    fn sanitize_keeps_valid_mesh() {
        // Two well-formed triangles forming a quad — nothing should be dropped.
        let lower = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        let upper = tri(
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(1.0, 1.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        let mut mesh = TriangleMesh {
            triangles: vec![lower, upper],
            bounds: AABB {
                min: Vec3::new(0.0, 0.0, 0.0),
                max: Vec3::new(1.0, 1.0, 0.0),
            },
        };
        mesh.sanitize();
        assert_eq!(mesh.triangles.len(), 2, "both valid triangles should survive");
    }

    #[test]
    fn sanitize_recomputes_bounds() {
        let good = tri(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        );
        // Fully collapsed triangle out at x=100; once removed, the bounds
        // must shrink back to the surviving geometry.
        let far_point = tri(
            Vec3::new(100.0, 0.0, 0.0),
            Vec3::new(100.0, 0.0, 0.0),
            Vec3::new(100.0, 0.0, 0.0),
        );
        let mut mesh = TriangleMesh {
            triangles: vec![good, far_point],
            bounds: AABB {
                min: Vec3::new(0.0, 0.0, 0.0),
                max: Vec3::new(100.0, 1.0, 0.0),
            },
        };
        mesh.sanitize();
        assert!(mesh.bounds.max.x < 2.0, "bounds should not include removed triangle");
    }
}
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue