diff --git a/.gitignore b/.gitignore
index c3593e21..ed973141 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,16 @@ share/python-wheels/
 .installed.cfg
 *.egg
 MANIFEST
+
+out.yaml
+outmap.yaml
+params.yaml
+find-baseline.svg
+*.data
+*.old
+eval.cpp
+evaluate.cpp
+perf.data
 
 # logos
 logos/
diff --git a/Cargo.toml b/Cargo.toml
index f920262e..35752732 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,6 +14,33 @@ repository = "https://github.com/alphal00p/gammaloop"
 [profile.dev-optim]
 inherits = "dev"
 opt-level = 2
+#lto=true
+
+[profile.bench]
+lto = "fat"
+
+[dev-dependencies]
+criterion = { version = "0.5", features = ["html_reports"] }
+
+[[bench]]
+name = "gamma_net"
+harness = false
+
+[[bench]]
+name = "evaluate_net"
+harness = false
+
+[[example]]
+name = "gamma_chains"
+path = "examples/Rust/Tensors/gamma_chain.rs"
+
+[[example]]
+name = "gamma_network"
+path = "examples/Rust/Tensors/gamma_network.rs"
+
+[[example]]
+name = "evaluate_network"
+path = "examples/Rust/Tensors/evaluate_network.rs"
 
 [dependencies]
 # You may need bleeding edge changes
@@ -32,8 +59,6 @@ lorentz_vector = { git = "https://github.com/benruijl/lorentz_vector", branch =
     "hyperdual_support",
     "f128_support",
 ] }
-num = "0.3"
-num-traits = "0.2"
 hyperdual = { git = "https://gitlab.com/benruijl/hyperdual" }
 rand = "0.8"
 rayon = "1.5"
@@ -44,10 +69,9 @@ colored = "*"
 yaml-rust = "0.4"
 libc = "0.2.0"
 statrs = "0.16.0"
-smallvec = "1.7"
+smallvec = { version = "1.7", features = ["const_generics"] }
 itertools = "0.8"
 smartstring = { version = "*", features = ["serde"] }
-ahash = "*"
 vectorize = "0.2.0"
 log = "*"
 env_logger = "*"
@@ -55,6 +79,26 @@ pyo3-log = "*"
 nalgebra = "0.32.3"
 num-complex = "0.4.4"
 rug = "1.22.0"
+wide = "0.7.13"
+arbitrary-int = { version = "1.2.6", features = ["num-traits"] }
+duplicate = "1.0.0"
+rustc-hash = "1.1.0"
+petgraph = "0.6.4"
+enum-try-as-inner = "0.1.1"
+indexmap = "2.2.2"
+nohash-hasher = "0.2.0"
+intmap = { git = "https://github.com/lcnbr/rust-intmap" }
+permutation = "0.4.1"
+slotmap = { version = "1.0.7", features = ["serde"] }
+ahash = { version = "0.8.8", features = ["serde"] }
+num = { version = "0.4.1", features = ["serde"] }
+pprof = { version = "0.13.0", features = ["flamegraph"] }
+derive_more = "0.99.17"
+rand_xoshiro = "0.6.0"
+funty = "2.0.0"
+block-id = "0.2.1"
+once_cell = "1.19.0"
+enum_delegate = { git = "https://gitlab.com/dawn_app/enum_delegate" }
 
 [dependencies.pyo3]
 features = ["multiple-pymethods"]
@@ -68,11 +112,11 @@ pyo3-build-config = "*"
 crate-type = ["cdylib", "lib"]
 name = "_gammaloop"
 required-features = ["python_api"]
-
+bench = false
 [[bin]]
 name = "cli"
 required-features = ["binary"]
-
+bench = false
 [features]
 extension-module = ["pyo3/extension-module"]
 default = ["python_api"]
diff --git a/benches/evaluate_net.rs b/benches/evaluate_net.rs
new file mode 100644
index 00000000..e8f2ca40
--- /dev/null
+++ b/benches/evaluate_net.rs
@@ -0,0 +1,158 @@
+use std::{fmt::Debug, ops::Neg};
+
+use _gammaloop::tensor::{
+    ufo::{euclidean_four_vector, gamma},
+    AbstractIndex, ContractionCountStructure, FallibleMul, MixedTensor, Representation,
+    SetTensorData, Slot, SparseTensor, TensorNetwork, TensorStructure,
+};
+
+use ahash::{AHashMap, HashMap};
+use criterion::{criterion_group, criterion_main, Criterion};
+
+use rand::{distributions::Uniform, Rng, SeedableRng};
+use rand_xoshiro::Xoroshiro64Star;
+use symbolica::{
+    domains::float::Complex,
+    representations::{Atom, AtomView},
+    state::State,
+};
+
+fn indices(n: i32, m: i32) -> Vec<i32> {
+    let spacings: [i32; 2] = [n, m];
+    let mut start = 1;
+    let mut ranges = Vec::new();
+
+    for &spacing in spacings.iter() {
+        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
+        start += spacing;
+    }
+
+    ranges.into_iter().flatten().collect()
+}
+
+fn gamma_net_param(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<MixedTensor<ContractionCountStructure>> {
+    let mut i: i32 = 0;
+    let mut contracting_index = 0.into();
+    let mut result: Vec<MixedTensor<ContractionCountStructure>> =
+        vec![euclidean_four_vector(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p: ContractionCountStructure = vec![Slot::from((
+                usize::try_from(*m).unwrap().into(),
+                Representation::Lorentz(4.into()),
+            ))]
+            .into_iter()
+            .collect();
+            i += 1;
+            let pid = State::get_or_insert_fn(&format!("p{}", i), None).unwrap();
+
+            result.push(p.shadow_with(pid).into());
+
+            result.push(gamma(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gamma(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+fn test_tensor<S>(structure: S) -> SparseTensor<Complex<f64>, S>
+where
+    S: TensorStructure,
+{
+    let mut rng: Xoroshiro64Star = Xoroshiro64Star::from_entropy();
+
+    let mut tensor = SparseTensor::empty(structure);
+
+    let density = tensor.size();
+
+    let multipliable = Uniform::new(1., 10.);
+
+    for _ in 0..density {
+        tensor
+            .set_flat(
+                rng.gen_range(0..tensor.size()),
+                Complex::<f64>::new(rng.sample(multipliable), rng.sample(multipliable)),
+            )
+            .unwrap();
+    }
+
+    tensor
+}
+
+fn const_map_gen<'a, 'b, I>(
+    params: &'a [MixedTensor<I>],
+    const_map: &mut HashMap<AtomView<'b>, symbolica::domains::float::Complex<f64>>,
+) where
+    'a: 'b,
+    I: TensorStructure + Clone + Debug,
+{
+    for (_i, p) in params.iter().enumerate() {
+        let pdata = test_tensor(p.structure().clone()).to_dense();
+        p.try_as_symbolic()
+            .unwrap()
+            .try_as_dense()
+            .unwrap()
+            .append_const_map(&pdata, const_map);
+    }
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+    let one = Complex::<f64>::new(1.0, 0.0);
+
+    let vbar = [
+        one.mul_fallible(3.0).unwrap(),
+        one.mul_fallible(3.1).unwrap(),
+        one.mul_fallible(3.2).unwrap(),
+        one.mul_fallible(3.3).unwrap(),
+    ];
+    let u = [
+        one.mul_fallible(4.0).unwrap(),
+        one.mul_fallible(4.1).unwrap(),
+        one.mul_fallible(4.2).unwrap(),
+        one.mul_fallible(4.3).unwrap(),
+    ];
+    let minkindices = indices(20, 24);
+
+    let mut net = gamma_net_param(&minkindices, vbar, u);
+    net.generate_params();
+    let params = net.params.clone();
+    println!("{:?}", params.len());
+    net.contract_algo(|tn| tn.edge_to_min_degree_node_with_depth(2));
+    let mut const_map = AHashMap::new();
+
+    let i = Atom::new_var(State::I);
+    const_map.insert(i.as_view(), Complex::<f64>::new(0., 1.));
+
+    let mut group = c.benchmark_group("evaluate_net");
+
+    group.bench_function("Evaluate_net", |b| {
+        b.iter_batched(
+            || net.clone(),
+            |mut net| {
+                const_map_gen(&params, &mut const_map);
+
+                net.evaluate_complex(&const_map);
+
+                net.contract();
+            },
+            criterion::BatchSize::SmallInput,
+        )
+    });
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
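Review note (illustrative, not part of the patch): `indices(n, m)` generates the Minkowski index labels that drive the benchmark — two blocks of consecutive positive labels, each closed by a `-1` sentinel. In `gamma_net_param`, a positive entry `m` inserts a parametric momentum slot `p{i}` contracted with a `gamma`, while a negative entry inserts a `gamma` whose Minkowski label is offset by 10000, so the sentinel gammas pair up with each other rather than with any momentum. A minimal, self-contained check of the helper:

    fn indices(n: i32, m: i32) -> Vec<i32> {
        let spacings = [n, m];
        let mut start = 1;
        let mut ranges = Vec::new();
        for &spacing in spacings.iter() {
            // one block of consecutive labels, then the -1 sentinel
            ranges.push((start..start + spacing).chain(std::iter::once(-1)));
            start += spacing;
        }
        ranges.into_iter().flatten().collect()
    }

    fn main() {
        // blocks of size 2 and 3, each followed by the sentinel
        assert_eq!(indices(2, 3), vec![1, 2, -1, 3, 4, 5, -1]);
    }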
diff --git a/benches/gamma_net.rs b/benches/gamma_net.rs
new file mode 100644
index 00000000..b447fa8e
--- /dev/null
+++ b/benches/gamma_net.rs
@@ -0,0 +1,147 @@
+use std::ops::Neg;
+
+use _gammaloop::tensor::{
+    ufo::{
+        euclidean_four_vector, euclidean_four_vector_sym, gamma, gammasym, mink_four_vector,
+        mink_four_vector_sym,
+    },
+    AbstractIndex, FallibleMul, HistoryStructure, NumTensor, TensorNetwork,
+};
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use num::ToPrimitive;
+use symbolica::domains::float::Complex;
+
+use symbolica::representations::Symbol;
+
+fn gamma_net_sym(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<NumTensor<HistoryStructure<Symbol>>> {
+    let mut i = 0;
+    let mut contracting_index = 0.into();
+    let mut result: Vec<NumTensor<HistoryStructure<Symbol>>> =
+        vec![euclidean_four_vector_sym(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p = [
+                Complex::<f64>::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
+            ];
+            i += 1;
+            result.push(mink_four_vector_sym(usize::try_from(*m).unwrap().into(), &p).into());
+            result.push(gammasym(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gammasym(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector_sym(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+fn gamma_net(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<NumTensor> {
+    let mut i = 0;
+    let mut contracting_index = 0.into();
+    let mut result: Vec<NumTensor> = vec![euclidean_four_vector(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p = [
+                Complex::<f64>::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
+            ];
+            i += 1;
+            result.push(mink_four_vector(usize::try_from(*m).unwrap().into(), &p).into());
+            result.push(gamma(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gamma(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+fn indices(n: i32, m: i32) -> Vec<i32> {
+    let spacings: [i32; 2] = [n, m];
+    let mut start = 1;
+    let mut ranges = Vec::new();
+
+    for &spacing in spacings.iter() {
+        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
+        start += spacing;
+    }
+
+    ranges.into_iter().flatten().collect()
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+    let one = Complex::<f64>::new(1.0, 0.0);
+    let _zero = Complex::<f64>::new(0.0, 0.0);
+
+    let vbar = [
+        one.mul_fallible(3.0).unwrap(),
+        one.mul_fallible(3.1).unwrap(),
+        one.mul_fallible(3.2).unwrap(),
+        one.mul_fallible(3.3).unwrap(),
+    ];
+    let u = [
+        one.mul_fallible(4.0).unwrap(),
+        one.mul_fallible(4.1).unwrap(),
+        one.mul_fallible(4.2).unwrap(),
+        one.mul_fallible(4.3).unwrap(),
+    ];
+    let minkindices = indices(20, 24);
+
+    let netsym = gamma_net_sym(&minkindices, vbar, u);
+    let net = gamma_net(&minkindices, vbar, u);
+
+    let mut group = c.benchmark_group("gamma_net");
+
+    group.bench_function("gamma_net_contract_sym", |b| {
+        b.iter_batched(
+            || netsym.clone(),
+            |mut netsym| {
+                netsym.contract();
+            },
+            criterion::BatchSize::SmallInput,
+        )
+    });
+
group.bench_function("gamma_net_contraction", |b| { + b.iter_batched( + || net.clone(), + |mut net| { + net.contract(); + }, + criterion::BatchSize::SmallInput, + ) + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/examples/Rust/Tensors/evaluate_network.rs b/examples/Rust/Tensors/evaluate_network.rs new file mode 100644 index 00000000..213d1072 --- /dev/null +++ b/examples/Rust/Tensors/evaluate_network.rs @@ -0,0 +1,167 @@ +use std::fmt::Debug; +use std::ops::Neg; + +use _gammaloop::tensor::{ + ufo::{euclidean_four_vector, gamma}, + AbstractIndex, ContractionCountStructure, FallibleMul, HasTensorData, MixedTensor, + Representation, SetTensorData, Slot, SparseTensor, TensorNetwork, TensorStructure, +}; +use ahash::{AHashMap, HashMap}; + +use rand::{distributions::Uniform, Rng, SeedableRng}; +use rand_xoshiro::Xoroshiro64Star; +use symbolica::{ + domains::float::Complex, + representations::{Atom, AtomView}, + state::State, +}; + +fn gamma_net_param( + minkindices: &[i32], + vbar: [Complex; 4], + u: [Complex; 4], +) -> TensorNetwork> { + let mut i: i32 = 0; + let mut contracting_index = 0.into(); + let mut result: Vec> = + vec![euclidean_four_vector(contracting_index, &vbar).into()]; + for m in minkindices { + let ui = contracting_index; + contracting_index += 1.into(); + let uj = contracting_index; + if *m > 0 { + let p: ContractionCountStructure = vec![Slot::from(( + usize::try_from(*m).unwrap().into(), + Representation::Lorentz(4.into()), + ))] + .into_iter() + .collect(); + i += 1; + let pid = State::get_or_insert_fn(&format!("p{}", i), None).unwrap(); + + result.push(p.shadow_with(pid).into()); + + result.push(gamma(usize::try_from(*m).unwrap().into(), (ui, uj)).into()); + } else { + result.push( + gamma( + AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000), + (ui, uj), + ) + .into(), + ); + } + } + result.push(euclidean_four_vector(contracting_index, &u).into()); + TensorNetwork::from(result) +} + +fn test_tensor(structure: S) -> SparseTensor, S> +where + S: TensorStructure, +{ + let mut rng: Xoroshiro64Star = Xoroshiro64Star::from_entropy(); + + let mut tensor = SparseTensor::empty(structure); + + let density = tensor.size(); + + let multipliable = Uniform::new(1., 10.); + + for _ in 0..density { + tensor + .set_flat( + rng.gen_range(0..tensor.size()), + Complex::::new(rng.sample(multipliable), rng.sample(multipliable)), + ) + .unwrap(); + } + + tensor +} +fn const_map_gen<'a, 'b, I>( + params: &'a [MixedTensor], + const_map: &mut HashMap, symbolica::domains::float::Complex>, +) where + 'a: 'b, + I: TensorStructure + Clone + Debug, +{ + for (_i, p) in params.iter().enumerate() { + let pdata = test_tensor(p.structure().clone()).to_dense(); + p.try_as_symbolic() + .unwrap() + .try_as_dense() + .unwrap() + .append_const_map(&pdata, const_map); + } +} + +fn main() { + let one = Complex::::new(1.0, 0.0); + + let notnorm: u8 = 0b10000000; + let mut f: u8 = 3; + f |= notnorm; + println!("{:?}", f); + f |= notnorm; + println!("{:?}", f); + + let vbar = [ + one.mul_fallible(3.0).unwrap(), + one.mul_fallible(3.1).unwrap(), + one.mul_fallible(3.2).unwrap(), + one.mul_fallible(3.3).unwrap(), + ]; + let u = [ + one.mul_fallible(4.0).unwrap(), + one.mul_fallible(4.1).unwrap(), + one.mul_fallible(4.2).unwrap(), + one.mul_fallible(4.3).unwrap(), + ]; + let spacings: [i32; 2] = [2, 4]; + let mut start = 1; + let mut ranges = Vec::new(); + + for &spacing in spacings.iter() { + ranges.push((start..start + spacing).chain(std::iter::once(-1))); 
diff --git a/examples/Rust/Tensors/evaluate_network.rs b/examples/Rust/Tensors/evaluate_network.rs
new file mode 100644
index 00000000..213d1072
--- /dev/null
+++ b/examples/Rust/Tensors/evaluate_network.rs
@@ -0,0 +1,167 @@
+use std::fmt::Debug;
+use std::ops::Neg;
+
+use _gammaloop::tensor::{
+    ufo::{euclidean_four_vector, gamma},
+    AbstractIndex, ContractionCountStructure, FallibleMul, HasTensorData, MixedTensor,
+    Representation, SetTensorData, Slot, SparseTensor, TensorNetwork, TensorStructure,
+};
+use ahash::{AHashMap, HashMap};
+
+use rand::{distributions::Uniform, Rng, SeedableRng};
+use rand_xoshiro::Xoroshiro64Star;
+use symbolica::{
+    domains::float::Complex,
+    representations::{Atom, AtomView},
+    state::State,
+};
+
+fn gamma_net_param(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<MixedTensor<ContractionCountStructure>> {
+    let mut i: i32 = 0;
+    let mut contracting_index = 0.into();
+    let mut result: Vec<MixedTensor<ContractionCountStructure>> =
+        vec![euclidean_four_vector(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p: ContractionCountStructure = vec![Slot::from((
+                usize::try_from(*m).unwrap().into(),
+                Representation::Lorentz(4.into()),
+            ))]
+            .into_iter()
+            .collect();
+            i += 1;
+            let pid = State::get_or_insert_fn(&format!("p{}", i), None).unwrap();
+
+            result.push(p.shadow_with(pid).into());
+
+            result.push(gamma(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gamma(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+fn test_tensor<S>(structure: S) -> SparseTensor<Complex<f64>, S>
+where
+    S: TensorStructure,
+{
+    let mut rng: Xoroshiro64Star = Xoroshiro64Star::from_entropy();
+
+    let mut tensor = SparseTensor::empty(structure);
+
+    let density = tensor.size();
+
+    let multipliable = Uniform::new(1., 10.);
+
+    for _ in 0..density {
+        tensor
+            .set_flat(
+                rng.gen_range(0..tensor.size()),
+                Complex::<f64>::new(rng.sample(multipliable), rng.sample(multipliable)),
+            )
+            .unwrap();
+    }
+
+    tensor
+}
+
+fn const_map_gen<'a, 'b, I>(
+    params: &'a [MixedTensor<I>],
+    const_map: &mut HashMap<AtomView<'b>, symbolica::domains::float::Complex<f64>>,
+) where
+    'a: 'b,
+    I: TensorStructure + Clone + Debug,
+{
+    for (_i, p) in params.iter().enumerate() {
+        let pdata = test_tensor(p.structure().clone()).to_dense();
+        p.try_as_symbolic()
+            .unwrap()
+            .try_as_dense()
+            .unwrap()
+            .append_const_map(&pdata, const_map);
+    }
+}
+
+fn main() {
+    let one = Complex::<f64>::new(1.0, 0.0);
+
+    let notnorm: u8 = 0b10000000;
+    let mut f: u8 = 3;
+    f |= notnorm;
+    println!("{:?}", f);
+    f |= notnorm;
+    println!("{:?}", f);
+
+    let vbar = [
+        one.mul_fallible(3.0).unwrap(),
+        one.mul_fallible(3.1).unwrap(),
+        one.mul_fallible(3.2).unwrap(),
+        one.mul_fallible(3.3).unwrap(),
+    ];
+    let u = [
+        one.mul_fallible(4.0).unwrap(),
+        one.mul_fallible(4.1).unwrap(),
+        one.mul_fallible(4.2).unwrap(),
+        one.mul_fallible(4.3).unwrap(),
+    ];
+    let spacings: [i32; 2] = [2, 4];
+    let mut start = 1;
+    let mut ranges = Vec::new();
+
+    for &spacing in spacings.iter() {
+        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
+        start += spacing;
+    }
+
+    let vec: Vec<i32> = ranges.into_iter().flatten().collect();
+
+    let mut net = gamma_net_param(&vec, vbar, u);
+    net.generate_params();
+    let mut const_map = AHashMap::new();
+    let params = net.params.clone();
+    const_map_gen(&params, &mut const_map);
+
+    let i = Atom::new_var(State::I);
+    const_map.insert(i.as_view(), Complex::<f64>::new(0., 1.));
+
+    // net.contract_algo(|tn| tn.edge_to_min_degree_node_with_depth(2));
+
+    // for (i, n) in &net.graph.nodes {
+    //     match n {
+    //         MixedTensor::Symbolic(s) => {
+    //             for (_, a) in s.try_as_dense().unwrap().iter_flat() {
+    //                 println!("{}", a);
+    //             }
+    //         }
+    //         _ => {}
+    //     }
+    // }
+
+    // for p in const_map.keys() {
+    //     if let AtomView::Fun(f) = p {
+    //         println!(
+    //             "Map {}, with id {:?},{:?}",
+    //             State::get_name(f.get_symbol()),
+    //             f.get_symbol(),
+    //             f
+    //         );
+    //     }
+    // }
+
+    net.evaluate_complex(&const_map);
+    net.contract();
+    println!("{:?}", net.result().try_as_complex().unwrap().data()[0]);
+}
diff --git a/examples/Rust/Tensors/gamma_chain.rs b/examples/Rust/Tensors/gamma_chain.rs
new file mode 100644
index 00000000..379384ac
--- /dev/null
+++ b/examples/Rust/Tensors/gamma_chain.rs
@@ -0,0 +1,744 @@
+// Gamma chain example
+
+use std::{ops::Neg, time::Instant};
+
+use _gammaloop::tensor::{
+    parametric::MixedTensor,
+    ufo::{
+        euclidean_four_vector, euclidean_four_vector_sym, gammasym, mink_four_vector,
+        mink_four_vector_sym, param_euclidean_four_vector, param_mink_four_vector,
+    },
+    AbstractIndex, Contract, DenseTensor, FallibleMul, HasTensorData, HistoryStructure, IntoId,
+    NumTensor, SparseTensor, TensorNetwork,
+};
+
+use num::traits::ToPrimitive;
+use symbolica::domains::float::Complex;
+use symbolica::representations::{Atom, Symbol};
+
+// #[allow(dead_code)]
+// fn gamma_trace<T>(minkindices: &[i32]) -> SparseTensor<Complex<T>>
+// where
+//     T: TrySmallestUpgrade<T, LCM = T>
+//         + One
+//         + Zero
+//         + Default
+//         + Copy
+//         + Neg<Output = T>
+//         + FallibleAddAssign<T>
+//         + FallibleSubAssign<T>,
+//     for<'a, 'b> &'a T: FallibleMul<&'b T, Output = T> + TrySmallestUpgrade<&'b T, LCM = T>,
+// {
+//     let mink = minkindices[0];
+//     let mut result = gamma(usize::try_from(mink).unwrap().into(), (0.into(), 1.into()));
+//     let mut contracting_index = 1;
+//     for m in minkindices[1..].iter() {
+//         let ui = contracting_index;
+//         contracting_index += 1;
+
+//         let uj = if contracting_index < minkindices.len() {
+//             contracting_index
+//         } else {
+//             0
+//         };
+
+//         if *m > 0 {
+//             let gamma: SparseTensor<Complex<T>> =
+//                 gamma(usize::try_from(*m).unwrap().into(), (ui.into(), uj.into()));
+//             result = gamma.contract(&result).unwrap();
+//         } else {
+//             result = gamma::<T>(
+//                 usize::try_from(m.neg()).unwrap().into(),
+//                 (ui.into(), uj.into()),
+//             )
+//             .contract(&result)
+//             .unwrap();
+//         }
+//     }
+//     result
+// }
+
+// #[allow(dead_code)]
+// fn gamma_chain<T>(minkindices: &[i32]) -> SparseTensor<Complex<T>>
+// where
+//     T: Num
+//         + Copy
+//         + Neg<Output = T>
+//         + Debug
+//         + AddAssign
+//         + SubAssign
+//         + MulAssign
+//         + DivAssign
+//         + RemAssign
+//         + Default,
+// {
+//     let mink = minkindices[0];
+//     let mut result = gamma(usize::try_from(mink).unwrap().into(), (0.into(), 1.into()));
+//     let mut contracting_index = 1;
+//     for m in minkindices[1..].iter() {
+//         let ui = contracting_index;
+//         contracting_index += 1;
+
+//         let uj = contracting_index;
+
+//         if *m > 0 {
+//             let gamma: SparseTensor<Complex<T>> =
+//                 gamma(usize::try_from(*m).unwrap().into(), (ui.into(), uj.into()));
+//             result = gamma.contract(&result).unwrap();
+//         } else {
+//             result = gamma::<T>(
+//                 AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+//                 (ui.into(), uj.into()),
+//             )
+//             .contract(&result)
+//             .unwrap();
+//         }
+//     }
+//     result
+// }
+
+#[allow(dead_code)]
+fn gamma_net(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<NumTensor<HistoryStructure<Symbol>>> {
+    let mut i = 0;
+    let mut contracting_index: AbstractIndex = 0.into();
+    let mut result: Vec<NumTensor<HistoryStructure<Symbol>>> =
+        vec![euclidean_four_vector_sym(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p = [
+                Complex::<f64>::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
+            ];
+            i += 1;
+            result.push(mink_four_vector_sym(usize::try_from(*m).unwrap().into(), &p).into());
+            result.push(gammasym(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gammasym(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector_sym(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+#[allow(dead_code)]
+fn defered_chain(
+    minkindices: &[i32],
+    gamma_chain: &SparseTensor<Complex<f64>>,
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> DenseTensor<Complex<f64>> {
+    let mut result = euclidean_four_vector(0.into(), &vbar);
+    result = gamma_chain.contract(&result).unwrap();
+    result = result
+        .contract(&euclidean_four_vector(minkindices.len().into(), &u))
+        .unwrap();
+
+    let mut i = 0;
+    for m in minkindices {
+        if *m > 0 {
+            let p = [
+                Complex::<f64>::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
+            ];
+            i += 1;
+            let pmu = mink_four_vector(usize::try_from(*m).unwrap().into(), &p);
+            result = pmu.contract(&result).unwrap();
+        }
+    }
+    result
+}
+
+#[allow(dead_code)]
+fn gamma_net_param(minkindices: &[i32]) -> TensorNetwork<MixedTensor<HistoryStructure<Symbol>>> {
+    let mut i = 0;
+    let mut contracting_index: AbstractIndex = 0.into();
+    let mut result: Vec<MixedTensor<HistoryStructure<Symbol>>> =
+        vec![param_euclidean_four_vector(contracting_index, "vbar".into_id()).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let pname = format!("p{}", i).into_id();
+            i += 1;
+            result.push(param_mink_four_vector(usize::try_from(*m).unwrap().into(), pname).into());
+            result.push(gammasym(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gammasym(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(param_euclidean_four_vector(contracting_index, "u".into_id()).into());
+    TensorNetwork::from(result)
+}
+
+#[allow(dead_code)]
+fn dump_c_with_func(_levels: Vec<Vec<(Symbol, Vec<Atom>)>>) {}
+
+#[allow(unused_variables)]
+fn main() {
+    let one = Complex::<f64>::new(1.0, 0.0);
+    let zero = Complex::<f64>::new(0.0, 0.0);
+
+    let vbar = [
+        one.mul_fallible(3.0).unwrap(),
+        one.mul_fallible(3.1).unwrap(),
+        one.mul_fallible(3.2).unwrap(),
+        one.mul_fallible(3.3).unwrap(),
+    ];
+    let u = [
+        one.mul_fallible(4.0).unwrap(),
+        one.mul_fallible(4.1).unwrap(),
+        one.mul_fallible(4.2).unwrap(),
+        one.mul_fallible(4.3).unwrap(),
+    ];
+
+    let spacings: [i32; 2] = [20, 24];
+    let mut start = 1;
+    let mut ranges = Vec::new();
+
+    for &spacing in spacings.iter() {
+        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
+        start += spacing;
+    }
+
+    let vec: Vec<i32> = ranges.into_iter().flatten().collect();
+
+    println!("{:?}", vec);
+
+    // // let vec = (1..=3).collect::<Vec<_>>();
+    // // let start = Instant::now();
+    // // let chain = gamma_chain(&vec); //, vbar, u);
+    // // let duration = start.elapsed();
+
+    // // println!(
+    // //     "Gamma chain {:?} gammas, size {:?} in {:?}",
+    // //     vec.len(),
+    // //     chain.size(),
+    // //     duration
+    // // );
+
+    // // let start = Instant::now();
+    // // let chain = defered_chain(&vec, &chain, vbar, u);
+    // // let duration = start.elapsed();
+
+    // // println!(
+    // //     "Defered pslash with {:?} gammas, size {:?} in {:?}, gives {:?}",
+    // //     vec.len(),
+    // //     chain.size(),
+    // //     duration,
+    // //     chain.data,
+    // // );
+
+    let startfull = Instant::now();
+
+    let mut chain = gamma_net(&vec, vbar, u);
+    println!("{}", chain.graph.edges.len());
+    println!("{}", chain.graph.nodes.len());
+    println!("{}", chain.graph.involution.len());
+    println!("{}", chain.graph.neighbors.len());
+
+    println!("{}", chain.dot());
+    let start = Instant::now();
+    chain.contract();
+    let duration = start.elapsed();
+    let durationfull = startfull.elapsed();
+
+    println!("{}", chain.dot());
+
+    println!(
+        "Gamma net with {} gammas, fully numeric, takes {:?} for the contraction, and {:?} with initialization",
+        vec.len(),
+        duration,
+        durationfull
+    );
+
+    // [Complex { re: 5.341852612369398e16, im: -136854212797686.44 }]
+    // [Complex { re: 5.3418526123694e16, im: -136854212797684.0 }] for 20, 24
+    println!(
+        "Result: {:?}",
+        chain.result().try_as_complex().unwrap().data()
+    );
+
+    // // let mut chain = gamma_net(&vec, vbar, u);
+    // let ws: Workspace = Workspace::new();
+
+    // let atom = Atom::parse("A+P", &mut &ws).unwrap();
+
+    // let printops = PrintOptions {
+    //     terms_on_new_line: false,
+    //     color_top_level_sum: false,
+    //     color_builtin_functions: false,
+    //     print_finite_field: false,
+    //     explicit_rational_polynomial: false,
+    //     multiplication_operator: '*',
+    //     square_brackets_for_function: false,
+    //     number_thousands_separator: None,
+    //     num_exp_as_superscript: false,
+    //     latex: false,
+    // };
+    // let print = AtomPrinter::new_with_options(atom.as_view(), printops, &state);
+
+    // let satom = format!("{}", print);
+    // let natom = Atom::parse(&satom, &mut &ws).unwrap();
+
+    // println!("Print {}", natom.printer(&state));
+    // // // println!("{}", chain.dot());
+    // // let start = Instant::now();
+    // // chain.contract_sym(& &ws);
+    // // let duration = start.elapsed();
+    // // println!(
+    // //     "Benchmark net {:?} gammas, size in {:?}",
+    // //     vec.len(),
+    // //     duration,
+    // // );
+
+    // // println!("{:?}", chain.result().is_scalar());
+
+    // // println!("{}", chain.result().structure());
+
+    // // let mut chain_param = gamma_net_param(&vec, &mut &ws);
+
+    // // println!("{}", chain_param.dot());
+    // // let start = Instant::now();
+    // // chain_param.contract_sym_depth(5, & &ws);
+    // // let duration = start.elapsed();
+    // // // println!(
+    // // //     "Benchmark net param {:?} gammas, size in {:?}",
+    // // //     vec.len(),
+    // // //     duration,
+    // // // );
+    // // println!("{}", chain_param.dot());
+
+    // // println!("{:?}", chain_param.result().is_scalar());
+
+    // // println!("{}", chain_param.result().structure());
+
+    // let mut chain_param = gamma_net_param(&vec, &mut &ws);
+
+    // println!("{}", chain_param.dotsym(&state));
+    // let params: Vec<Atom> = chain_param
+    //     .clone()
+    //
.to_symbolic_tensor_vec() + // .into_iter() + // .flat_map(|x| x.data()) + // .collect(); + + // let paramstr = params + // .iter() + // .map(|a| { + // format!( + // "{}", + // AtomPrinter::new_with_options(a.as_view(), printops, &state) + // ) + // }) + // .collect::>(); + + // serde_yaml::to_writer(std::fs::File::create("params.yaml").unwrap(), ¶mstr).unwrap(); + + // let paramstr: Vec = + // serde_yaml::from_reader(std::fs::File::open("params.yaml").unwrap()).unwrap(); + + // let params: Vec = paramstr + // .iter() + // .map(|x| Atom::parse(x, &mut &ws).unwrap()) + // .collect(); + + // for p in params { + // print!("{}", p.printer(&state)); + // } + + // chain_param.contract_sym_depth(9, & &ws); + + // let mut shadow = chain_param.symbolic_shadow("S", &mut &ws); + // // println!("{}", chain_param.dotsym(&state)); + // let a = chain_param + // .clone() + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| { + // ( + // state.get_name(*x.name().unwrap()).clone(), + // x.data() + // .into_iter() + // .map(|x| { + // format!( + // "{}", + // AtomPrinter::new_with_options(x.as_view(), printops, &state) + // ) + // .into() + // }) + // .collect(), + // ) + // }) + // .collect(); + + // let amap = chain_param + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| x.symhashmap(*x.name().unwrap(), &mut &ws)) + // .collect::>(); + + // let amapstr = amap + // .iter() + // .map(|x| { + // let mut a = HashMap::new(); + // for (k, v) in x.iter() { + // a.insert( + // format!( + // "{}", + // AtomPrinter::new_with_options(k.as_view(), printops, &state) + // ) + // .into(), + // format!( + // "{}", + // AtomPrinter::new_with_options(v.as_view(), printops, &state) + // ) + // .into(), + // ); + // } + // a + // }) + // .collect::>(); + + // println!("{}", shadow.dotsym(&state)); + // let start = Instant::now(); + // shadow.contract_sym_depth(10, & &ws); + // let duration = start.elapsed(); + + // // println!( + // // "Shadow net param {:?} gammas, size in {:?}", + // // vec.len(), + // // duration, + // // ); + + // let mut shadow2 = shadow.symbolic_shadow("T", &mut &ws); + // println!("{}", shadow.dotsym(&state)); + // let b: Vec<(String, Vec)> = shadow + // .clone() + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| { + // ( + // state.get_name(*x.name().unwrap()).clone(), + // x.data() + // .into_iter() + // .map(|x| { + // format!( + // "{}", + // AtomPrinter::new_with_options(x.as_view(), printops, &state) + // ) + // .into() + // }) + // .collect(), + // ) + // }) + // .collect(); + + // let bmap = shadow + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| x.symhashmap(*x.name().unwrap(), &mut &ws)) + // .collect::>(); + + // let bmapstr = bmap + // .iter() + // .map(|x| { + // let mut a = HashMap::new(); + // for (k, v) in x.iter() { + // a.insert( + // format!( + // "{}", + // AtomPrinter::new_with_options(k.as_view(), printops, &state) + // ) + // .into(), + // format!( + // "{}", + // AtomPrinter::new_with_options(v.as_view(), printops, &state) + // ) + // .into(), + // ); + // } + // a + // }) + // .collect::>(); + + // println!("{}", shadow2.dotsym(&state)); + // let start = Instant::now(); + // shadow2.contract_sym_depth(10, & &ws); + // let duration = start.elapsed(); + + // // println!( + // // "Shadow2 net param {:?} gammas, size in {:?}", + // // vec.len(), + // // duration, + // // ); + + // shadow2.namesym("U", &mut state); + // println!("{}", shadow2.dotsym(&state)); + + // let c: Vec<(String, Vec)> = shadow2 + // .clone() + // 
.to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| (state.get_name(*x.name().unwrap()).clone(), x.data())) + // .collect(); + + // let e: Vec<(String, Vec)> = shadow2 + // .clone() + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| { + // ( + // state.get_name(*x.name().unwrap()).clone(), + // x.data() + // .into_iter() + // .map(|x| { + // format!( + // "{}", + // AtomPrinter::new_with_options(x.as_view(), printops, &state) + // ) + // .into() + // }) + // .collect(), + // ) + // }) + // .collect(); + + // let cmap = shadow2 + // .to_symbolic_tensor_vec() + // .into_iter() + // .map(|x| x.symhashmap(*x.name().unwrap(), &mut &ws)) + // .collect::>(); + + // let cmapstr = cmap + // .iter() + // .map(|x| { + // let mut a = HashMap::new(); + // for (k, v) in x.iter() { + // a.insert( + // format!( + // "{}", + // AtomPrinter::new_with_options(k.as_view(), printops, &state) + // ) + // .into(), + // format!( + // "{}", + // AtomPrinter::new_with_options(v.as_view(), printops, &state) + // ) + // .into(), + // ); + // } + // a + // }) + // .collect::>(); + + // let d = e + // .iter() + // .map(|(s, v)| { + // ( + // s, + // v.iter() + // .map(|x| { + // println!("Hi: {}", x); + // Atom::parse(x, &mut &ws).unwrap() + // }) + // .collect::>(), + // ) + // }) + // .collect::>(); + + // println!("{:?}", e); + + // // for (s, v) in d.iter() { + // // for x in v.iter() { + // // println!("{}", x.printer(&state)); + // // } + // // } + + // for (s, v) in c.iter() { + // for x in v.iter() { + // println!("{}", x.printer(&state)); + // } + // } + + // let out: Vec)>> = vec![a, b, e]; + // let outmap: Vec>> = vec![amapstr, bmapstr, cmapstr]; + + // serde_yaml::to_writer(std::fs::File::create("outmap.yaml").unwrap(), &outmap).unwrap(); + + // serde_yaml::to_writer(std::fs::File::create("out.yaml").unwrap(), &out).unwrap(); + + // let from_file: Vec)>> = + // serde_yaml::from_reader(std::fs::File::open("out.yaml").unwrap()).unwrap(); + + // let from_file_map: Vec>> = + // serde_yaml::from_reader(std::fs::File::open("outmap.yaml").unwrap()).unwrap(); + + // let levelsmap = from_file_map + // .iter() + // .map(|x| { + // x.iter() + // .map(|x| { + // let mut a = HashMap::new(); + // for (k, v) in x.iter() { + // a.insert( + // Atom::parse(k, &mut &ws).unwrap(), + // Atom::parse(v, &mut &ws).unwrap(), + // ); + // } + // a + // }) + // .collect::>() + // }) + // .collect::>(); + + // let levels: Vec)>> = from_file + // .iter() + // .map(|x| { + // x.iter() + // .map(|(s, v)| { + // ( + // state.get_or_insert_fn(s, None).unwrap(), + // v.iter() + // .map(|x| Atom::parse(x, &mut &ws).unwrap()) + // .collect::>(), + // ) + // }) + // .collect::>() + // }) + // .collect::>(); + + // dump_c_with_func(levels); + + // // println!("{:?}", shadow.result().is_scalar()); + + // // println!("{:?}", shadow.result()); + + // // println!("{:?}", chain.result()); + // // for (i, c) in chain.iter() { + // // if *c == Complex::::new(0, 0) { + // // print!("hi") + // // } + // // if *c == Complex::::new(-0, 0) { + // // print!("hello") + // // } + // // if *c == Complex::::new(-0, -0) { + // // print!("hello") + // // } + // // if *c == Complex::::new(0, -0) { + // // print!("hello") + // // } + // // print!("{}", c.re); + // // } + + // let start = Instant::now(); + // // let chain = benchmark_chain(&vec, vbar, u); + // let duration = start.elapsed(); + + // // println!("{:?} in {:?}", chain, duration); + + // // let start = Instant::now(); + // // // let chain = symbolic_chain(&vec, &ws, &mut state); + // 
// let duration = start.elapsed();
+
+    // // println!("{:?} in {:?}", chain, duration);
+    // // let mut out = ws.new_atom();
+    // // let s = chain.finish().data.remove(0);
+
+    // // s.as_view().expand(&ws, & &mut out);
+
+    // // println!("{}", out.printer(&state));
+
+    // // let poly: MultivariatePolynomial<_, u8> = out
+    // //     .as_view()
+    // //     .to_polynomial(&RationalField::new(), None)
+    // //     .unwrap();
+
+    // // let (h, _ops, scheme) = poly.optimize_horner_scheme(4000);
+    // // let mut i = h.to_instr(poly.nvars);
+
+    // // println!(
+    // //     "Number of operations={}, with scheme={:?}",
+    // //     BorrowedHornerScheme::from(&h).op_count_cse(),
+    // //     scheme,
+    // // );
+
+    // // i.fuse_operations();
+
+    // // for _ in 0..100_000 {
+    // //     if !i.common_pair_elimination() {
+    // //         break;
+    // //     }
+    // //     i.fuse_operations();
+    // // }
+
+    // // let op_count = i.op_count();
+    // // let o = i.to_output(poly.var_map.as_ref().unwrap().to_vec(), true);
+    // // let o_f64 = o.convert::<f64>();
+
+    // // println!("Writing output to evaluate.cpp");
+    // // std::fs::write(
+    // //     "evaluate.cpp",
+    // //     format!(
+    // //         "{}",
+    // //         InstructionSetPrinter {
+    // //             instr: &o,
+    // //             state: &
+    // //             mode: symbolica::poly::evaluate::InstructionSetMode::CPP(
+    // //                 symbolica::poly::evaluate::InstructionSetModeCPPSettings {
+    // //                     write_header_and_test: true,
+    // //                     always_pass_output_array: false,
+    // //                 }
+    // //             )
+    // //         }
+    // //     ),
+    // // )
+    // // .unwrap();
+
+    // // let mut evaluator = o_f64.evaluator();
+
+    // // let start = Instant::now();
+    // // assert!(!evaluator
+    // //     .evaluate(&(0..poly.nvars).map(|x| x as f64 + 1.).collect::<Vec<_>>())
+    // //     .is_empty());
+    // // let duration = start.elapsed();
+
+    // // println!("Final number of operations={}", op_count);
+    // // println!("Evaluation = {:?}", duration);
+
+    // // // evaluate with simd
+    // // let o_f64x4 = o.convert::<f64x4>();
+    // // let mut evaluator = o_f64x4.evaluator();
+
+    // // println!(
+    // //     "Evaluation with simd = {:?}",
+    // //     evaluator.evaluate(
+    // //         &(0..poly.nvars)
+    // //             .map(|x| f64x4::new([x as f64 + 1., x as f64 + 2., x as f64 + 3., x as f64 + 4.]))
+    // //             .collect::<Vec<_>>()
+    // //     )[0]
+    // // );
+}
diff --git a/examples/Rust/Tensors/gamma_network.rs b/examples/Rust/Tensors/gamma_network.rs
new file mode 100644
index 00000000..803ec034
--- /dev/null
+++ b/examples/Rust/Tensors/gamma_network.rs
@@ -0,0 +1,86 @@
+use std::ops::Neg;
+
+use _gammaloop::tensor::{
+    ufo::{euclidean_four_vector, gamma, mink_four_vector},
+    AbstractIndex, ContractionCountStructure, FallibleMul, NumTensor, TensorNetwork,
+};
+use num::ToPrimitive;
+
+use symbolica::domains::float::Complex;
+
+fn gamma_net_num(
+    minkindices: &[i32],
+    vbar: [Complex<f64>; 4],
+    u: [Complex<f64>; 4],
+) -> TensorNetwork<NumTensor<ContractionCountStructure>> {
+    let mut i: i32 = 0;
+    let mut contracting_index = 0.into();
+    let mut result: Vec<NumTensor<ContractionCountStructure>> =
+        vec![euclidean_four_vector(contracting_index, &vbar).into()];
+    for m in minkindices {
+        let ui = contracting_index;
+        contracting_index += 1.into();
+        let uj = contracting_index;
+        if *m > 0 {
+            let p = [
+                Complex::<f64>::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
+                Complex::<f64>::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
+            ];
+            i += 1;
+            result.push(mink_four_vector(usize::try_from(*m).unwrap().into(), &p).into());
+            result.push(gamma(usize::try_from(*m).unwrap().into(), (ui, uj)).into());
+        } else {
+            result.push(
+                gamma(
+                    AbstractIndex::from(usize::try_from(m.neg()).unwrap() + 10000),
+                    (ui, uj),
+                )
+                .into(),
+            );
+        }
+    }
+    result.push(euclidean_four_vector(contracting_index, &u).into());
+    TensorNetwork::from(result)
+}
+
+fn main() {
+    let one = Complex::<f64>::new(1.0, 0.0);
+
+    let vbar = [
+        one.mul_fallible(3.0).unwrap(),
+        one.mul_fallible(3.1).unwrap(),
+        one.mul_fallible(3.2).unwrap(),
+        one.mul_fallible(3.3).unwrap(),
+    ];
+    let u = [
+        one.mul_fallible(4.0).unwrap(),
+        one.mul_fallible(4.1).unwrap(),
+        one.mul_fallible(4.2).unwrap(),
+        one.mul_fallible(4.3).unwrap(),
+    ];
+
+    let spacings: [i32; 2] = [20, 24];
+    let mut start = 1;
+    let mut ranges = Vec::new();
+
+    for &spacing in spacings.iter() {
+        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
+        start += spacing;
+    }
+
+    let vec: Vec<i32> = ranges.into_iter().flatten().collect();
+
+    let mut net = gamma_net_num(&vec, vbar, u);
+    net.contract_algo(|tn| tn.edge_to_min_degree_node_with_depth(1));
+
+    println!("{}", net.dot());
+    // assert_eq!(
+    //     Complex {
+    //         re: 5.341852612369398e16,
+    //         im: -136854212797686.44
+    //     },
+    //     net.result().try_as_complex().unwrap().data()[0]
+    // );
+}
diff --git a/examples/Rust/Tensors/load_nested_tensornet.rs b/examples/Rust/Tensors/load_nested_tensornet.rs
new file mode 100644
index 00000000..f40c466f
--- /dev/null
+++ b/examples/Rust/Tensors/load_nested_tensornet.rs
@@ -0,0 +1,72 @@
+use std::{collections::HashMap, time::Instant};
+
+use symbolica::{
+    representations::{Atom, Symbol},
+    state::{State, Workspace},
+};
+
+fn dump_c_with_func(_levels: Vec<Vec<(Symbol, Vec<Atom>)>>, _params: Vec<Atom>) {}
+fn dump_c(_levels: Vec<Vec<HashMap<Atom, Atom>>>, _params: Vec<Atom>) {}
+
+fn main() {
+    let mut state = State::get_global_state().write().unwrap();
+    let _ws: Workspace = Workspace::new();
+    let from_filemap: Vec<Vec<HashMap<String, String>>> =
+        serde_yaml::from_reader(std::fs::File::open("outmap.yaml").unwrap()).unwrap();
+
+    let paramstr: Vec<String> =
+        serde_yaml::from_reader(std::fs::File::open("params.yaml").unwrap()).unwrap();
+
+    let params: Vec<Atom> = paramstr
+        .iter()
+        .map(|x| Atom::parse(x, &mut state).unwrap())
+        .collect();
+
+    let startmap = Instant::now();
+
+    let levelsmap = from_filemap
+        .iter()
+        .map(|x| {
+            x.iter()
+                .map(|x| {
+                    let mut a = HashMap::new();
+                    for (k, v) in x.iter() {
+                        a.insert(
+                            Atom::parse(k, &mut state).unwrap(),
+                            Atom::parse(v, &mut state).unwrap(),
+                        );
+                    }
+                    a
+                })
+                .collect::<Vec<_>>()
+        })
+        .collect::<Vec<_>>();
+
+    let durationmap = startmap.elapsed();
+
+    let from_file: Vec<Vec<(String, Vec<String>)>> =
+        serde_yaml::from_reader(std::fs::File::open("out.yaml").unwrap()).unwrap();
+
+    let start = Instant::now();
+    let levels: Vec<Vec<(Symbol, Vec<Atom>)>> = from_file
+        .iter()
+        .map(|x| {
+            x.iter()
+                .map(|(s, v)| {
+                    (
+                        state.get_or_insert_fn(s, None).unwrap(),
+                        v.iter()
+                            .map(|x| Atom::parse(x, &mut state).unwrap())
+                            .collect::<Vec<Atom>>(),
+                    )
+                })
+                .collect::<Vec<_>>()
+        })
+        .collect::<Vec<_>>();
+    let duration = start.elapsed();
+
+    println!("Time to parse map: {:?}", durationmap);
+    println!("Time to parse vec: {:?}", duration);
+
+    dump_c(levelsmap, params.clone());
+    dump_c_with_func(levels, params);
+}
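Review note (illustrative, not part of the patch): this loader and the evaluator below both expect `out.yaml` to hold nested evaluation levels of type `Vec<Vec<(String, Vec<String>)>>` — the outermost index is the shadowing level, and each entry pairs a shadow-tensor name with its component expressions, all stored as strings and re-parsed into `Atom`s at load time. A hypothetical one-level value, written as the Rust data that serde_yaml would deserialize (names and expressions purely illustrative):

    // one level containing one (name, components) pair
    let levels: Vec<Vec<(String, Vec<String>)>> = vec![vec![(
        "S0".to_string(),
        vec!["p1(1)*p2(1)".to_string(), "p1(2)*p2(2)".to_string()],
    )]];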
diff --git a/examples/Rust/Tensors/symbolica_nested_eval.rs b/examples/Rust/Tensors/symbolica_nested_eval.rs
new file mode 100644
index 00000000..6890c4b7
--- /dev/null
+++ b/examples/Rust/Tensors/symbolica_nested_eval.rs
@@ -0,0 +1,42 @@
+use std::fs;
+
+use symbolica::{
+    poly::evaluate::ExpressionEvaluator,
+    representations::{Atom, Symbol},
+    state::{State, Workspace},
+};
+
+fn main() {
+    let mut state = State::get_global_state().write().unwrap();
+    let _ws = Workspace::new();
+    // let from_filemap: Vec<Vec<HashMap<String, String>>> =
+    //     serde_yaml::from_reader(std::fs::File::open("outmap.yaml").unwrap()).unwrap();
+
+    let from_file: Vec<Vec<(String, Vec<String>)>> =
+        serde_yaml::from_reader(std::fs::File::open("out.yaml").unwrap()).unwrap();
+
+    let levels: Vec<Vec<(Symbol, Vec<Atom>)>> = from_file
+        .iter()
+        .map(|x| {
+            x.iter()
+                .map(|(s, v)| {
+                    (
+                        state.get_or_insert_fn(s, None).unwrap(),
+                        v.iter()
+                            .map(|x| {
+                                let a = Atom::parse(x, &mut state).unwrap();
+                                let _a_exp = Atom::new();
+                                a.as_view().expand();
+                                a
+                            })
+                            .collect::<Vec<Atom>>(),
+                    )
+                })
+                .collect::<Vec<_>>()
+        })
+        .collect::<Vec<_>>();
+
+    let e = ExpressionEvaluator::new(levels, 10);
+
+    fs::write("eval.cpp", format!("{e}")).unwrap();
+}
diff --git a/examples/gamma_chain.rs b/examples/gamma_chain.rs
deleted file mode 100644
index c8a7ff11..00000000
--- a/examples/gamma_chain.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-// Gamma chain example
-
-use std::{ops::Neg, time::Instant};
-
-use _gammaloop::tensor::{
-    DenseTensor,
-    Signature::Lorentz,
-    Signature::{self, Euclidean},
-    SparseTensor, TensorStructure, VecSlotExt,
-};
-
-use num_complex::Complex64;
-use num_traits::{Num, ToPrimitive};
-
-fn gamma(minkindex: usize, indices: (usize, usize)) -> SparseTensor<Complex64> {
-    let structure = TensorStructure::from_idxsing(
-        &[indices.0, indices.1, minkindex],
-        &[Euclidean(4), Euclidean(4), Lorentz(4)],
-    );
-
-    let c1 = Complex64::new(1.0, 0.0);
-    let cn1 = Complex64::new(-1.0, 0.0);
-    let ci = Complex64::new(0.0, 1.0);
-    let cni = Complex64::new(0.0, -1.0);
-
-    let mut gamma = SparseTensor::empty(structure);
-
-    // dirac gamma matrices
-
-    gamma.set(&[0, 0, 0], c1).unwrap();
-    gamma.set(&[1, 1, 0], c1).unwrap();
-    gamma.set(&[2, 2, 0], cn1).unwrap();
-    gamma.set(&[3, 3, 0], cn1).unwrap();
-
-    gamma.set(&[0, 3, 1], c1).unwrap();
-    gamma.set(&[1, 2, 1], c1).unwrap();
-    gamma.set(&[2, 1, 1], cn1).unwrap();
-    gamma.set(&[3, 0, 1], cn1).unwrap();
-
-    gamma.set(&[0, 3, 2], cni).unwrap();
-    gamma.set(&[1, 2, 2], ci).unwrap();
-    gamma.set(&[2, 1, 2], ci).unwrap();
-    gamma.set(&[3, 0, 2], cni).unwrap();
-
-    gamma.set(&[0, 2, 3], c1).unwrap();
-    gamma.set(&[1, 3, 3], cn1).unwrap();
-    gamma.set(&[2, 0, 3], cn1).unwrap();
-    gamma.set(&[3, 1, 3], c1).unwrap();
-
-    gamma
-}
-
-fn pslash(indices: (usize, usize), p: [Complex64; 4]) -> DenseTensor<Complex64> {
-    let minkindex = indices.0 + indices.1;
-
-    let p = DenseTensor::from_data(
-        &p,
-        TensorStructure::from_idxsing(&[minkindex], &[Lorentz(4)]),
-    )
-    .unwrap();
-    gamma(minkindex, indices).contract_with_dense(&p).unwrap()
-}
-
-#[allow(dead_code)]
-fn mink_four_vector<T>(index: usize, p: [T; 4]) -> DenseTensor<T>
-where
-    T: Num + std::default::Default + std::clone::Clone,
-{
-    DenseTensor::from_data(&p, TensorStructure::from_idxsing(&[index], &[Lorentz(4)])).unwrap()
-}
-
-fn eucl_four_vector<T>(index: usize, p: [T; 4]) -> DenseTensor<T>
-where
-    T: Num + std::default::Default + std::clone::Clone,
-{
-    DenseTensor::from_data(&p, TensorStructure::from_idxsing(&[index], &[Euclidean(4)])).unwrap()
-}
-
-fn benchmark_chain(
-    minkindices: &[i32],
-    vbar: [Complex64; 4],
-    u: [Complex64; 4],
-) -> DenseTensor<Complex64> {
-    let mut i = 0;
-    let mut contracting_index = 0;
-    let mut result = eucl_four_vector(contracting_index, vbar);
-    for m in minkindices {
-        if *m > 0 {
-            let p = [
-                Complex64::new(1.0 + 0.01 * i.to_f64().unwrap(), 0.0),
-                Complex64::new(1.1 + 0.01 * i.to_f64().unwrap(), 0.0),
-                Complex64::new(1.2 + 0.01 * i.to_f64().unwrap(), 0.0),
-                Complex64::new(1.3 + 0.01 * i.to_f64().unwrap(), 0.0),
-            ];
-            i += 1;
-            let pslash = pslash((contracting_index, contracting_index + 1), p);
-            result = pslash.contract_with_dense(&result).unwrap();
-        } else {
-            result = gamma(
-                usize::try_from(m.neg()).unwrap(),
-                (contracting_index, contracting_index + 1),
-            )
-            .contract_with_dense(&result)
-            .unwrap();
-        }
-        contracting_index += 1;
-    }
-    result
-        .contract_with_dense(&eucl_four_vector(contracting_index, u))
-        .unwrap()
-}
-
-#[allow(dead_code)]
-fn identity(indices: (usize, usize), signature: Signature) -> DenseTensor<Complex64> {
-    let structure = TensorStructure::from_idxsing(&[indices.0, indices.1], &[signature, signature]);
-    let mut identity = DenseTensor::default(structure);
-    for i in 0..signature.into() {
-        identity.set(&[i, i], Complex64::new(1.0, 0.0));
-    }
-    identity
-}
-
-#[allow(unused_variables)]
-fn main() {
-    // let p = [Complex64::new(1.0, 0.0); 4];
-    // let p1 = pslash((1, 2), p);
-
-    // let trg = gamma(1, (2, 2)).internal_contract();
-
-    let one = Complex64::new(1.0, 0.0);
-    let zero = Complex64::new(0.0, 0.0);
-
-    // let gammamu = gamma(1, (2, 3))
-    //     .contract_with_dense(&identity((3, 4), Euclidean(4)))
-    //     .unwrap();
-
-    // // println!("{:?}", gammamu);
-
-    // let gammunu = gamma(1, (4, 5))
-    //     .contract_with_dense(&identity((5, 6), Euclidean(4)))
-    //     .unwrap();
-
-    // println!("{:?}", gammamu.contract_with_dense(&gammunu).unwrap());
-
-    let vbar = [one * 3.0, one * 3.1, one * 3.2, one * 3.3];
-    let u = [one * 4.0, one * 4.1, one * 4.2, one * 4.3];
-    // let a = eucl_four_vector(1, vbar);
-    // let b = eucl_four_vector(1, u);
-    // println!("{:?}", a.contract_with_dense(&b));
-
-    // let p11 = pslash((1, 2), [one, zero, zero, zero])
-    //     .contract_with_dense(&pslash((2, 1), [one, zero, zero, zero]));
-
-    // println!("P {:?}", p11);
-
-    let spacings: [i32; 2] = [30, 40];
-    let mut start = 1;
-    let mut ranges = Vec::new();
-
-    for &spacing in spacings.iter() {
-        ranges.push((start..start + spacing).chain(std::iter::once(-1)));
-        start += spacing;
-    }
-
-    let vec: Vec<i32> = ranges.into_iter().flatten().collect();
-
-    let start = Instant::now();
-    let chain = benchmark_chain(&vec, vbar, u);
-    let duration = start.elapsed();
-
-    println!("{:?} in {:?}", chain, duration);
-}
diff --git a/flake.lock b/flake.lock
index e359ac06..09e0bc03 100644
--- a/flake.lock
+++ b/flake.lock
@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1696725822,
-        "narHash": "sha256-B7uAOS7TkLlOg1aX01rQlYbydcyB6ZnLJSfaYbKVww8=",
+        "lastModified": 1702272962,
+        "narHash": "sha256-D+zHwkwPc6oYQ4G3A1HuadopqRwUY/JkMwHz1YF7j4Q=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "5aabb5780a11c500981993d49ee93cfa6df9307b",
+        "rev": "e97b3e4186bcadf0ef1b6be22b8558eab1cdeb5d",
         "type": "github"
       },
       "original": {
@@ -62,11 +62,11 @@
         "nixpkgs": "nixpkgs_2"
       },
       "locked": {
-        "lastModified": 1696817516,
-        "narHash": "sha256-Xt9OY4Wnk9/vuUfA0OHFtmSlaen5GyiS9msgwOz3okI=",
+        "lastModified": 1702433821,
+        "narHash": "sha256-Kxv+dRbzj1fLQG0fyF/H6nswda6cN48r6kjctysnY4o=",
        "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "c0df7f2a856b5ff27a3ce314f6d7aacf5fda546f",
+        "rev": "cb9016d3a569100a609bb92c0a45beb9e23cd4eb",
         "type": "github"
       },
       "original": {
diff --git a/flake.nix b/flake.nix
index f3083d89..8f58a335 100644
--- a/flake.nix
+++ b/flake.nix
@@ -35,6 +35,7 @@
       default = pkgs.mkShell {
         #devshell definition :
         # LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib";
+        RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
         packages = with pkgs; [
           rustToolchain
diff --git a/python/gammaloop/run_python_tests.sh b/python/gammaloop/run_python_tests.sh
index b25de633..687d3096 100755
--- a/python/gammaloop/run_python_tests.sh
+++ b/python/gammaloop/run_python_tests.sh
@@ -1,7 +1,9 @@
 #!/usr/bin/env bash
+
 RETCODE=0;
 python -m pytest -m "not slow" --runrust --codecheck "$@"
 RETCODE=$RETCODE+$?;
+
 #python -m pytest tests/unit/* "$@"
 #python -m pytest tests/integration/* "$@"
 exit $(($RETCODE))
diff --git a/src/api/python.rs b/src/api/python.rs
index 5d7ff467..1579102d 100644
--- a/src/api/python.rs
+++ b/src/api/python.rs
@@ -9,7 +9,7 @@ use crate::{
 use ahash::HashMap;
 use git_version::git_version;
 use std::{fs, path::Path};
-use symbolica;
+
 const GIT_VERSION: &str = git_version!();
 
 #[allow(unused)]
@@ -62,8 +62,6 @@ fn gammalooprs(_py: Python, m: &PyModule) -> PyResult<()> {
 #[pyclass(name = "Worker")]
 pub struct PythonWorker {
     pub model: Model,
-    sb_state: symbolica::state::State,
-    sb_workspace: symbolica::state::Workspace,
     pub cross_sections: CrossSectionList,
     pub amplitudes: AmplitudeList,
     pub integrands: HashMap<String, Integrand>,
@@ -73,8 +71,6 @@ impl Clone for PythonWorker {
     fn clone(&self) -> PythonWorker {
         PythonWorker {
             model: self.model.clone(),
-            sb_state: self.sb_state.clone(),
-            sb_workspace: symbolica::state::Workspace::new(),
             cross_sections: self.cross_sections.clone(),
             amplitudes: self.amplitudes.clone(),
             integrands: self.integrands.clone(),
@@ -89,8 +85,6 @@ impl PythonWorker {
     pub fn new(_cls: &PyType) -> PyResult<PythonWorker> {
         Ok(PythonWorker {
             model: Model::default(),
-            sb_state: symbolica::state::State::new(),
-            sb_workspace: symbolica::state::Workspace::new(),
             cross_sections: CrossSectionList::default(),
             amplitudes: AmplitudeList::default(),
             integrands: HashMap::default(),
@@ -98,23 +92,15 @@ impl PythonWorker {
     }
 
     pub fn load_model(&mut self, file_path: &str) -> PyResult<()> {
-        Model::from_file(
-            String::from(file_path),
-            &mut self.sb_state,
-            &self.sb_workspace,
-        )
-        .map_err(|e| exceptions::PyException::new_err(e.to_string()))
-        .map(|m| self.model = m)
+        Model::from_file(String::from(file_path))
+            .map_err(|e| exceptions::PyException::new_err(e.to_string()))
+            .map(|m| self.model = m)
     }
 
     pub fn load_model_from_yaml_str(&mut self, yaml_str: &str) -> PyResult<()> {
-        Model::from_yaml_str(
-            String::from(yaml_str),
-            &mut self.sb_state,
-            &self.sb_workspace,
-        )
-        .map_err(|e| exceptions::PyException::new_err(e.root_cause().to_string()))
-        .map(|m| self.model = m)
+        Model::from_yaml_str(String::from(yaml_str))
+            .map_err(|e| exceptions::PyException::new_err(e.root_cause().to_string()))
+            .map(|m| self.model = m)
     }
 
     // Note: one could consider returning a PyModel class containing the serialisable model as well,
     // which will be deserialized in said native class.
     pub fn get_model(&self) -> PyResult<String> {
         self.model
-            .to_yaml(&self.sb_state)
+            .to_yaml()
             .map_err(|e| exceptions::PyException::new_err(e.to_string()))
     }
 
@@ -243,12 +229,7 @@ impl PythonWorker {
         for cross_section in &self.cross_sections.container {
             if cross_section_names.contains(&cross_section.name.as_str()) {
                 n_exported += 1;
-                let res = cross_section.export(
-                    export_root,
-                    &self.model,
-                    &mut self.sb_state,
-                    &self.sb_workspace,
-                );
+                let res = cross_section.export(export_root, &self.model);
                 if let Err(err) = res {
                     return Err(exceptions::PyException::new_err(err.to_string()));
                 }
@@ -272,12 +253,7 @@ impl PythonWorker {
         for amplitude in self.amplitudes.container.iter_mut() {
             if amplitude_names.contains(&amplitude.name.as_str()) {
                 n_exported += 1;
-                let res = amplitude.export(
-                    export_root,
-                    &self.model,
-                    &mut self.sb_state,
-                    &self.sb_workspace,
-                );
+                let res = amplitude.export(export_root, &self.model);
                 if let Err(err) = res {
                     return Err(exceptions::PyException::new_err(err.to_string()));
                 }
diff --git a/src/cff.rs b/src/cff.rs
index 0ae32b13..3e28f63b 100644
--- a/src/cff.rs
+++ b/src/cff.rs
@@ -12,10 +12,7 @@ use color_eyre::Report;
 use eyre::{eyre, Result};
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};
-use symbolica::{
-    representations::{AsAtomView, Atom},
-    state::{ResettableBuffer, State, Workspace},
-};
+use symbolica::representations::Atom;
 
 use log::info;
 
@@ -37,25 +34,23 @@ impl PartialEq for Esurface {
 
 #[allow(unused)]
 impl Esurface {
-    fn to_atom(&self, state: &mut State, workspace: &Workspace) -> Atom {
+    fn to_atom(&self) -> Atom {
         let symbolic_energies = self
             .energies
             .iter()
-            .map(|i| Atom::parse(&format!("E{}", i), state, workspace).unwrap())
+            .map(|i| Atom::parse(&format!("E{}", i)).unwrap())
             .collect_vec();
 
         let symbolic_shift = self
             .shift
             .iter()
-            .map(|i| Atom::parse(&format!("p{}", i), state, workspace).unwrap())
+            .map(|i| Atom::parse(&format!("p{}", i)).unwrap())
             .collect_vec();
 
         let builder_atom = Atom::new();
         let energy_sum = symbolic_energies
             .iter()
-            .fold(builder_atom.builder(state, workspace), |acc, energy| {
-                acc + energy
-            });
+            .fold(builder_atom, |acc, energy| acc + energy);
 
         let esurf = symbolic_shift.iter().fold(energy_sum, |acc, shift| {
             if self.shift_signature {
@@ -65,7 +60,7 @@ impl Esurface {
             }
         });
 
-        Atom::new_from_view(&esurf.as_atom_view())
+        esurf
     }
 
     // the energy cache contains the energies of external edges as well as the virtual,
@@ -1407,7 +1402,7 @@ fn generate_cff_from_orientations(
 #[cfg(test)]
 mod tests_cff {
     use lorentz_vector::LorentzVector;
-    use num_traits::Inv;
+    use num::traits::Inv;
 
     use super::*;
 
diff --git a/src/cross_section.rs b/src/cross_section.rs
index accbe53b..f039ec1e 100644
--- a/src/cross_section.rs
+++ b/src/cross_section.rs
@@ -15,7 +15,7 @@ use smartstring::{LazyCompact, SmartString};
 use std::fs;
 use std::fs::File;
 use std::path::Path;
-use symbolica;
+
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub enum OutputType {
@@ -392,13 +392,7 @@ impl CrossSection {
     }
 
     #[allow(unused)]
-    pub fn export(
-        &self,
-        export_root: &str,
-        model: &Model,
-        sb_state: &mut symbolica::state::State,
-        sb_workspace: &symbolica::state::Workspace,
-    ) -> Result<(), Report> {
+    pub fn export(&self, export_root: &str, model: &Model) -> Result<(), Report> {
         // TODO process cross-section by adding lots of additional information necessary for runtime.
         // e.g. generate e-surface, cff expression, counterterms, etc.
@@ -492,13 +486,7 @@ impl Amplitude {
     }
 
     #[allow(unused)]
-    pub fn export(
-        &mut self,
-        export_root: &str,
-        model: &Model,
-        sb_state: &mut symbolica::state::State,
-        sb_workspace: &symbolica::state::Workspace,
-    ) -> Result<(), Report> {
+    pub fn export(&mut self, export_root: &str, model: &Model) -> Result<(), Report> {
         // TODO process amplitude by adding lots of additional information necessary for runtime.
         // e.g. generate e-surface, cff expression, counterterms, etc.
 
diff --git a/src/gammaloop_integrand.rs b/src/gammaloop_integrand.rs
index 9ca96abe..02edadff 100644
--- a/src/gammaloop_integrand.rs
+++ b/src/gammaloop_integrand.rs
@@ -8,8 +8,8 @@ use crate::{Precision, StabilityLevelSetting};
 use itertools::Itertools;
 use log::{debug, warn};
 use lorentz_vector::LorentzVector;
+use num::traits::Zero;
 use num::Complex;
-use num_traits::Zero;
 use serde::{Deserialize, Serialize};
 use symbolica::numerical_integration::{ContinuousGrid, DiscreteGrid, Grid, Sample};
 
diff --git a/src/graph.rs b/src/graph.rs
index d71f714d..29b54335 100644
--- a/src/graph.rs
+++ b/src/graph.rs
@@ -12,9 +12,9 @@ use itertools::Itertools;
 use log::warn;
 use lorentz_vector::LorentzVector;
 use nalgebra::DMatrix;
-use num::Complex;
 #[allow(unused_imports)]
-use num_traits::Float;
+use num::traits::Float;
+use num::Complex;
 use serde::{Deserialize, Serialize};
 use smartstring::{LazyCompact, SmartString};
 use std::{collections::HashMap, path::Path, sync::Arc};
diff --git a/src/h_function_test.rs b/src/h_function_test.rs
index 491ffde8..b403f2f8 100644
--- a/src/h_function_test.rs
+++ b/src/h_function_test.rs
@@ -3,8 +3,8 @@ use crate::utils;
 use crate::utils::FloatLike;
 use crate::ParameterizationMapping;
 use crate::Settings;
+use num::traits::ToPrimitive;
 use num::Complex;
-use num_traits::ToPrimitive;
 use serde::{Deserialize, Serialize};
 use symbolica::numerical_integration::{ContinuousGrid, Grid, Sample};
 
@@ -51,7 +51,7 @@ impl HFunctionTestIntegrand {
                 // r = e_cm * b * x/(1-x)
                 let b = Into::<T>::into(self.settings.parameterization.b);
                 let radius = e_cm * b * xs[0] / (T::one() - xs[0]);
-                jac *= <T as num_traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
+                jac *= <T as num::traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
                 radius
             }
         };
diff --git a/src/inspect.rs b/src/inspect.rs
index 26e1061c..961376c3 100644
--- a/src/inspect.rs
+++ b/src/inspect.rs
@@ -8,8 +8,8 @@ use crate::utils;
 use crate::Integrand;
 use crate::Settings;
 use lorentz_vector::LorentzVector;
+use num::traits::ToPrimitive;
 use num::Complex;
-use num_traits::ToPrimitive;
 
 pub fn inspect(
     settings: &Settings,
diff --git a/src/lib.rs b/src/lib.rs
index a88ce1b5..572e2c93 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,5 +1,11 @@
 #![cfg_attr(feature = "fail-on-warnings", deny(warnings))]
-
+// #![deny(clippy::all)]
+// #![warn(clippy::pedantic)]
+#![warn(clippy::all)]
+// #![warn(clippy::restriction)]
+// #![warn(clippy::nursery)]
+// #![warn(clippy::cargo)]
+// #![feature(min_specialization)]
 pub mod api;
 pub mod cff;
 pub mod cli_functions;
diff --git a/src/model.rs b/src/model.rs
index e5b0dee5..30607818 100644
--- a/src/model.rs
+++ b/src/model.rs
@@ -38,10 +38,7 @@ pub struct SerializableVertexRule {
 }
 
 impl SerializableVertexRule {
-    pub fn from_vertex_rule(
-        vertex_rule: &VertexRule,
-        sb_state: &symbolica::state::State,
-    ) -> SerializableVertexRule {
+    pub fn from_vertex_rule(vertex_rule: &VertexRule) -> SerializableVertexRule {
         SerializableVertexRule {
             name: vertex_rule.name.clone(),
             particles: vertex_rule
                 .particles
                 .iter()
                 .map(|particle| particle.name.clone())
                 .collect(),
             color_structures: vertex_rule
.color_structures .iter() - .map(|color_structure| utils::to_str_expression(color_structure, sb_state)) + .map(utils::to_str_expression) .collect(), lorentz_structures: vertex_rule .lorentz_structures @@ -86,8 +83,6 @@ impl VertexRule { pub fn from_serializable_vertex_rule( model: &Model, vertex_rule: &SerializableVertexRule, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, ) -> VertexRule { VertexRule { name: vertex_rule.name.clone(), @@ -100,11 +95,7 @@ impl VertexRule { .color_structures .iter() .map(|color_structure_name| { - utils::parse_python_expression( - color_structure_name.as_str(), - sb_state, - sb_workspace, - ) + utils::parse_python_expression(color_structure_name.as_str()) }) .collect(), lorentz_structures: vertex_rule @@ -142,13 +133,10 @@ pub struct SerializableCoupling { } impl SerializableCoupling { - pub fn from_coupling( - coupling: &Coupling, - sb_state: &symbolica::state::State, - ) -> SerializableCoupling { + pub fn from_coupling(coupling: &Coupling) -> SerializableCoupling { SerializableCoupling { name: coupling.name.clone(), - expression: utils::to_str_expression(&coupling.expression, sb_state), + expression: utils::to_str_expression(&coupling.expression), orders: coupling.orders.clone(), value: coupling.value.map(|value| (value.re, value.im)), } @@ -164,18 +152,10 @@ pub struct Coupling { } impl Coupling { - pub fn from_serializable_coupling( - coupling: &SerializableCoupling, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, - ) -> Coupling { + pub fn from_serializable_coupling(coupling: &SerializableCoupling) -> Coupling { Coupling { name: coupling.name.clone(), - expression: utils::parse_python_expression( - coupling.expression.as_str(), - sb_state, - sb_workspace, - ), + expression: utils::parse_python_expression(coupling.expression.as_str()), orders: coupling.orders.clone(), value: coupling.value.map(|value| Complex::new(value.0, value.1)), } @@ -264,14 +244,11 @@ pub struct SerializableLorentzStructure { } impl SerializableLorentzStructure { - pub fn from_lorentz_structure( - ls: &LorentzStructure, - sb_state: &symbolica::state::State, - ) -> SerializableLorentzStructure { + pub fn from_lorentz_structure(ls: &LorentzStructure) -> SerializableLorentzStructure { SerializableLorentzStructure { name: ls.name.clone(), spins: ls.spins.clone(), - structure: utils::to_str_expression(&ls.structure, sb_state), + structure: utils::to_str_expression(&ls.structure), } } } @@ -286,17 +263,11 @@ pub struct LorentzStructure { impl LorentzStructure { pub fn from_serializable_lorentz_structure( ls: &SerializableLorentzStructure, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, ) -> LorentzStructure { LorentzStructure { name: ls.name.clone(), spins: ls.spins.clone(), - structure: utils::parse_python_expression( - ls.structure.as_str(), - sb_state, - sb_workspace, - ), + structure: utils::parse_python_expression(ls.structure.as_str()), } } } @@ -313,10 +284,7 @@ pub struct SerializableParameter { } impl SerializableParameter { - pub fn from_parameter( - param: &Parameter, - sb_state: &symbolica::state::State, - ) -> SerializableParameter { + pub fn from_parameter(param: &Parameter) -> SerializableParameter { SerializableParameter { name: param.name.clone(), lhablock: param.lhablock.clone(), @@ -324,10 +292,7 @@ impl SerializableParameter { nature: param.nature.clone(), parameter_type: param.parameter_type.clone(), value: param.value.map(|value| (value.re, 
value.im)), - expression: param - .expression - .as_ref() - .map(|expr| utils::to_str_expression(expr, sb_state)), + expression: param.expression.as_ref().map(utils::to_str_expression), } } } @@ -344,11 +309,7 @@ pub struct Parameter { } impl Parameter { - pub fn from_serializable_parameter( - param: &SerializableParameter, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, - ) -> Parameter { + pub fn from_serializable_parameter(param: &SerializableParameter) -> Parameter { Parameter { name: param.name.clone(), lhablock: param.lhablock.clone(), @@ -359,7 +320,7 @@ impl Parameter { expression: param .expression .as_ref() - .map(|expr| utils::parse_python_expression(expr.as_str(), sb_state, sb_workspace)), + .map(|expr| utils::parse_python_expression(expr.as_str())), } } } @@ -398,7 +359,7 @@ impl SerializableModel { .suggestion("Is it a correct yaml file") } - pub fn from_model(model: &Model, sb_state: &symbolica::state::State) -> SerializableModel { + pub fn from_model(model: &Model) -> SerializableModel { SerializableModel { name: model.name.clone(), restriction: model.restriction.clone(), @@ -410,9 +371,7 @@ impl SerializableModel { parameters: model .parameters .iter() - .map(|parameter| { - SerializableParameter::from_parameter(parameter.as_ref(), sb_state) - }) + .map(|parameter| SerializableParameter::from_parameter(parameter.as_ref())) .collect(), particles: model .particles @@ -423,23 +382,18 @@ impl SerializableModel { .lorentz_structures .iter() .map(|lorentz_structure| { - SerializableLorentzStructure::from_lorentz_structure( - lorentz_structure.as_ref(), - sb_state, - ) + SerializableLorentzStructure::from_lorentz_structure(lorentz_structure.as_ref()) }) .collect(), couplings: model .couplings .iter() - .map(|coupling| SerializableCoupling::from_coupling(coupling.as_ref(), sb_state)) + .map(|coupling| SerializableCoupling::from_coupling(coupling.as_ref())) .collect(), vertex_rules: model .vertex_rules .iter() - .map(|vertex_rule| { - SerializableVertexRule::from_vertex_rule(vertex_rule.as_ref(), sb_state) - }) + .map(|vertex_rule| SerializableVertexRule::from_vertex_rule(vertex_rule.as_ref())) .collect(), } } @@ -499,11 +453,7 @@ impl Model { self.name == "ModelNotLoaded" || self.particles.is_empty() } - pub fn from_serializable_model( - serializable_model: SerializableModel, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, - ) -> Model { + pub fn from_serializable_model(serializable_model: SerializableModel) -> Model { let mut model: Model = Model::default(); model.name = serializable_model.name; model.restriction = serializable_model.restriction; @@ -532,11 +482,8 @@ impl Model { .iter() .enumerate() .map(|(i_param, serializable_param)| { - let parameter = Arc::new(Parameter::from_serializable_parameter( - serializable_param, - sb_state, - sb_workspace, - )); + let parameter = + Arc::new(Parameter::from_serializable_parameter(serializable_param)); model .parameter_name_to_position .insert(parameter.name.clone(), i_param); @@ -573,8 +520,6 @@ impl Model { let lorentz_structure = Arc::new(LorentzStructure::from_serializable_lorentz_structure( serializable_lorentz_structure, - sb_state, - sb_workspace, )); model .lorentz_structure_name_to_position @@ -589,11 +534,8 @@ impl Model { .iter() .enumerate() .map(|(i_coupl, serializable_coupling)| { - let coupling = Arc::new(Coupling::from_serializable_coupling( - serializable_coupling, - sb_state, - sb_workspace, - )); + let coupling = + 
Arc::new(Coupling::from_serializable_coupling(serializable_coupling)); model .coupling_name_to_position .insert(coupling.name.clone(), i_coupl); @@ -610,8 +552,6 @@ impl Model { let vertex_rule = Arc::new(VertexRule::from_serializable_vertex_rule( &model, serializable_vertex_rule, - sb_state, - sb_workspace, )); model .vertex_rule_name_to_position @@ -623,32 +563,20 @@ impl Model { model } - pub fn to_serializable(&self, sb_state: &symbolica::state::State) -> SerializableModel { - SerializableModel::from_model(self, sb_state) + pub fn to_serializable(&self) -> SerializableModel { + SerializableModel::from_model(self) } - pub fn to_yaml(&self, sb_state: &symbolica::state::State) -> Result { - serde_yaml::to_string(&self.to_serializable(sb_state)) + pub fn to_yaml(&self) -> Result { + serde_yaml::to_string(&self.to_serializable()) } - pub fn from_file( - file_path: String, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, - ) -> Result { - SerializableModel::from_file(file_path).map(|serializable_model| { - Model::from_serializable_model(serializable_model, sb_state, sb_workspace) - }) + pub fn from_file(file_path: String) -> Result { + SerializableModel::from_file(file_path).map(Model::from_serializable_model) } - pub fn from_yaml_str( - yaml_str: String, - sb_state: &mut symbolica::state::State, - sb_workspace: &symbolica::state::Workspace, - ) -> Result { - SerializableModel::from_yaml_str(yaml_str).map(|serializable_model| { - Model::from_serializable_model(serializable_model, sb_state, sb_workspace) - }) + pub fn from_yaml_str(yaml_str: String) -> Result { + SerializableModel::from_yaml_str(yaml_str).map(Model::from_serializable_model) } #[inline] diff --git a/src/tensor.rs b/src/tensor.rs index 3c7e0909..91de2bf9 100644 --- a/src/tensor.rs +++ b/src/tensor.rs @@ -1,1058 +1,62 @@ -use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap}, - iter::FromIterator, - ops::Bound::Included, - usize, -}; +/*! -type AbstractIndex = usize; -type Dimension = usize; -type ConcreteIndex = usize; -type Position = usize; + Contains all the tooling for working with arbitrary rank tensors, symbolically, numerically, and parametrically. -#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] -pub enum Signature { - Euclidean(Dimension), - Lorentz(Dimension), -} + It includes special support for a Minkowski metric, and a way to add any custom diagonal (sign-based) metric. -impl Signature { - pub fn negative(&self) -> Vec { - match self { - Signature::Euclidean(value) => vec![false; *value], - Signature::Lorentz(value) => std::iter::once(false) - .chain(std::iter::repeat(true).take(*value - 1)) - .collect::>(), - } - } -} + All tensor types make use of a tensor structure type, either the minimal `Vec` of [`Slot`]s or a more complete (but slightly more computationally heavy) [`HistoryStructure`]. + Data is then added to make parametric or fully numeric tensors. + If no data is added, some [`TensorStructure`]s behave like symbolic tensors: namely [`HistoryStructure`] and [`SymbolicTensor`]. -impl From for Signature { - fn from(value: Dimension) -> Self { - Signature::Euclidean(value) - } -} + There are two main types of data tensors, [`DenseTensor`] and [`SparseTensor`]. + They each implement a different type of storage for data.
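[Editor's aside, not part of the diff.] A minimal sketch of how the two storage types and the [`Contract`] trait are meant to fit together. Only `SparseTensor::empty`, `set`, and `contract` appear verbatim in this diff; the constructor shapes (`Slot` from an index/representation pair, a `VecStructure` from a `Vec` of slots, `DenseTensor::from_data`, `order()`) are carried over from the pre-refactor API and are assumptions here.

    use _gammaloop::tensor::{
        AbstractIndex, Contract, DenseTensor, Representation, SetTensorData, Slot, SparseTensor,
        TensorStructure, VecStructure,
    };

    fn storage_sketch() {
        // Slots sharing abstract index 0 are summed over; index 1 survives.
        let mu: Slot = (AbstractIndex::from(0), Representation::Euclidean(3)).into(); // assumed constructor
        let nu: Slot = (AbstractIndex::from(1), Representation::Euclidean(3)).into(); // assumed constructor

        // Sparse storage: a flat-index -> value map; unset entries are implicitly zero.
        let mut v: SparseTensor<f64, VecStructure> = SparseTensor::empty(vec![mu].into());
        v.set(&[2], 1.0).unwrap();

        // Dense storage: a contiguous Vec covering all size() = 9 positions (here a 3x3 identity).
        let m: DenseTensor<f64, VecStructure> =
            DenseTensor::from_data(&[1., 0., 0., 0., 1., 0., 0., 0., 1.], vec![mu, nu].into())
                .unwrap();

        // Contracting over the shared slot leaves a rank-1 dense tensor along `nu`.
        let w = v.contract(&m).unwrap();
        assert_eq!(w.order(), 1);
    }

Because `mu` carries a Euclidean representation, `negative()` yields all-plus signs and the contraction is an ordinary dot product; a `Lorentz` slot would instead subtract the components the metric marks negative.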
-impl<'a> std::iter::FromIterator<&'a Signature> for Vec { - fn from_iter>(iter: T) -> Self { - iter.into_iter() - .map(|&rep| -> Dimension { (&rep).into() }) - .collect() - } -} + All types of tensors can be contracted together using the [`Contract`] trait. + This can be done manually, or using a [`TensorNetwork`] and specifying a contraction algorithm. -impl From<&Signature> for Dimension { - fn from(rep: &Signature) -> Self { - match rep { - Signature::Euclidean(value) => *value, - Signature::Lorentz(value) => *value, - } - } -} + Several enums are defined to be able to store heterogeneous tensors. + Namely: + - [`DataTensor`] + - [`NumTensor`] + - [`MixedTensor`] -impl From for Dimension { - fn from(rep: Signature) -> Self { - match rep { - Signature::Euclidean(value) => value, - Signature::Lorentz(value) => value, - } - } -} +*/ -#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] -pub struct Slot { - index: AbstractIndex, - signature: Signature, -} +/// All tooling for tensor structures, indices and representations +pub mod structure; +pub use structure::*; -impl From<(AbstractIndex, Signature)> for Slot { - fn from(value: (AbstractIndex, Signature)) -> Self { - Slot { - index: value.0, - signature: value.1, - } - } -} +/// More ergonomic and smart arithmetic with symbolic types +pub mod upgrading_arithmetic; +pub use upgrading_arithmetic::*; -pub type TensorStructure = Vec; +/// Tensors with data +pub mod data; +pub use data::*; -pub trait VecSlotExt { - fn from_idxsing(indices: &[AbstractIndex], dims: &[Signature]) -> Self; - fn from_integers(indices: &[AbstractIndex], dims: &[usize]) -> Self; - fn match_index(&self, other: &Self) -> Option<(Position, Position)>; - fn traces(&self) -> Vec>; - fn merge_at(&self, other: &Self, positions: (Position, Position)) -> Self; - fn shape(&self) -> Vec; - fn order(&self) -> usize; - fn strides_row_major(&self) -> Vec; - fn strides_column_major(&self) -> Vec; - fn strides(&self) -> Vec; - fn verify_indices(&self, indices: &[ConcreteIndex]) -> Result<(), String>; - fn flat_index(&self, indices: &[ConcreteIndex]) -> Result; - fn expanded_index(&self, flat_index: usize) -> Result, String>; - fn size(&self) -> usize; -} +/// Parametric tensor contraction +pub mod parametric; +pub use parametric::*; -impl VecSlotExt for TensorStructure { - fn from_idxsing(indices: &[AbstractIndex], signatures: &[Signature]) -> Self { - indices - .iter() - .zip(signatures.iter()) - .map(|(&index, &dim)| Slot::from((index, dim))) - .collect() - } +/// Symbolic tensors +pub mod symbolic; +pub use symbolic::*; +/// Iterators on fibers of tensors +pub mod iterators; +pub use iterators::*; - fn from_integers(indices: &[AbstractIndex], dims: &[usize]) -> Self { - indices - .iter() - .zip(dims.iter()) - .map(|(&index, &dim)| Slot::from((index, Signature::Euclidean(dim)))) - .collect() - } +/// Tensor contraction +pub mod contraction; +pub use contraction::*; - fn match_index(&self, other: &Self) -> Option<(Position, Position)> { - for (i, slot_a) in self.iter().enumerate().rev() { - for (j, slot_b) in other.iter().enumerate() { - if slot_a == slot_b { - return Some((i, j)); - } - } - } - None - } +/// Tensor networks +pub mod network; +pub use network::*; +/// Adding, subtracting, scalar multiplication of tensors +pub mod arithmetic; - fn traces(&self) -> Vec> { - let mut positions = HashMap::new(); - - // Track the positions of each element - for (index, &value) in self.iter().enumerate() { - positions.entry(value).or_insert_with(Vec::new).push(index); - } - - // Collect
only the positions of repeated elements - positions - .into_iter() - .filter_map(|(_, indices)| { - if indices.len() > 1 { - Some(indices) - } else { - None - } - }) - .collect() - } - - fn merge_at(&self, other: &Self, positions: (Position, Position)) -> Self { - let mut slots_b = other.clone(); - let mut slots_a = self.clone(); - - slots_a.remove(positions.0); - slots_b.remove(positions.1); - - slots_a.append(&mut slots_b); - slots_a - } - - fn shape(&self) -> Vec { - self.iter().map(|slot| &slot.signature).collect() - } - - fn order(&self) -> usize { - //total valence (or misnamed : rank) - self.len() - } - - fn strides_column_major(&self) -> Vec { - let mut strides: Vec = vec![1; self.order()]; - - if self.order() == 0 { - return strides; - } - - for i in 0..self.order() - 1 { - strides[i + 1] = strides[i] * usize::from(self[i].signature); - } - - strides - } - - fn strides_row_major(&self) -> Vec { - let mut strides = vec![1; self.order()]; - if self.order() == 0 { - return strides; - } - - for i in (0..self.order() - 1).rev() { - strides[i] = strides[i + 1] * usize::from(self[i + 1].signature); - } - - strides - } - - fn strides(&self) -> Vec { - self.strides_row_major() - } - - fn verify_indices(&self, indices: &[ConcreteIndex]) -> Result<(), String> { - if indices.len() != self.order() { - return Err("Mismatched order".to_string()); - } - - for (i, &dim_len) in self.iter().map(|slot| &slot.signature).enumerate() { - if indices[i] >= usize::from(dim_len) { - return Err(format!( - "Index {} out of bounds for dimension {} of size {}", - indices[i], - i, - usize::from(dim_len) - )); - } - } - Ok(()) - } - - fn flat_index(&self, indices: &[usize]) -> Result { - let strides = self.strides(); - self.verify_indices(indices)?; - - let mut idx = 0; - for (i, &index) in indices.iter().enumerate() { - idx += index * strides[i]; - } - Ok(idx) - } - - fn expanded_index(&self, flat_index: usize) -> Result, String> { - let mut indices = vec![]; - let mut index = flat_index; - for &stride in self.strides().iter() { - indices.push(index / stride); - index %= stride; - } - if index == 0 { - Ok(indices) - } else { - Err(format!("Index {} out of bounds", flat_index)) - } - } - - fn size(&self) -> usize { - self.shape().iter().product() - } -} - -pub trait HasTensorStructure { - fn structure(&self) -> &Vec; - // inline - fn order(&self) -> usize { - self.structure().order() - } - - fn shape(&self) -> Vec { - self.structure().shape() - } - - fn size(&self) -> usize { - self.structure().size() - } - - fn verify_indices(&self, indices: &[usize]) -> Result<(), String> { - self.structure().verify_indices(indices) - } - - fn strides(&self) -> Vec { - self.structure().strides() - } - - fn flat_index(&self, indices: &[usize]) -> Result { - self.structure().flat_index(indices) - } - - fn expanded_index(&self, flat_index: usize) -> Result, String> { - self.structure().expanded_index(flat_index) - } - - fn match_index(&self, other: &dyn HasTensorStructure) -> Option<(usize, usize)> { - self.structure().match_index(other.structure()) - } - - fn traces(&self) -> Vec> { - self.structure().traces() - } -} - -#[derive(Debug, Clone)] -pub struct SparseTensor { - elements: BTreeMap, T>, - structure: Vec, -} - -impl HasTensorStructure for SparseTensor { - fn structure(&self) -> &Vec { - &self.structure - } -} - -impl SparseTensor { - pub fn empty(structure: TensorStructure) -> Self { - SparseTensor { - elements: BTreeMap::new(), - structure, - } - } - - pub fn empty_from_integers(indices: &[AbstractIndex], dims: 
&[Dimension]) -> Self { - let structure = TensorStructure::from_integers(indices, dims); - SparseTensor { - elements: BTreeMap::new(), - structure, - } - } - - pub fn from_data( - data: &[(Vec, T)], - indices: &[AbstractIndex], - ) -> Result { - let mut dimensions = vec![0; indices.len()]; - for (index, _) in data { - if index.len() != indices.len() { - return Err("Mismatched order".to_string()); - } - for (i, &idx) in index.iter().enumerate() { - if idx >= dimensions[i] { - dimensions[i] = idx + 1; - } - } - } - Ok(SparseTensor { - elements: BTreeMap::from_iter(data.iter().cloned()), - structure: TensorStructure::from_integers(indices, &dimensions), - }) - } - - pub fn set(&mut self, indices: &[usize], value: T) -> Result<(), String> { - self.verify_indices(indices)?; - self.elements.insert(indices.to_vec(), value); - Ok(()) - } - - pub fn get(&self, indices: &[usize]) -> Result, String> { - self.verify_indices(indices)?; - // if the index is in the bTree return the value, else return default, lazily allocating the default - Ok(match self.elements.get(indices) { - Some(value) => Cow::Borrowed(value), - None => Cow::Owned(T::default()), - }) - } -} - -pub struct SparseTensorTraceIterator<'a, T> { - tensor: &'a SparseTensor, - trace_indices: Vec, - current_indices: Vec, - done: bool, -} - -impl<'a, T> SparseTensorTraceIterator<'a, T> { - fn new(tensor: &'a SparseTensor, trace_indices: Vec) -> Self { - assert!(trace_indices.len() >= 2, "Invalid trace indices"); - //trace positions must point to the same dimension - assert!( - trace_indices - .iter() - .map(|&pos| tensor.structure()[pos].signature) - .collect::>() - .iter() - .all(|&sig| sig == tensor.structure()[trace_indices[0]].signature), - "Trace indices must point to the same dimension" - ); - SparseTensorTraceIterator { - tensor, - trace_indices, - current_indices: vec![0; tensor.order()], - done: false, - } - } - - fn increment_indices(&mut self) -> bool { - for (i, index) in self - .current_indices - .iter_mut() - .enumerate() - .rev() - .filter(|(pos, _)| !self.trace_indices.contains(pos)) - { - *index += 1; - // If the index goes beyond the shape boundary, wrap around to 0 - if index >= &mut self.tensor.shape()[i] { - *index = 0; - continue; // carry over to the next dimension - } - return true; // We've successfully found the next combination - } - false // No more combinations left - } -} - -impl<'a, T> Iterator for SparseTensorTraceIterator<'a, T> -where - T: Clone + Copy + Default + std::ops::AddAssign + std::ops::SubAssign + std::cmp::PartialEq, -{ - type Item = (Vec, T); - fn next(&mut self) -> Option { - if self.done { - return None; - } - - let trace_dimension = self.tensor.structure()[self.trace_indices[0]].signature; - let trace_sign = trace_dimension.negative(); - let mut trace = T::default(); - - for (i, sign) in trace_sign.iter().enumerate().take(trace_dimension.into()) { - let mut indices = self.current_indices.clone(); - for &pos in self.trace_indices.iter() { - indices[pos] = i; - } - if let Some(value) = self.tensor.elements.get(&indices) { - if *sign { - trace -= *value; - } else { - trace += *value; - } - } - } - - //make a vector withouth the trace indices - let trace_indices: Vec = self - .current_indices - .clone() - .into_iter() - .enumerate() - .filter(|&(i, _)| !self.trace_indices.contains(&i)) - .map(|(_, x)| x) - .collect(); - - self.done = !self.increment_indices(); - - Some((trace_indices, trace)) - } -} - -pub struct SparseTensorFiberIterator<'a, T> { - tensor: &'a SparseTensor, - fiber_index: 
Position, - current_indices: Vec, - done: bool, -} - -impl<'a, T> SparseTensorFiberIterator<'a, T> { - fn new(tensor: &'a SparseTensor, fiber_index: usize) -> Self { - assert!(fiber_index < tensor.order(), "Invalid fiber index"); - - SparseTensorFiberIterator { - tensor, - fiber_index, - current_indices: vec![0; tensor.order()], - done: false, - } - } - - fn increment_indices(&mut self) -> bool { - for (i, index) in self - .current_indices - .iter_mut() - .enumerate() - .rev() - .filter(|(pos, _)| *pos != self.fiber_index) - { - *index += 1; - // If the index goes beyond the shape boundary, wrap around to 0 - if index >= &mut self.tensor.shape()[i] { - *index = 0; - continue; // carry over to the next dimension - } - return true; // We've successfully found the next combination - } - false // No more combinations left - } -} - -impl<'a, T> Iterator for SparseTensorFiberIterator<'a, T> -where - T: Clone + Default, -{ - type Item = (Vec, Vec, Vec<&'a T>); - fn next(&mut self) -> Option { - if self.done { - return None; - } - let mut lower_bound = self.current_indices.clone(); - let mut upper_bound = self.current_indices.clone(); - - // Set the range for the varying dimension to cover all indices - lower_bound[self.fiber_index] = 0; - upper_bound[self.fiber_index] = self.tensor.shape()[self.fiber_index]; - - let range = self - .tensor - .elements - .range((Included(lower_bound), Included(upper_bound))); - - let mut nonzeros = Vec::new(); - - let mut values: Vec<&'a T> = Vec::new(); - - for (indices, value) in range.filter(|(key, _)| { - // Ensure that the difference with start_key is at the same position - for (i, index) in key.iter().enumerate() { - if self.fiber_index != i && index != &self.current_indices[i] { - return false; - } - } - true - }) { - nonzeros.push(indices[self.fiber_index]); - values.push(value); - } - - // The upper bound of the range (exclusive) - - // Prepare a vector to hold the combined values - let fiber_indices = self.current_indices.clone(); - - self.done = !self.increment_indices(); - - // Check if there are any elements in the range - if !values.is_empty() { - Some((fiber_indices, nonzeros, values)) - } else { - self.next() - } - } -} - -impl SparseTensor { - pub fn iter_fibers(&self, fiber_index: usize) -> SparseTensorFiberIterator { - SparseTensorFiberIterator::new(self, fiber_index) - } - - pub fn iter_trace(&self, trace_indices: Vec) -> SparseTensorTraceIterator { - SparseTensorTraceIterator::new(self, trace_indices) - } -} - -#[derive(Debug, Clone)] -pub struct DenseTensor { - data: Vec, - structure: TensorStructure, -} - -impl HasTensorStructure for DenseTensor { - fn structure(&self) -> &TensorStructure { - &self.structure - } -} - -impl DenseTensor { - pub fn default(structure: TensorStructure) -> Self { - DenseTensor { - data: vec![T::default(); structure.size()], - structure, - } - } - - pub fn default_from_integers(indices: &[AbstractIndex], dims: &[usize]) -> Self { - let structure = TensorStructure::from_integers(indices, dims); - DenseTensor { - data: vec![T::default(); structure.size()], - structure, - } - } - - pub fn from_data(data: &[T], structure: TensorStructure) -> Result { - if data.len() != structure.size() { - return Err("Data length does not match shape".to_string()); - } - Ok(DenseTensor { - data: data.to_vec(), - structure, - }) - } - - pub fn set(&mut self, indices: &[usize], value: T) { - let idx = self.flat_index(indices); - if let Ok(i) = idx { - self.data[i] = value; - } - } - - pub fn get_linear(&self, index: usize)
-> Option<&T> { - self.data.get(index) - } - - pub fn get(&self, indices: &[usize]) -> Option<&T> { - if let Ok(idx) = self.flat_index(indices) { - Some(&self.data[idx]) - } else { - None - } - } -} - -pub struct TensorIterator<'a, T> { - tensor: &'a DenseTensor, - current_flat_index: usize, -} - -impl<'a, T> TensorIterator<'a, T> { - fn new(tensor: &'a DenseTensor) -> Self { - TensorIterator { - tensor, - current_flat_index: 0, - } - } -} - -impl<'a, T: Clone> Iterator for TensorIterator<'a, T> { - type Item = (Vec, T); - - fn next(&mut self) -> Option { - if let Ok(indices) = self.tensor.expanded_index(self.current_flat_index) { - let value = self.tensor.data[self.current_flat_index].clone(); - - self.current_flat_index += 1; - - Some((indices, value)) - } else { - None - } - } -} - -pub struct DenseTensorTraceIterator<'a, T> { - tensor: &'a DenseTensor, - trace_indices: Vec, - current_indices: Vec, - done: bool, -} - -impl<'a, T> DenseTensorTraceIterator<'a, T> { - fn new(tensor: &'a DenseTensor, trace_indices: Vec) -> Self { - assert!(trace_indices.len() >= 2, "Invalid trace indices"); - //trace positions must point to the same dimension - assert!( - trace_indices - .iter() - .map(|&pos| tensor.structure()[pos].signature) - .collect::>() - .iter() - .all(|&sig| sig == tensor.structure()[trace_indices[0]].signature), - "Trace indices must point to the same dimension" - ); - DenseTensorTraceIterator { - tensor, - trace_indices, - current_indices: vec![0; tensor.order()], - done: false, - } - } - - fn increment_indices(&mut self) -> bool { - for (i, index) in self - .current_indices - .iter_mut() - .enumerate() - .rev() - .filter(|(pos, _)| !self.trace_indices.contains(pos)) - { - *index += 1; - // If the index goes beyond the shape boundary, wrap around to 0 - if index >= &mut self.tensor.shape()[i] { - *index = 0; - continue; // carry over to the next dimension - } - return true; // We've successfully found the next combination - } - false // No more combinations left - } -} - -impl<'a, T> Iterator for DenseTensorTraceIterator<'a, T> -where - T: Clone + Copy + Default + std::ops::AddAssign + std::ops::SubAssign + std::cmp::PartialEq, -{ - type Item = (Vec, T); - fn next(&mut self) -> Option { - if self.done { - return None; - } - - let trace_dimension = self.tensor.structure()[self.trace_indices[0]].signature; - let trace_sign = trace_dimension.negative(); - let mut trace = T::default(); - - for (i, sign) in trace_sign.iter().enumerate().take(trace_dimension.into()) { - let mut indices = self.current_indices.clone(); - for &pos in self.trace_indices.iter() { - indices[pos] = i; - } - if *sign { - trace -= *self.tensor.get(&indices).unwrap(); - } else { - trace += *self.tensor.get(&indices).unwrap(); - } - } - - //make a vector withouth the trace indices - let trace_indices: Vec = self - .current_indices - .clone() - .into_iter() - .enumerate() - .filter(|&(i, _)| !self.trace_indices.contains(&i)) - .map(|(_, x)| x) - .collect(); - - self.done = !self.increment_indices(); - - Some((trace_indices, trace)) - } -} -pub struct DenseTensorFiberIterator<'a, T> { - tensor: &'a DenseTensor, - strides: Vec, - fixedindex: usize, - linear_start: usize, - current_fiber: usize, - total_fibers: usize, -} - -impl<'a, T> DenseTensorFiberIterator<'a, T> { - fn new(tensor: &'a DenseTensor, fixedindex: usize) -> Self { - assert!(fixedindex < tensor.order(), "Invalid fixedindex"); - - let fiber_length = tensor.shape()[fixedindex]; - let total_fibers = tensor.size() / fiber_length; - let strides = 
tensor.strides(); - - DenseTensorFiberIterator { - tensor, - strides, - fixedindex, - linear_start: 0, - current_fiber: 0, - total_fibers, - } - } - - fn update_linear_start(&mut self) { - let mut expanded_index = self.tensor.expanded_index(self.linear_start).unwrap(); - - for (i, index) in expanded_index - .iter_mut() - .enumerate() - .rev() - .filter(|(pos, _)| *pos != self.fixedindex) - { - *index += 1; - // If the index goes beyond the shape boundary, wrap around to 0 - if index >= &mut self.tensor.shape()[i] { - *index = 0; - continue; // carry over to the next dimension - } - break; // We've successfully found the next combination - } - self.linear_start = self.tensor.flat_index(&expanded_index).unwrap(); - } -} - -impl<'a, T> Iterator for DenseTensorFiberIterator<'a, T> -where - T: Clone + Default, -{ - type Item = (Vec, Vec<&'a T>); - - fn next(&mut self) -> Option { - if self.current_fiber >= self.total_fibers { - return None; - } - - // Determine start index for the current fiber - - let mut fiberdata = Vec::with_capacity(self.tensor.shape()[self.fixedindex]); - - let fiberindices = self.tensor.expanded_index(self.linear_start).unwrap(); - - for i in 0..self.tensor.shape()[self.fixedindex] { - let linear_index = self.linear_start + i * self.strides[self.fixedindex]; - fiberdata.push(self.tensor.get_linear(linear_index).unwrap()); - } - - self.update_linear_start(); - self.current_fiber += 1; - // Determine end index for the current fiber - - Some((fiberindices, fiberdata)) - } -} - -impl DenseTensor { - // ... [Other methods] ... - - pub fn iter(&self) -> TensorIterator { - TensorIterator::new(self) - } - - pub fn iter_fibers(&self, fixedindex: usize) -> DenseTensorFiberIterator { - DenseTensorFiberIterator::new(self, fixedindex) - } - - pub fn iter_trace(&self, trace_indices: Vec) -> DenseTensorTraceIterator { - DenseTensorTraceIterator::new(self, trace_indices) - } -} - -pub enum NumTensor { - Dense(DenseTensor), - Sparse(SparseTensor), -} - -impl DenseTensor -where - T: Default - + Clone - + std::ops::AddAssign - + std::ops::SubAssign - + std::ops::Mul - + Copy - + std::cmp::PartialEq - + std::fmt::Debug, -{ - pub fn internal_contract(&self) -> Self { - let mut result = self.clone(); - for trace in self.traces() { - let new_structure = self - .structure() - .clone() - .into_iter() - .enumerate() - .filter(|&(i, _)| !trace.contains(&i)) - .map(|(_, x)| x) - .collect(); - - let mut new_result = DenseTensor::default(new_structure); - for (idx, t) in result.iter_trace(trace) { - new_result.set(&idx, t); - } - result = new_result; - } - result - } - pub fn contract_with_sparse(&self, other: &SparseTensor) -> Option { - other.contract_with_dense(self) - } - - pub fn contract_with_dense(&self, other: &Self) -> Option { - if let Some((i, j)) = self.match_index(other) { - // println!("{},{}", i, j); - let self_shape = self.shape(); - - let dimension_of_contraction = self_shape[i]; - let metric = self.structure()[i].signature.negative(); - - let final_structure = self.structure().merge_at(other.structure(), (i, j)); - - // Initialize result tensor with default values - let mut result_data = vec![T::default(); final_structure.size()]; - - for (index_a, fiber_a) in self.iter_fibers(i) { - for (index_b, fiber_b) in other.iter_fibers(j) { - let result_index = final_structure - .flat_index( - &index_a[..i] - .iter() - .chain(&index_a[i + 1..]) - .chain(&index_b[..j]) - .chain(&index_b[j + 1..]) - .cloned() - .collect::>(), - ) - .unwrap(); - - for k in
0..dimension_of_contraction { - // Adjust indices for fetching from the other tensor - if metric[k] { - result_data[result_index] -= *fiber_a[k] * *fiber_b[k]; - } else { - result_data[result_index] += *fiber_a[k] * *fiber_b[k]; - } - } - } - } - - let result = DenseTensor { - data: result_data, - structure: final_structure, - }; - - if result.traces().is_empty() { - return Some(result); - } else { - return Some(result.internal_contract()); - } - } - None - } -} - -impl SparseTensor -where - T: Default - + Clone - + std::ops::AddAssign - + std::ops::SubAssign - + std::ops::Mul - + Copy - + std::cmp::PartialEq - + std::fmt::Debug, -{ - pub fn internal_contract(&self) -> Self { - let mut result = self.clone(); - for trace in self.traces() { - // println!("trace {:?}", trace); - let new_structure = self - .structure() - .clone() - .into_iter() - .enumerate() - .filter(|&(i, _)| !trace.contains(&i)) - .map(|(_, x)| x) - .collect(); - - let mut new_result = SparseTensor::empty(new_structure); - for (idx, t) in result.iter_trace(trace).filter(|(_, t)| *t != T::default()) { - new_result.set(&idx, t).unwrap(); - } - result = new_result; - } - result - } - pub fn contract_with_dense(&self, other: &DenseTensor) -> Option> { - if let Some((i, j)) = self.match_index(other) { - let final_structure = self.structure().merge_at(other.structure(), (i, j)); - let mut result_data = vec![T::default(); final_structure.size()]; - - let metric = self.structure()[i].signature.negative(); - - for (index_a, nonzeros, fiber_a) in self.iter_fibers(i) { - for (index_b, fiber_b) in other.iter_fibers(j) { - let result_index = final_structure - .flat_index( - &index_a[..i] - .iter() - .chain(&index_a[i + 1..]) - .chain(&index_b[..j]) - .chain(&index_b[j + 1..]) - .cloned() - .collect::>(), - ) - .unwrap(); - for (i, k) in nonzeros.iter().enumerate() { - // Adjust indices for fetching from the other tensor - if metric[*k] { - result_data[result_index] -= *fiber_a[i] * *fiber_b[*k]; - } else { - result_data[result_index] += *fiber_a[i] * *fiber_b[*k]; - } - } - } - } - - let result = DenseTensor { - data: result_data, - structure: final_structure, - }; - - if result.traces().is_empty() { - return Some(result); - } else { - return Some(result.internal_contract()); - } - } - None - } - - pub fn contract_with_sparse(&self, other: &Self) -> Option { - if let Some((i, j)) = self.match_index(other) { - let final_structure = self.structure().merge_at(other.structure(), (i, j)); - let mut result_data = BTreeMap::new(); - - let metric = self.structure()[i].signature.negative(); - - for (index_a, nonzeros_a, fiber_a) in self.iter_fibers(i) { - for (index_b, nonzeros_b, fiber_b) in other.iter_fibers(j) { - let result_index = index_a[..i] - .iter() - .chain(&index_a[i + 1..]) - .chain(&index_b[..j]) - .chain(&index_b[j + 1..]) - .cloned() - .collect::>(); - - let mut value = T::default(); - let mut nonzero = false; - for (i, j, x) in nonzeros_a.iter().enumerate().filter_map(|(i, &x)| { - nonzeros_b.binary_search(&x).ok().map(|j| (i, j, x)) // Only store the positions - }) { - // Adjust indices for fetching from the other tensor - if metric[x] { - value -= *fiber_a[i] * *fiber_b[j]; - } else { - value += *fiber_a[i] * *fiber_b[j]; - } - - nonzero = true; - } - - if nonzero && value != T::default() { - result_data.insert(result_index, value); - } - } - } - - let result = SparseTensor { - elements: result_data, - structure: final_structure, - }; - - if result.traces().is_empty() { - return Some(result); - } else { - return 
Some(result.internal_contract()); - } - } - None - } -} - -#[allow(dead_code)] -struct SymbolicTensor { - structure: TensorStructure, - expression: String, -} - -impl HasTensorStructure for SymbolicTensor { - fn structure(&self) -> &TensorStructure { - &self.structure - } -} - -#[allow(dead_code)] -enum Tensor { - Num(NumTensor), - Symbolic(SymbolicTensor), -} +/// Tensors as defined in the UFO format +pub mod ufo; #[cfg(test)] mod tests; diff --git a/src/tensor/arithmetic.rs b/src/tensor/arithmetic.rs new file mode 100644 index 00000000..bd71c01d --- /dev/null +++ b/src/tensor/arithmetic.rs @@ -0,0 +1,214 @@ +use crate::tensor::{ConcreteIndex, GetTensorData, SetTensorData}; + +use super::{DenseTensor, SparseTensor, TensorStructure}; +use num::traits::Num; +use std::ops::{Add, Mul, Sub}; + +impl Add> for DenseTensor +where + T: Num + Clone + Copy, + + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn add(self, other: DenseTensor) -> DenseTensor { + assert!(self.structure().same_external(other.structure())); + + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.get(&permuted_indices).unwrap(); + let _ = result.set(&indices, *self_value + *value); + } + result + } +} + +impl Add> for SparseTensor +where + T: Num + Clone + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = SparseTensor; + fn add(self, other: SparseTensor) -> SparseTensor { + assert!(self.structure().same_external(other.structure())); + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.smart_get(&permuted_indices).unwrap().into_owned(); + result.set(&indices, self_value + *value).unwrap(); + } + + result + } +} + +impl Add> for DenseTensor +where + T: Num + Clone + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn add(self, other: SparseTensor) -> DenseTensor { + assert!(self.structure().same_external(other.structure())); + + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.get(&permuted_indices).unwrap(); + result.set(&indices, *self_value + *value).unwrap(); + } + result + } +} + +impl Add> for SparseTensor +where + T: Num + Clone + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn add(self, other: DenseTensor) -> DenseTensor { + other + self + } +} + +impl Sub> for DenseTensor +where + T: Num + Clone + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn sub(self, other: DenseTensor) -> DenseTensor { + assert!(self.structure().same_external(other.structure())); + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.get(&permuted_indices).unwrap(); + result.set(&indices, *self_value - 
*value).unwrap(); + } + result + } +} + +impl Sub> for SparseTensor +where + T: Num + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = SparseTensor; + fn sub(self, other: SparseTensor) -> SparseTensor { + assert!(self.structure().same_external(other.structure())); + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.smart_get(&permuted_indices).unwrap().into_owned(); + result.set(&indices, self_value - *value).unwrap(); + } + + result + } +} + +impl Sub> for DenseTensor +where + T: Num + Clone + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn sub(self, other: SparseTensor) -> DenseTensor { + assert!(self.structure().same_external(other.structure())); + let permutation = self + .structure() + .find_permutation(other.structure()) + .unwrap(); + + let mut result = self.clone(); + + for (indices, value) in other.iter() { + let permuted_indices: Vec = + permutation.iter().map(|&index| indices[index]).collect(); + let self_value = self.get(&permuted_indices).unwrap(); + result.set(&indices, *self_value - *value).unwrap(); + } + result + } +} + +impl Sub> for SparseTensor +where + T: Num + Clone + Default + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn sub(self, other: DenseTensor) -> DenseTensor { + other - self + } +} + +impl Mul for DenseTensor +where + T: Num + Clone + Copy, + I: TensorStructure + Clone, +{ + type Output = DenseTensor; + fn mul(self, other: T) -> DenseTensor { + let mut result = self.clone(); + + for (indices, value) in self.iter() { + result.set(&indices, other * *value).unwrap(); + } + result + } +} + +impl Mul for SparseTensor +where + T: Num + Copy + Default, + I: TensorStructure + Clone, +{ + type Output = SparseTensor; + fn mul(self, other: T) -> Self::Output { + let mut result = self.clone(); + + for (indices, value) in self.iter() { + result.set(&indices, other * *value).unwrap(); + } + result + } +} diff --git a/src/tensor/contraction.rs b/src/tensor/contraction.rs new file mode 100644 index 00000000..e7e9e099 --- /dev/null +++ b/src/tensor/contraction.rs @@ -0,0 +1,795 @@ +use ahash::AHashMap; + + + +use super::{ + DataIterator, DataTensor, DenseTensor, FallibleAddAssign, FallibleMul, FallibleSubAssign, + HasTensorData, NumTensor, Representation, SetTensorData, SparseTensor, StructureContract, + TensorStructure, TrySmallestUpgrade, +}; + +use std::{ + fmt::Debug, + // intrinsics::needs_drop, + ops::Neg, +}; + +trait LeastCommonStorage: HasTensorData + SetTensorData { + type OutStorage: SetTensorData; + fn least_common_storage(&self, other: &Other) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b Other::Data, LCM = LCMData>, + LCMData: Default + Clone; + + fn empty(structure: Self::Structure) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b Other::Data, LCM = LCMData>, + LCMData: Default + Clone; +} + +impl LeastCommonStorage> for DenseTensor +where + T: Clone, + U: Clone, + I: TensorStructure + StructureContract + Clone, +{ + type OutStorage = DenseTensor; + fn least_common_storage(&self, other: &DenseTensor) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + let mut final_structure = self.structure().clone(); + 
final_structure.merge(other.structure()); + DenseTensor::default(final_structure) + } + + fn empty(structure: Self::Structure) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + DenseTensor::default(structure) + } +} + +impl LeastCommonStorage> for SparseTensor +where + T: Clone, + U: Clone, + I: TensorStructure + StructureContract + Clone, +{ + type OutStorage = DenseTensor; + fn least_common_storage(&self, other: &DenseTensor) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + let mut final_structure = self.structure().clone(); + final_structure.merge(other.structure()); + DenseTensor::default(final_structure) + } + + fn empty(structure: Self::Structure) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + DenseTensor::default(structure) + } +} + +impl LeastCommonStorage> for DenseTensor +where + T: Clone, + U: Clone, + I: TensorStructure + StructureContract + Clone, +{ + type OutStorage = DenseTensor; + fn least_common_storage(&self, other: &SparseTensor) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + let mut final_structure = self.structure().clone(); + final_structure.merge(other.structure()); + DenseTensor::default(final_structure) + } + + fn empty(structure: Self::Structure) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + DenseTensor::default(structure) + } +} + +impl LeastCommonStorage> for SparseTensor +where + T: Clone, + U: Clone, + I: TensorStructure + StructureContract + Clone, +{ + type OutStorage = SparseTensor; + fn least_common_storage(&self, other: &SparseTensor) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + let mut final_structure = self.structure().clone(); + final_structure.merge(other.structure()); + SparseTensor::empty(final_structure) + } + + fn empty(structure: Self::Structure) -> Self::OutStorage + where + for<'a, 'b> &'a Self::Data: TrySmallestUpgrade<&'b T, LCM = LCMData>, + LCMData: Default + Clone, + { + SparseTensor::empty(structure) + } +} + +trait ExteriorProduct { + type Out; + fn exterior_product(&self, other: &T) -> Self::Out; +} + +impl ExteriorProduct for U +where + U: LeastCommonStorage + DataIterator, + for<'a, 'b> &'a U::Data: + TrySmallestUpgrade<&'b T::Data, LCM = LCMData> + FallibleMul<&'b T::Data, Output = LCMData>, + T: LeastCommonStorage = U::OutStorage> + DataIterator, + LCMData: Default + Clone, +{ + type Out = U::OutStorage; + + fn exterior_product(&self, other: &T) -> Self::Out { + let mut out = self.least_common_storage::(other); + + let stride = other.size(); + + for (i, u) in self.flat_iter() { + for (j, t) in other.flat_iter() { + let _ = out.set_flat(i * stride + j, u.mul_fallible(t).unwrap()); + } + } + + out + } +} + +impl DenseTensor +where + T: for<'a> std::ops::AddAssign<&'a T> + + for<'b> std::ops::SubAssign<&'b T> + + Neg + + Clone + + std::fmt::Debug, + I: TensorStructure + Clone + StructureContract, +{ + #[must_use] + + /// Contract the tensor with itself, i.e. trace over all matching indices. 
+ pub fn internal_contract(&self) -> Self { + let mut result: DenseTensor = self.clone(); + for trace in self.traces() { + let mut new_structure = self.structure.clone(); + new_structure.trace(trace[0], trace[1]); + + let mut new_result = DenseTensor::from_data_coerced(&self.data, new_structure) + .unwrap_or_else(|_| unreachable!()); + for (idx, t) in result.iter_trace(trace) { + new_result.set(&idx, t).unwrap_or_else(|_| unreachable!()); + } + result = new_result; + } + result + } +} + +impl SparseTensor +where + T: for<'a> std::ops::AddAssign<&'a T> + + for<'b> std::ops::SubAssign<&'b T> + + std::ops::Neg + + Clone + + Default + + PartialEq, + I: TensorStructure + Clone + StructureContract, +{ + #[must_use] + /// Contract the tensor with itself, i.e. trace over all matching indices. + pub fn internal_contract(&self) -> Self { + let trace = self.traces()[0]; + + // println!("trace {:?}", trace); + let mut new_structure = self.structure.clone(); + new_structure.trace(trace[0], trace[1]); + + let mut new_result = SparseTensor::empty(new_structure); + for (idx, t) in self.iter_trace(trace).filter(|(_, t)| *t != T::default()) { + new_result.set(&idx, t).unwrap_or_else(|_| unreachable!()); + } + + if new_result.traces().is_empty() { + new_result + } else { + new_result.internal_contract() + } + } +} +pub trait Contract { + type LCM; + fn contract(&self, other: &T) -> Option; +} + +pub trait SingleContract { + type LCM; + fn single_contract(&self, other: &T, i: usize, j: usize) -> Option; +} + +trait MultiContract { + type LCM; + fn multi_contract(&self, other: &T) -> Option; +} +pub trait DotProduct { + type Out: FallibleAddAssign + FallibleSubAssign + Clone + Default; +} + +impl DotProduct for U +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = O>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = O>, + O: FallibleAddAssign + FallibleSubAssign + Clone + Default, +{ + type Out = O; +} + +impl SingleContract> for DenseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + + fn single_contract(&self, other: &DenseTensor, i: usize, j: usize) -> Option { + let final_structure = self.structure.merge_at(&other.structure, (i, j)); + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let mut self_iter = self.iter_fiber(i); + let mut other_iter = other.iter_fiber(j); + + let fiber_representation: Representation = self.reps()[i]; + + for fiber_a in self_iter.by_ref() { + for fiber_b in other_iter.by_ref() { + for k in 0..usize::from(fiber_representation) { + if fiber_representation.is_neg(k) { + result_data[result_index] + .sub_assign_fallible(fiber_a[k].mul_fallible(fiber_b[k]).unwrap()); + } else { + result_data[result_index] + .add_assign_fallible(fiber_a[k].mul_fallible(fiber_b[k]).unwrap()); + } + } + result_index += 1; + } + let _ = other_iter.reset(); + } + let result = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl MultiContract> for DenseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + fn multi_contract(&self, other: &DenseTensor) -> Option { + let (permutation, self_matches, other_matches) = + 
self.structure().match_indices(other.structure()).unwrap(); + + let mut final_structure = self.structure.clone(); + final_structure.merge(&other.structure); + + // Initialize result tensor with default values + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let mut selfiter = self.iter_multi_fibers_metric(&self_matches, permutation); + + let mut other_iter = other.iter_multi_fibers(&other_matches); + while let Some(fiber_a) = selfiter.next() { + for fiber_b in other_iter.by_ref() { + for k in 0..fiber_a.len() { + if fiber_a[k].1 { + result_data[result_index].sub_assign_fallible( + fiber_b[selfiter.map[k]].mul_fallible(fiber_a[k].0).unwrap(), + ); + } else { + result_data[result_index].add_assign_fallible( + fiber_b[selfiter.map[k]].mul_fallible(fiber_a[k].0).unwrap(), + ); + } + } + result_index += 1; + } + let _ = other_iter.reset(); + } + let result: DenseTensor = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl Contract for U +where + U: SingleContract + + MultiContract>::LCM> + + ExteriorProduct>::LCM> + + TensorStructure, + U::Structure: TensorStructure, + T: SingleContract>::LCM> + + MultiContract>::LCM> + + ExteriorProduct>::LCM> + + TensorStructure, +{ + type LCM = >::LCM; + fn contract(&self, other: &T) -> Option { + if let Some((single, i, j)) = self.structure().match_index(other.structure()) { + if i >= j { + if single { + // println!("single"); + return self.single_contract(other, i, j); + } + // println!("multi"); + return self.multi_contract(other); + } + // println!("flip"); + return other.contract(self); + } + // println!("exterior"); + let result = self.exterior_product(other); + Some(result) + } +} + +impl SingleContract> for SparseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + + fn single_contract(&self, other: &DenseTensor, i: usize, j: usize) -> Option { + let final_structure = self.structure.merge_at(&other.structure, (i, j)); + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let mut self_iter = self.iter_fiber(i); + let mut other_iter = other.iter_fiber(j); + + let fiber_representation: Representation = self.reps()[i]; + + let pos = self.order(); + let stride = *final_structure + .strides() + .get(pos.wrapping_sub(2)) + .unwrap_or(&1); + + for (skipped, nonzeros, fiber_a) in self_iter.by_ref() { + result_index += skipped * stride; + + for fiber_b in other_iter.by_ref() { + for (i, k) in nonzeros.iter().enumerate() { + if fiber_representation.is_neg(*k) { + result_data[result_index] + .sub_assign_fallible(fiber_a[i].mul_fallible(fiber_b[*k]).unwrap()); + } else { + result_data[result_index] + .add_assign_fallible(fiber_a[i].mul_fallible(fiber_b[*k]).unwrap()); + } + } + result_index += 1; + } + + result_index += other_iter.reset(); + } + + let result = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl SingleContract> for DenseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + + fn single_contract(&self, other: &SparseTensor, i: usize, j: usize) -> Option { + let final_structure = 
self.structure.merge_at(&other.structure, (i, j)); + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let mut self_iter = self.iter_fiber(i); + let mut other_iter = other.iter_fiber(j); + + let fiber_representation: Representation = self.reps()[i]; + + for fiber_a in self_iter.by_ref() { + for (skipped, nonzeros, fiber_b) in other_iter.by_ref() { + result_index += skipped; + + for (i, k) in nonzeros.iter().enumerate() { + if fiber_representation.is_neg(*k) { + result_data[result_index] + .sub_assign_fallible(fiber_a[*k].mul_fallible(fiber_b[i]).unwrap()); + } else { + result_data[result_index] + .add_assign_fallible(fiber_a[*k].mul_fallible(fiber_b[i]).unwrap()); + } + } + result_index += 1; + } + result_index += other_iter.reset(); + } + + let result = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl MultiContract> for SparseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + fn multi_contract(&self, other: &DenseTensor) -> Option { + let (permutation, self_matches, other_matches) = + self.structure().match_indices(other.structure()).unwrap(); + + let mut final_structure = self.structure.clone(); + let mergepoint = final_structure.merge(&other.structure); + + // println!("mergepoint {:?}", mergepoint); + let stride = if let Some(pos) = mergepoint { + *final_structure + .strides() + .get(pos.saturating_sub(1)) + .unwrap_or(&1) + } else { + 1 + }; + + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let mut selfiter = self.iter_multi_fibers_metric(&self_matches, permutation); + let mut other_iter = other.iter_multi_fibers(&other_matches); + + let mut max_skip = 0; + while let Some((skipped, nonzeros, fiber_a)) = selfiter.next() { + result_index += skipped * stride; + if skipped > max_skip { + max_skip = skipped; + } + for fiber_b in other_iter.by_ref() { + for (i, k) in nonzeros.iter().enumerate() { + if fiber_a[i].1 { + result_data[result_index].sub_assign_fallible( + fiber_a[i] + .0 + .mul_fallible(fiber_b[selfiter.map[*k]]) + .unwrap(), + ); + } else { + result_data[result_index].add_assign_fallible( + fiber_a[i] + .0 + .mul_fallible(fiber_b[selfiter.map[*k]]) + .unwrap(), + ); + } + } + result_index += 1; + } + let _ = other_iter.reset(); + } + if max_skip > 0 { + // println!("skippedDS {max_skip}"); + } + let result = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl MultiContract> for DenseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, +{ + type LCM = DenseTensor; + + fn multi_contract(&self, other: &SparseTensor) -> Option { + let (permutation, self_matches, other_matches) = + self.structure().match_indices(other.structure()).unwrap(); + + let mut final_structure = self.structure.clone(); + final_structure.merge(&other.structure); + + let mut result_data = vec![U::Out::default(); final_structure.size()]; + let mut result_index = 0; + + let selfiter = self.iter_multi_fibers_metric(&self_matches, permutation.clone()); + let mut other_iter = other.iter_multi_fibers_metric( + &other_matches, + permutation.clone().inverse().normalize(true), + ); + + let mut 
max_skip = 0; + for fiber_a in selfiter { + while let Some((skipped, nonzeros, fiber_b)) = other_iter.next() { + result_index += skipped; + if skipped > max_skip { + max_skip = skipped; + } + + for (i, k) in nonzeros.iter().enumerate() { + if fiber_a[other_iter.map[*k]].1 { + result_data[result_index].sub_assign_fallible( + fiber_a[other_iter.map[*k]] + .0 + .mul_fallible(fiber_b[i].0) + .unwrap(), + ); + } else { + result_data[result_index].add_assign_fallible( + fiber_a[other_iter.map[*k]] + .0 + .mul_fallible(fiber_b[i].0) + .unwrap(), + ); + } + } + result_index += 1; + } + result_index += other_iter.reset(); + } + + if max_skip > 0 { + // println!("skippedSD {max_skip}"); + } + + let result = DenseTensor { + data: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl SingleContract> for SparseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, + U::Out: PartialEq, +{ + type LCM = SparseTensor; + + fn single_contract(&self, other: &SparseTensor, i: usize, j: usize) -> Option { + let final_structure = self.structure.merge_at(&other.structure, (i, j)); + let mut result_data = AHashMap::default(); + let mut result_index = 0; + let pos = self.order(); + + let mut self_iter = self.iter_fiber(i); + let mut other_iter = other.iter_fiber(j); + + let metric = self.external_structure()[i].representation.negative(); + + let stride = *final_structure + .strides() + .get(pos.wrapping_sub(2)) + .unwrap_or(&1); + + for (skipped_a, nonzeros_a, fiber_a) in self_iter.by_ref() { + result_index += skipped_a * stride; + for (skipped_b, nonzeros_b, fiber_b) in other_iter.by_ref() { + result_index += skipped_b; + let mut value = U::Out::default(); + let mut nonzero = false; + + for (i, j, x) in nonzeros_a + .iter() + .enumerate() + .filter_map(|(i, &x)| nonzeros_b.binary_search(&x).ok().map(|j| (i, j, x))) + { + if metric[x] { + value.sub_assign_fallible(fiber_a[i].mul_fallible(fiber_b[j]).unwrap()); + } else { + value.add_assign_fallible(fiber_a[i].mul_fallible(fiber_b[j]).unwrap()); + } + nonzero = true; + } + + if nonzero && value != U::Out::default() { + result_data.insert(result_index, value); + } + result_index += 1; + } + result_index += other_iter.reset(); + } + + let result = SparseTensor { + elements: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl MultiContract> for SparseTensor +where + for<'a, 'b> &'a U: FallibleMul<&'b T, Output = U::Out>, + for<'a, 'b> &'a T: FallibleMul<&'b U, Output = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, + U::Out: PartialEq, +{ + type LCM = SparseTensor; + + fn multi_contract(&self, other: &SparseTensor) -> Option { + let (permutation, self_matches, other_matches) = + self.structure().match_indices(other.structure()).unwrap(); + + let mut final_structure = self.structure.clone(); + let mergepoint = final_structure.merge(&other.structure); + let mut result_data = AHashMap::default(); + + // println!("mergepoint {:?}", mergepoint); + let stride = if let Some(pos) = mergepoint { + *final_structure + .strides() + .get(pos.saturating_sub(1)) + .unwrap_or(&1) + } else { + 1 + }; + + let mut result_index = 0; + + let mut self_iter = self.iter_multi_fibers_metric(&self_matches, permutation); + let mut other_iter = other.iter_multi_fibers(&other_matches); + while let Some((skipped_a, nonzeros_a, fiber_a)) = self_iter.next() { + result_index += 
skipped_a * stride; + + for (skipped_b, nonzeros_b, fiber_b) in other_iter.by_ref() { + result_index += skipped_b; + let mut value = U::Out::default(); + let mut nonzero = false; + + for (i, j, _x) in nonzeros_a.iter().enumerate().filter_map(|(i, &x)| { + nonzeros_b + .binary_search(&self_iter.map[x]) + .ok() + .map(|j| (i, j, x)) + }) { + if fiber_a[i].1 { + value.sub_assign_fallible(fiber_a[i].0.mul_fallible(fiber_b[j]).unwrap()); + } else { + value.add_assign_fallible(fiber_a[i].0.mul_fallible(fiber_b[j]).unwrap()); + } + nonzero = true; + } + + if nonzero && value != U::Out::default() { + result_data.insert(result_index, value); + } + result_index += 1; + } + result_index += other_iter.reset(); + } + + let result = SparseTensor { + elements: result_data, + structure: final_structure, + }; + + Some(result) + } +} + +impl Contract> for DataTensor +where + for<'a, 'b> &'a U: + FallibleMul<&'b T, Output = U::Out> + TrySmallestUpgrade<&'b T, LCM = U::Out>, + for<'a, 'b> &'a T: + FallibleMul<&'b U, Output = U::Out> + TrySmallestUpgrade<&'b U, LCM = U::Out>, + U: DotProduct, + I: TensorStructure + Clone + StructureContract, + U::Out: PartialEq, + I: Clone + TensorStructure + StructureContract, + T: Clone + Debug, + U: Clone + Debug, +{ + type LCM = DataTensor; + fn contract(&self, other: &DataTensor) -> Option> { + match (self, other) { + (DataTensor::Dense(s), DataTensor::Dense(o)) => Some(DataTensor::Dense(s.contract(o)?)), + (DataTensor::Dense(s), DataTensor::Sparse(o)) => { + Some(DataTensor::Dense(s.contract(o)?)) + } + (DataTensor::Sparse(s), DataTensor::Dense(o)) => { + Some(DataTensor::Dense(s.contract(o)?)) + } + (DataTensor::Sparse(s), DataTensor::Sparse(o)) => { + Some(DataTensor::Sparse(s.contract(o)?)) + } + } + } +} + +impl Contract> for NumTensor +where + I: Clone + TensorStructure + StructureContract, +{ + type LCM = NumTensor; + fn contract(&self, other: &NumTensor) -> Option { + match (self, other) { + (NumTensor::Float(a), NumTensor::Float(b)) => Some(NumTensor::Float(a.contract(b)?)), + (NumTensor::Float(a), NumTensor::Complex(b)) => { + Some(NumTensor::Complex(a.contract(b)?)) + } + (NumTensor::Complex(a), NumTensor::Float(b)) => { + Some(NumTensor::Complex(a.contract(b)?)) + } + (NumTensor::Complex(a), NumTensor::Complex(b)) => { + Some(NumTensor::Complex(a.contract(b)?)) + } + } + } +} diff --git a/src/tensor/data.rs b/src/tensor/data.rs new file mode 100644 index 00000000..12c2739d --- /dev/null +++ b/src/tensor/data.rs @@ -0,0 +1,843 @@ +use super::{ + atomic_expanded_label_id, ConcreteIndex, DenseTensorLinearIterator, HasName, Slot, + SparseTensorLinearIterator, TensorStructure, TracksCount, TrySmallestUpgrade, VecStructure, +}; +use ahash::AHashMap; +use derive_more::From; +use enum_try_as_inner::EnumTryAsInner; +use indexmap::IndexMap; +use num::Zero; +use serde::{Deserialize, Serialize}; +use smartstring::alias::String; +use std::{borrow::Cow, collections::HashMap}; +use symbolica::domains::float::Complex; +use symbolica::{representations::Atom, representations::Symbol}; + +pub trait DataIterator { + type FlatIter<'a>: Iterator + where + Self: 'a, + T: 'a; + + fn flat_iter(&self) -> Self::FlatIter<'_>; +} + +impl DataIterator for SparseTensor { + type FlatIter<'a> = SparseTensorLinearIterator<'a, T> where I:'a,T: 'a; + + fn flat_iter(&self) -> Self::FlatIter<'_> { + SparseTensorLinearIterator::new(self) + } +} + +impl DataIterator for DenseTensor { + type FlatIter<'a> = DenseTensorLinearIterator<'a,T,I> where I:'a,T: 'a; + + fn flat_iter(&self) -> 
Self::FlatIter<'_> {
+        DenseTensorLinearIterator::new(self)
+    }
+}
+
+trait Settable {
+    type SetData;
+    fn set(&mut self, index: usize, data: Self::SetData);
+}
+
+impl<T> Settable for Vec<T> {
+    type SetData = T;
+    fn set(&mut self, index: usize, data: T) {
+        self[index] = data;
+    }
+}
+
+impl<T> Settable for AHashMap<usize, T> {
+    type SetData = T;
+    fn set(&mut self, index: usize, data: T) {
+        self.insert(index, data);
+    }
+}
+
+/// Trait for getting the data of a tensor
+pub trait HasTensorData: TensorStructure {
+    type Data: Clone;
+    // type Storage: Settable;
+    /// Returns all the data in the tensor, without any structure
+    fn data(&self) -> Vec<Self::Data>;
+
+    /// Returns all the indices of the tensor, the order of the indices is the same as the order of the data
+    fn indices(&self) -> Vec<Vec<ConcreteIndex>>;
+
+    /// Returns a hashmap of the data, with the (expanded) indices as keys
+    fn hashmap(&self) -> IndexMap<Vec<ConcreteIndex>, Self::Data>;
+
+    /// Returns a hashmap of the data, with the shadowed indices as keys
+    fn symhashmap(&self, id: Symbol) -> HashMap<Atom, Self::Data>;
+}
+
+/// Trait for setting the data of a tensor
+pub trait SetTensorData {
+    type SetData;
+    /// Set the data at the given indices, returns an error if the indices are out of bounds
+    ///
+    /// # Errors
+    ///
+    /// Forwards the error from [`TensorStructure::verify_indices`]
+    ///
+    fn set(&mut self, indices: &[ConcreteIndex], value: Self::SetData) -> Result<(), String>;
+
+    fn set_flat(&mut self, index: usize, value: Self::SetData) -> Result<(), String>;
+}
+
+/// Trait for getting the data of a tensor
+pub trait GetTensorData {
+    type GetData;
+
+    fn get(&self, indices: &[ConcreteIndex]) -> Result<&Self::GetData, String>;
+
+    fn get_linear(&self, index: usize) -> Option<&Self::GetData>;
+}
+
+/// Sparse data tensor, generic on storage type `T`, and structure type `I`.
+///
+/// Stores data in a hashmap keyed by `usize`, using ahash's hashmap.
+/// The `usize` key is the flattened index of the corresponding position in the dense tensor.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SparseTensor<T, I = VecStructure> {
+    pub elements: AHashMap<usize, T>,
+    pub structure: I,
+}
+
+impl<T, I> HasTensorData for SparseTensor<T, I>
+where
+    T: Clone,
+    I: TensorStructure,
+{
+    type Data = T;
+    // type Storage = AHashMap<usize, T>;
+
+    fn data(&self) -> Vec<T> {
+        self.elements.values().cloned().collect()
+    }
+
+    fn indices(&self) -> Vec<Vec<ConcreteIndex>> {
+        self.elements
+            .keys()
+            .map(|k| self.expanded_index(*k).unwrap())
+            .collect()
+    }
+
+    fn hashmap(&self) -> IndexMap<Vec<ConcreteIndex>, T> {
+        let mut hashmap = IndexMap::new();
+        for (k, v) in self.iter() {
+            hashmap.insert(k.clone(), v.clone());
+        }
+        hashmap
+    }
+
+    fn symhashmap(&self, id: Symbol) -> HashMap<Atom, T> {
+        let mut hashmap = HashMap::new();
+
+        for (k, v) in &self.elements {
+            hashmap.insert(
+                atomic_expanded_label_id(&self.expanded_index(*k).unwrap(), id),
+                v.clone(),
+            );
+        }
+        hashmap
+    }
+}
+
+impl<T, I> SetTensorData for SparseTensor<T, I>
+where
+    I: TensorStructure,
+{
+    type SetData = T;
+    /// fallible set method, returns an error if the indices are out of bounds.
+    /// Does not check if the inserted value is zero.
+    fn set(&mut self, indices: &[ConcreteIndex], value: T) -> Result<(), String> {
+        self.verify_indices(indices)?;
+        self.elements
+            .insert(self.flat_index(indices).unwrap(), value);
+        Ok(())
+    }
+
+    /// fallible set given a flat index, returns an error if the indices are out of bounds.
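+    ///
+    /// A minimal usage sketch (`structure` is an assumed, already-built structure;
+    /// not taken from a test in this diff):
+    ///
+    /// ```ignore
+    /// let mut t: SparseTensor<f64, VecStructure> = SparseTensor::empty(structure);
+    /// t.set_flat(3, 1.5).unwrap(); // the flat index must be smaller than t.size()
+    /// assert_eq!(t.get_linear(3), Some(&1.5));
+    /// ```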
+ fn set_flat(&mut self, index: usize, value: T) -> Result<(), String> { + if index >= self.size() { + return Err("Index out of bounds".into()); + } + self.elements.insert(index, value); + Ok(()) + } +} +impl GetTensorData for SparseTensor +where + I: TensorStructure, +{ + type GetData = T; + fn get(&self, indices: &[ConcreteIndex]) -> Result<&T, String> { + self.verify_indices(indices)?; + self.elements + .get(&self.flat_index(indices).unwrap()) + .ok_or("No elements at that spot".into()) + } + + fn get_linear(&self, index: usize) -> Option<&T> { + self.elements.get(&index) + } +} + +impl TensorStructure for SparseTensor +where + I: TensorStructure, +{ + type Structure = I; + fn structure(&self) -> &Self::Structure { + &self.structure + } + + fn mut_structure(&mut self) -> &mut Self::Structure { + &mut self.structure + } + fn external_structure(&self) -> &[Slot] { + self.structure.external_structure() + } +} + +impl HasName for SparseTensor +where + I: HasName, +{ + type Name = I::Name; + + fn name(&self) -> Option::Name>> { + self.structure.name() + } + fn set_name(&mut self, name: &Self::Name) { + self.structure.set_name(name); + } +} + +impl TracksCount for SparseTensor +where + I: TracksCount, +{ + fn contractions_num(&self) -> usize { + self.structure.contractions_num() + } +} + +impl TrySmallestUpgrade> for SparseTensor +where + U: TrySmallestUpgrade, + U::LCM: Clone, + I: TensorStructure + Clone, +{ + type LCM = SparseTensor; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone, + { + let structure = self.structure.clone(); + let elements: Option> = self + .elements + .iter() + .map(|(k, v)| match v.try_upgrade() { + Some(Cow::Owned(u)) => Some((*k, u)), + Some(Cow::Borrowed(u)) => Some((*k, u.clone())), + None => None, + }) + .collect(); + Some(Cow::Owned(SparseTensor { + elements: elements?, + structure, + })) + } +} + +impl SparseTensor +where + I: TensorStructure, +{ + /// Create a new empty sparse tensor with the given structure + pub fn empty(structure: I) -> Self { + SparseTensor { + elements: AHashMap::default(), + structure, + } + } + + /// Checks if there is a value at the given indices + pub fn is_empty_at(&self, indices: &[ConcreteIndex]) -> bool { + !self + .elements + .contains_key(&self.flat_index(indices).unwrap()) + } + + /// Calulates how dense the tensor is, i.e. the ratio of non-zero elements to total elements + pub fn density(&self) -> f64 { + f64::from(self.elements.len() as u32) / f64::from(self.size() as u32) + } + + /// Converts the sparse tensor to a dense tensor, with the same structure + pub fn to_dense(&self) -> DenseTensor + where + T: Clone + Default, + I: Clone, + { + let mut dense = DenseTensor::default(self.structure.clone()); + for (indices, value) in self.elements.iter() { + let _ = dense.set_flat(*indices, value.clone()); + } + dense + } + + /// fallible smart set method, returns an error if the indices are out of bounds. + /// If the value is zero, it removes the element at the given indices. 
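+    ///
+    /// Hedged sketch of the intended behaviour (the indices are illustrative):
+    ///
+    /// ```ignore
+    /// t.smart_set(&[0, 1], 5.0).unwrap(); // stores the value
+    /// t.smart_set(&[0, 1], 0.0).unwrap(); // removes the entry instead of storing a zero
+    /// assert!(t.is_empty_at(&[0, 1]));
+    /// ```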
+ pub fn smart_set(&mut self, indices: &[ConcreteIndex], value: T) -> Result<(), String> + where + T: Default + PartialEq, + { + self.verify_indices(indices)?; + if value == T::default() { + _ = self.elements.remove(&self.flat_index(indices).unwrap()); + return Ok(()); + } + self.elements + .insert(self.flat_index(indices).unwrap(), value); + Ok(()) + } + + /// Generates a new sparse tensor from the given data and structure + pub fn from_data(data: &[(Vec, T)], structure: I) -> Result + where + T: Clone, + { + let mut dimensions = vec![0; structure.order()]; + for (index, _) in data { + if index.len() != structure.order() { + return Err("Mismatched order".into()); + } + for (i, &idx) in index.iter().enumerate() { + if idx >= dimensions[i] { + dimensions[i] = idx + 1; + } + } + } + let mut elements = AHashMap::default(); + for (index, value) in data { + elements.insert(structure.flat_index(index).unwrap(), value.clone()); + } + + Ok(SparseTensor { + elements, + structure, + }) + } + + /// fallible smart get method, returns an error if the indices are out of bounds. + /// If the index is in the bTree return the value, else return zero. + pub fn smart_get(&self, indices: &[ConcreteIndex]) -> Result, String> + where + T: Default + Clone, + { + self.verify_indices(indices)?; + // if the index is in the bTree return the value, else return default, lazily allocating the default + Ok( + match self.elements.get(&self.flat_index(indices).unwrap()) { + Some(value) => Cow::Borrowed(value), + None => Cow::Owned(T::default()), + }, + ) + } +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct DenseTensor { + pub data: Vec, + pub structure: I, +} + +impl TensorStructure for DenseTensor +where + I: TensorStructure, +{ + type Structure = I; + fn structure(&self) -> &Self::Structure { + &self.structure + } + fn mut_structure(&mut self) -> &mut Self::Structure { + &mut self.structure + } + fn external_structure(&self) -> &[Slot] { + self.structure.external_structure() + } +} + +impl HasName for DenseTensor +where + I: HasName, +{ + type Name = I::Name; + + fn name(&self) -> Option::Name>> { + self.structure.name() + } + fn set_name(&mut self, name: &Self::Name) { + self.structure.set_name(name); + } +} + +impl TracksCount for DenseTensor +where + I: TracksCount, +{ + fn contractions_num(&self) -> usize { + self.structure.contractions_num() + } +} + +impl DenseTensor +where + I: TensorStructure, +{ + pub fn default(structure: I) -> Self { + let length = if structure.is_scalar() { + 1 + } else { + structure.size() + }; + DenseTensor { + data: vec![T::default(); length], + structure, + } + } +} + +impl DenseTensor +where + I: TensorStructure, +{ + pub fn zero(structure: I) -> Self { + let length = if structure.is_scalar() { + 1 + } else { + structure.size() + }; + DenseTensor { + data: vec![T::zero(); length], + structure, + } + } +} + +impl TrySmallestUpgrade> for DenseTensor +where + U: TrySmallestUpgrade, + U::LCM: Clone, + I: TensorStructure + Clone, +{ + type LCM = DenseTensor; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone, + { + let structure = self.structure.clone(); + let data: Option> = self + .data + .iter() + .map(|v| match v.try_upgrade() { + Some(Cow::Owned(u)) => Some(u), + Some(Cow::Borrowed(u)) => Some(u.clone()), + None => None, + }) + .collect(); + Some(Cow::Owned(DenseTensor { + data: data?, + structure, + })) + } +} + +impl DenseTensor +where + I: TensorStructure, +{ + /// Generates a new dense tensor from the given data and structure + pub fn 
from_data(data: &[T], structure: I) -> Result { + if data.len() != structure.size() && !(data.len() == 1 && structure.is_scalar()) { + return Err("Data length does not match shape".into()); + } + Ok(DenseTensor { + data: data.to_vec(), + structure, + }) + } + + /// Generates a new dense tensor from the given data and structure, truncating the data if it is too long with respect to the structure + pub fn from_data_coerced(data: &[T], structure: I) -> Result { + if data.len() < structure.size() { + return Err("Data length is too small".into()); + } + let mut data = data.to_vec(); + if structure.is_scalar() { + data.truncate(1); + } else { + data.truncate(structure.size()); + } + Ok(DenseTensor { data, structure }) + } +} + +impl DenseTensor +where + I: TensorStructure + Clone, +{ + /// converts the dense tensor to a sparse tensor, with the same structure + pub fn to_sparse(&self) -> SparseTensor + where + T: Clone + Default + PartialEq, + { + let mut sparse = SparseTensor::empty(self.structure.clone()); + for (i, value) in self.iter() { + if *value != T::default() { + let _ = sparse.set(&i, value.clone()); + } + } + sparse + } +} +// why no specialization? :( +// impl From> for DenseTensor +// where +// U: Into, +// { +// fn from(other: DenseTensor) -> Self { +// let data = other.data.into_iter().map(|x| x.into()).collect(); +// DenseTensor { +// data, +// structure: other.structure, +// } +// } +// } + +impl DenseTensor +where + I: Clone, +{ + pub fn convert_to(&self) -> DenseTensor + where + U: for<'a> From<&'a T>, + { + let data = self.data.iter().map(|x| x.into()).collect(); + DenseTensor { + data, + structure: self.structure.clone(), + } + } +} + +impl SparseTensor +where + I: Clone, +{ + pub fn convert_to(&self) -> SparseTensor + where + U: for<'a> From<&'a T>, + { + let elements = self.elements.iter().map(|(k, v)| (*k, v.into())).collect(); + SparseTensor { + elements, + structure: self.structure.clone(), + } + } +} + +impl HasTensorData for DenseTensor +where + T: Clone, + I: TensorStructure, +{ + type Data = T; + fn data(&self) -> Vec { + self.data.clone() + } + + fn indices(&self) -> Vec> { + let mut indices = Vec::new(); + for i in 0..self.size() { + indices.push(self.expanded_index(i).unwrap()); + } + indices + } + + fn hashmap(&self) -> IndexMap, T> { + let mut hashmap = IndexMap::new(); + for (k, v) in self.iter() { + hashmap.insert(k.clone(), v.clone()); + } + hashmap + } + + fn symhashmap(&self, id: Symbol) -> HashMap { + let mut hashmap = HashMap::new(); + + for (k, v) in self.iter() { + hashmap.insert(atomic_expanded_label_id(&k, id), v.clone()); + } + hashmap + } +} + +impl SetTensorData for DenseTensor +where + I: TensorStructure, +{ + type SetData = T; + fn set(&mut self, indices: &[ConcreteIndex], value: T) -> Result<(), String> { + self.verify_indices(indices)?; + let idx = self.flat_index(indices); + if let Ok(i) = idx { + self.data[i] = value; + } + Ok(()) + } + + fn set_flat(&mut self, index: usize, value: T) -> Result<(), String> { + if index < self.size() { + self.data[index] = value; + } else { + return Err("Index out of bounds".into()); + } + Ok(()) + } +} + +impl GetTensorData for DenseTensor +where + I: TensorStructure, +{ + type GetData = T; + fn get_linear(&self, index: usize) -> Option<&T> { + self.data.get(index) + } + + fn get(&self, indices: &[ConcreteIndex]) -> Result<&T, String> { + if let Ok(idx) = self.flat_index(indices) { + Ok(&self.data[idx]) + } else if self.structure.is_scalar() && indices.is_empty() { + Ok(&self.data[0]) + } else { + Err("Index 
out of bounds".into()) + } + } +} + +/// Enum for storing either a dense or a sparse tensor, with the same structure +#[derive(Debug, Clone, EnumTryAsInner, Serialize, Deserialize, From)] +#[derive_err(Debug)] +pub enum DataTensor { + Dense(DenseTensor), + Sparse(SparseTensor), +} + +impl HasTensorData for DataTensor +where + I: TensorStructure, + T: Clone, +{ + type Data = T; + fn data(&self) -> Vec { + match self { + DataTensor::Dense(d) => d.data(), + DataTensor::Sparse(s) => s.data(), + } + } + + fn indices(&self) -> Vec> { + match self { + DataTensor::Dense(d) => d.indices(), + DataTensor::Sparse(s) => s.indices(), + } + } + + fn hashmap(&self) -> IndexMap, T> { + match self { + DataTensor::Dense(d) => d.hashmap(), + DataTensor::Sparse(s) => s.hashmap(), + } + } + + fn symhashmap(&self, id: Symbol) -> HashMap { + match self { + DataTensor::Dense(d) => d.symhashmap(id), + DataTensor::Sparse(s) => s.symhashmap(id), + } + } +} + +impl TensorStructure for DataTensor +where + I: TensorStructure, + T: Clone, +{ + type Structure = I; + fn structure(&self) -> &Self::Structure { + match self { + DataTensor::Dense(d) => d.structure(), + DataTensor::Sparse(s) => s.structure(), + } + } + fn mut_structure(&mut self) -> &mut Self::Structure { + match self { + DataTensor::Dense(d) => d.mut_structure(), + DataTensor::Sparse(s) => s.mut_structure(), + } + } + fn external_structure(&self) -> &[Slot] { + match self { + DataTensor::Dense(d) => d.external_structure(), + + DataTensor::Sparse(s) => s.external_structure(), + } + } +} + +impl HasName for DataTensor +where + I: HasName, + T: Clone, + I: TensorStructure, +{ + type Name = I::Name; + + fn name(&self) -> Option::Name>> { + match self { + DataTensor::Dense(d) => d.name(), + DataTensor::Sparse(s) => s.name(), + } + } + fn set_name(&mut self, name: &Self::Name) { + match self { + DataTensor::Dense(d) => d.set_name(name), + DataTensor::Sparse(s) => s.set_name(name), + } + } +} + +impl TracksCount for DataTensor +where + I: TracksCount, + T: Clone, + I: TensorStructure, +{ + fn contractions_num(&self) -> usize { + match self { + DataTensor::Dense(d) => d.contractions_num(), + DataTensor::Sparse(s) => s.contractions_num(), + } + } +} + +/// Enum for a datatensor with specific numeric data type, generic on the structure type `I` +#[derive(Debug, Clone, EnumTryAsInner, Serialize, Deserialize)] +#[derive_err(Debug)] +pub enum NumTensor { + Float(DataTensor), + Complex(DataTensor, T>), +} + +impl TensorStructure for NumTensor +where + T: TensorStructure, +{ + type Structure = T; + fn structure(&self) -> &Self::Structure { + match self { + NumTensor::Float(f) => f.structure(), + NumTensor::Complex(c) => c.structure(), + } + } + + fn mut_structure(&mut self) -> &mut Self::Structure { + match self { + NumTensor::Float(f) => f.mut_structure(), + NumTensor::Complex(c) => c.mut_structure(), + } + } + + fn external_structure(&self) -> &[Slot] { + match self { + NumTensor::Float(f) => f.external_structure(), + NumTensor::Complex(c) => c.external_structure(), + } + } +} + +impl HasName for NumTensor +where + T: HasName + TensorStructure, +{ + type Name = T::Name; + + fn name(&self) -> Option::Name>> { + match self { + NumTensor::Float(f) => f.name(), + NumTensor::Complex(c) => c.name(), + } + } + fn set_name(&mut self, name: &Self::Name) { + match self { + NumTensor::Float(f) => f.set_name(name), + NumTensor::Complex(c) => c.set_name(name), + } + } +} + +impl TracksCount for NumTensor +where + T: TracksCount + TensorStructure, +{ + fn contractions_num(&self) -> 
usize { + match self { + NumTensor::Float(f) => f.contractions_num(), + NumTensor::Complex(c) => c.contractions_num(), + } + } +} + +impl From> for NumTensor +where + T: TensorStructure, +{ + fn from(other: DenseTensor) -> Self { + NumTensor::Float(DataTensor::Dense(other)) + } +} + +impl From> for NumTensor +where + T: TensorStructure, +{ + fn from(other: SparseTensor) -> Self { + NumTensor::Float(DataTensor::Sparse(other)) + } +} + +impl From, T>> for NumTensor +where + T: TensorStructure, +{ + fn from(other: DenseTensor, T>) -> Self { + NumTensor::Complex(DataTensor::Dense(other)) + } +} + +impl From, T>> for NumTensor +where + T: TensorStructure, +{ + fn from(other: SparseTensor, T>) -> Self { + NumTensor::Complex(DataTensor::Sparse(other)) + } +} diff --git a/src/tensor/iterators.rs b/src/tensor/iterators.rs new file mode 100644 index 00000000..40737360 --- /dev/null +++ b/src/tensor/iterators.rs @@ -0,0 +1,1440 @@ +//! Iterators for tensors +//! +//! Iterators for tensors are used to iterate over the elements of a tensor. +//! More specialized iterators are provided that fix a certain subset of indices, and iterate over the remaining indices. +//! At each iteration, the iterator returns a vector of references to the elements of the tensor along the fixed indices (so called fibers). +//! +//! The iterators are built using the basic index iterators provided by the `TensorStructureIterator`s. +//! + +use std::ops::{AddAssign, Neg, SubAssign}; + +use super::{ + ConcreteIndex, DenseTensor, Dimension, GetTensorData, Representation, Slot, SparseTensor, + TensorStructure, +}; +use ahash::AHashMap; + +use permutation::Permutation; + +/// An iterator over all indices of a tensor structure +/// +/// `Item` is a flat index + +pub struct TensorStructureIndexIterator<'a> { + structure: &'a [Slot], + current_flat_index: usize, +} + +impl<'a> Iterator for TensorStructureIndexIterator<'a> { + type Item = Vec; + fn next(&mut self) -> Option { + if let Ok(indices) = self.structure.expanded_index(self.current_flat_index) { + self.current_flat_index += 1; + + Some(indices) + } else { + None + } + } +} + +impl<'a> TensorStructureIndexIterator<'a> { + #[must_use] + pub fn new(structure: &'a [Slot]) -> Self { + TensorStructureIndexIterator { + structure, + current_flat_index: 0, + } + } +} + +/// An iterator over all indices of a tensor structure, keeping an index fixed + +pub struct TensorStructureFiberIterator { + pub varying_fiber_index: usize, + pub increment: usize, + pub stride_shift: Option<(usize, usize)>, + pub max: usize, +} + +/// Iterates over all indices not in the fiber, and at each index, iterates over the fiber +impl TensorStructureFiberIterator { + /// Create a new fiber iterator + /// + /// # Arguments + /// + /// * `structure` - The tensor structure + /// * `fiber_position` - The position of the fiber in the structure + /// + /// # Panics + /// + /// If the fiber index is out of bounds + pub fn new(structure: &S, fiber_position: usize) -> Self + where + S: TensorStructure, + { + assert!(fiber_position < structure.order(), "Invalid fiber index"); + + let strides = structure.strides(); + let fiber_stride = strides[fiber_position]; + let dim: usize = structure.shape()[fiber_position].into(); + + let max = structure.size() - fiber_stride * (dim - 1) - 1; + + let mut stride = None; + let mut shift = None; + let mut increment = 1; + + if fiber_position == structure.order() - 1 { + increment = *strides.get(structure.order().wrapping_sub(2)).unwrap_or(&1); + } else if fiber_position != 0 { + 
shift = Some(strides[fiber_position - 1]);
+            stride = Some(strides[fiber_position]);
+        }
+
+        TensorStructureFiberIterator {
+            varying_fiber_index: 0,
+            increment,
+            stride_shift: stride.zip(shift),
+            max,
+        }
+    }
+
+    /// Reset the iterator, so that it can be used again.
+    ///
+    /// This is cheaper than creating a new iterator.
+    pub fn reset(&mut self) {
+        self.varying_fiber_index = 0;
+    }
+}
+
+impl Iterator for TensorStructureFiberIterator {
+    type Item = usize;
+
+    /// Increment the iterator
+    ///
+    /// Increments the varying fiber index by the increment, and checks if the index is divisible by the stride, in which case it adds the shift
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.varying_fiber_index > self.max {
+            return None;
+        }
+        let ret = self.varying_fiber_index;
+
+        self.varying_fiber_index += self.increment;
+
+        if let Some((stride, shift)) = self.stride_shift {
+            if self.varying_fiber_index % stride == 0 {
+                self.varying_fiber_index += shift - stride;
+            }
+        }
+
+        Some(ret)
+    }
+}
+
+/// Iterator over all indices of a tensor structure, fixing a subset of indices
+///
+/// This version directly iterates over the expanded indices.
+///
+pub struct TensorStructureMultiFiberIteratorExpanded {
+    indices: Vec<ConcreteIndex>,
+    dims: Vec<Dimension>,
+    positions: Vec<usize>,
+    length: usize,
+    carry: bool,
+}
+
+impl TensorStructureMultiFiberIteratorExpanded {
+    pub fn new<N>(structure: &N, fiber_positions: &[bool]) -> Self
+    where
+        N: TensorStructure,
+    {
+        let positions = fiber_positions
+            .iter()
+            .enumerate()
+            .filter_map(|(i, p)| if *p { None } else { Some(i) })
+            .collect();
+        let mut dims = structure.shape();
+        let mut filter = fiber_positions.iter();
+        dims.retain(|_| !*filter.next().unwrap_or_else(|| unreachable!()));
+
+        Self {
+            indices: vec![0; dims.len()],
+            dims,
+            positions,
+            length: fiber_positions.len(),
+            carry: false,
+        }
+    }
+}
+
+impl Iterator for TensorStructureMultiFiberIteratorExpanded {
+    type Item = Vec<ConcreteIndex>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.carry {
+            return None;
+        }
+        self.carry = true;
+
+        let mut out = vec![0; self.length];
+
+        for (i, p) in self.positions.iter().zip(self.indices.iter()) {
+            out[*i] = *p;
+        }
+
+        for (i, r) in self.indices.iter_mut().zip(self.dims.iter()).rev() {
+            if self.carry {
+                *i += 1;
+                self.carry = *i == usize::from(*r);
+                *i %= usize::from(*r);
+            }
+        }
+
+        Some(out)
+    }
+}
+
+/// Iterator over all indices of a tensor structure, fixing a subset of indices
+///
+/// `Item` is a flat index, corresponding to the index where the fixed indices are set to 0.
+#[derive(Debug)]
+pub struct TensorStructureMultiFiberIterator {
+    pub varying_fiber_index: usize,
+    pub increment: usize,
+    pub fixed_strides: Vec<usize>,
+    pub shifts: Vec<usize>,
+    pub max: usize,
+}
+
+impl TensorStructureMultiFiberIterator {
+    /// Create a new multi fiber iterator
+    ///
+    /// # Arguments
+    ///
+    /// * `structure` - The tensor structure
+    /// * `fiber_positions` - A boolean array indicating which indices are fixed
+    ///
+    /// # Algorithm
+    ///
+    /// Smartly constructs the shifts and strides for each fixed index.
+    /// It skips over adjacent fixed indices for the strides.
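+    ///
+    /// As a worked sketch (the shape is an assumption for illustration): for a
+    /// rank-3 tensor of shape `[2, 3, 4]` (strides `[12, 4, 1]`) with
+    /// `fiber_positions = [false, true, false]`, the free positions are 0 and 2,
+    /// so `increment = 1`, the fixed middle index contributes a fixed stride of 4
+    /// with shift 12, and `max = (2 - 1) * 12 + (4 - 1) * 1 = 15`; the iterator
+    /// then yields the flat indices 0, 1, 2, 3, 12, 13, 14, 15, i.e. all
+    /// positions with the fixed index set to 0.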
+ /// + pub fn new(structure: &N, fiber_positions: &[bool]) -> TensorStructureMultiFiberIterator + where + N: TensorStructure, + { + let strides = structure.strides(); + let dims = structure.shape(); + let order = structure.order(); + let mut max = 0; + + // max -= 1; + + let mut increment = 1; + + let mut fixed_strides = vec![]; + let mut shifts = vec![]; + + let mut before = true; + let mut has_seen_stride = false; + let mut first = true; + + for pos in (0..order).rev() { + let is_fixed = fiber_positions[pos]; + + if is_fixed && !before && !first { + has_seen_stride = true; + fixed_strides.push(strides[pos]); + } + if !is_fixed && before && has_seen_stride { + shifts.push(strides[pos]); + } + + if !is_fixed { + max += (usize::from(dims[pos]) - 1) * strides[pos]; + if first { + increment = strides[pos]; + first = false; + } + } + + before = is_fixed; + } + + if fixed_strides.len() > shifts.len() { + fixed_strides.pop(); + } + + TensorStructureMultiFiberIterator { + varying_fiber_index: 0, + increment, + fixed_strides, + shifts, + max, + } + } + + /// Construct a pair of multi fiber iterators, each one taking as fixed what the other takes as free. + /// + /// # Arguments + /// + /// * `structure` - The tensor structure + /// * `fiber_positions` - A boolean array indicating which indices are fixed for the second iterator + /// + /// # Returns + /// + /// A pair of multi fiber iterators. + /// The first iterator considers the true values of `fiber_positions` as free, and the second iterator considers them as fixed. + pub fn new_conjugate(structure: &N, fiber_positions: &[bool]) -> (Self, Self) + where + N: TensorStructure, + { + let strides = structure.strides(); + let dims = structure.shape(); + let order = structure.order(); + let mut max = 0; + + // max -= 1; + + let mut increment = 1; + + // println!("{:?}{}", fiber_positions, increment); + let mut fixed_strides = vec![]; + let mut fixed_strides_conj = vec![]; + let mut shifts = vec![]; + let mut shifts_conj = vec![]; + + let mut before = true; + let mut has_seen_stride = false; + let mut has_seen_stride_conj = false; + let mut first = true; + let mut first_conj = true; + let mut increment_conj = 1; + + let mut max_conj = 0; + + for pos in (0..order).rev() { + let is_fixed = fiber_positions[pos]; + + if is_fixed && !before { + if !first { + has_seen_stride = true; + fixed_strides.push(strides[pos]); + } + + if has_seen_stride_conj { + shifts_conj.push(strides[pos]); + } + } + if !is_fixed && before { + if has_seen_stride { + shifts.push(strides[pos]); + } + + if !first_conj { + fixed_strides_conj.push(strides[pos]); + has_seen_stride_conj = true; + } + } + + if is_fixed { + max_conj += (usize::from(dims[pos]) - 1) * strides[pos]; + if first_conj { + increment_conj = strides[pos]; + first_conj = false; + } + } else { + max += (usize::from(dims[pos]) - 1) * strides[pos]; + if first { + increment = strides[pos]; + first = false; + } + } + + before = is_fixed; + } + + if fixed_strides.len() > shifts.len() { + fixed_strides.pop(); + } + + if fixed_strides_conj.len() > shifts_conj.len() { + fixed_strides_conj.pop(); + } + + ( + TensorStructureMultiFiberIterator { + varying_fiber_index: 0, + increment: increment_conj, + fixed_strides: fixed_strides_conj, + shifts: shifts_conj, + max: max_conj, + }, + TensorStructureMultiFiberIterator { + varying_fiber_index: 0, + increment, + fixed_strides, + shifts, + max, + }, + ) + } + + /// Reset the iterator, so that it can be used again. + /// + /// This is cheaper than creating a new iterator. 
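+    ///
+    /// Illustrative reuse pattern (assumed; it mirrors how the contraction loops
+    /// in this diff drive a fiber iterator inside a free iterator):
+    ///
+    /// ```ignore
+    /// while let Some(base) = free_iter.next() {
+    ///     for offset in fiber_iter.by_ref() {
+    ///         // visit the element at flat index `base + offset`
+    ///     }
+    ///     fiber_iter.reset(); // cheaper than rebuilding the iterator
+    /// }
+    /// ```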
+ pub fn reset(&mut self) { + self.varying_fiber_index = 0; + } +} + +impl Iterator for TensorStructureMultiFiberIterator { + type Item = usize; + + fn next(&mut self) -> Option { + if self.varying_fiber_index > self.max { + return None; + } + let ret = self.varying_fiber_index; + + self.varying_fiber_index += self.increment; + + for (i, s) in self.fixed_strides.iter().enumerate() { + if self.varying_fiber_index % s == 0 { + self.varying_fiber_index += self.shifts[i] - s; + } else { + break; + } + } + + Some(ret) + } +} + +/// Iterator over all indices of a tensor structure, fixing a subset of indices. +/// +/// This version is mostly used as the iterator over the fixed indices (considering them free), when paired with `TensorStructureMultiFiberIterator` that considers the fixed indices as truely fixed. +/// +/// `Item` is a flat index, a count, and a boolean indicating if the metric is negative. +/// The flat index corresponds to the index where the fixed indices are set to 0. +/// +/// +/// It generates the indices to then compute the tensor product of the metric along the fiber. +/// +#[derive(Debug)] +pub struct TensorStructureMultiFiberMetricIterator { + pub iterator: TensorStructureMultiFiberIterator, + pos: usize, + pub reps: Vec, + pub indices: Vec, + pub is_neg: AHashMap, + map: Vec, + permutation: Permutation, +} + +impl TensorStructureMultiFiberMetricIterator { + /// Create a new multi fiber metric iterator + /// + /// + /// + /// # Arguments + /// + pub fn new( + structure: &N, + fiber_positions: &[bool], + permutation: Permutation, + ) -> TensorStructureMultiFiberMetricIterator + where + N: TensorStructure, + { + // for f in fiber_positions { + // filter[*f] = true; + // } + let iterator = TensorStructureMultiFiberIterator::new(structure, fiber_positions); + + let mut f = fiber_positions.iter(); + let mut reps = structure.reps(); + reps.retain(|_| !*f.next().unwrap_or_else(|| unreachable!())); + let capacity = reps.iter().map(usize::from).product(); + let indices = vec![0; reps.len()]; + // println!("Reps : {:?}", reps); + + TensorStructureMultiFiberMetricIterator { + iterator, + reps, + indices, + is_neg: AHashMap::with_capacity(capacity), + pos: 0, + permutation, + map: Vec::new(), + } + } + + /// Construct a pair of multi fiber iterators. + /// + /// The first is a [`TensorStructureMultiFiberMetricIterator`], that iterates along the fiber, i.e. considers the fixed indices as free. + /// + /// The second is a [`TensorStructureMultiFiberIterator`] that considers the fixed indices as fixed, iterating over the remaining indices, not in the fiber. 
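+    ///
+    /// Sketch of the intended pairing (assumed from the contraction code in this
+    /// diff):
+    ///
+    /// ```ignore
+    /// let (mut fiber_iter, mut free_iter) =
+    ///     TensorStructureMultiFiberMetricIterator::new_conjugates(&structure, &fixed, perm);
+    /// // `free_iter` walks the indices outside the fiber; relative to each of
+    /// // its flat offsets, `fiber_iter` walks the fiber indices and reports the
+    /// // metric sign for each position.
+    /// ```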
+    ///
+    pub fn new_conjugates<N>(
+        structure: &N,
+        fiber_positions: &[bool],
+        permutation: Permutation,
+    ) -> (Self, TensorStructureMultiFiberIterator)
+    where
+        N: TensorStructure,
+    {
+        let iters = TensorStructureMultiFiberIterator::new_conjugate(structure, fiber_positions);
+        let mut f = fiber_positions.iter();
+        let mut reps = structure.reps();
+        reps.retain(|_| *f.next().unwrap_or_else(|| unreachable!()));
+        let capacity = reps.iter().map(usize::from).product();
+        let indices = vec![0; reps.len()];
+        (
+            TensorStructureMultiFiberMetricIterator {
+                iterator: iters.0,
+                reps,
+                indices,
+                is_neg: AHashMap::with_capacity(capacity),
+                pos: 0,
+                permutation,
+                map: Vec::new(),
+            },
+            iters.1,
+        )
+    }
+
+    fn increment_indices(&mut self) {
+        let mut carry = true;
+        for (i, r) in self.indices.iter_mut().rev().zip(self.reps.iter().rev()) {
+            if carry {
+                *i += 1;
+                carry = *i == usize::from(r);
+                *i %= usize::from(r);
+            }
+        }
+    }
+
+    fn has_neg(&mut self, i: usize) -> bool {
+        if let Some(is_neg) = self.is_neg.get(&i) {
+            return *is_neg;
+        }
+        let mut neg = false;
+        for (i, r) in self
+            .indices
+            .iter()
+            .zip(self.reps.iter())
+            .filter(|(_, r)| !matches!(r, Representation::Euclidean(_)))
+        {
+            neg ^= r.is_neg(*i);
+        }
+
+        self.is_neg.insert(i, neg);
+
+        neg
+    }
+
+    fn generate_map(&mut self) {
+        if self.map.len() <= self.pos {
+            let mut stride = 1;
+            let mut ind = 0;
+            for (i, d) in self
+                .permutation
+                .apply_slice(self.indices.as_slice())
+                .iter()
+                .zip(self.permutation.apply_slice(self.reps.as_slice()).iter())
+                .rev()
+            {
+                ind += stride * i;
+                stride *= usize::from(d);
+            }
+            self.map.push(ind);
+        }
+    }
+
+    pub fn reset(&mut self) -> &[usize] {
+        self.iterator.reset();
+        self.indices = vec![0; self.reps.len()];
+        self.pos = 0;
+        &self.map
+    }
+}
+
+impl Iterator for TensorStructureMultiFiberMetricIterator {
+    type Item = (usize, usize, bool);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(i) = self.iterator.next() {
+            let pos = self.pos;
+            self.generate_map();
+            self.pos += 1;
+            let neg = self.has_neg(i);
+            self.increment_indices();
+            Some((pos, i, neg))
+        } else {
+            None
+        }
+    }
+}
+
+/// Iterator over all indices of a tensor, fixing an index
+///
+/// The iterator returns a vector of references to the elements of the tensor along the fixed index (the so-called fiber).
+///
+/// In the case of a sparse tensor, the iterator returns a vector of references to the non-zero elements of the tensor along the fixed index, and the number of skipped indices.
+/// For the dense tensor, the iterator only returns the vector of references.
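+///
+/// A hedged usage sketch (the tensor and the fixed position are illustrative):
+///
+/// ```ignore
+/// // fibers obtained by fixing index position 0
+/// for fiber in dense.iter_fiber(0) {
+///     // `fiber` is a Vec<&T> of length `shape()[0]`
+/// }
+/// ```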
+pub struct TensorFiberIterator<'a, T> +where + T: TensorStructure, +{ + tensor: &'a T, + fiber_iter: TensorStructureFiberIterator, + skipped: usize, + pub fiber_dimension: Dimension, + increment: usize, +} + +impl<'a, T> TensorFiberIterator<'a, T> +where + T: TensorStructure, +{ + /// Create a new fiber iterator, from a tensor and a fixed index + pub fn new(tensor: &'a T, fiber_position: usize) -> Self { + let fiber_iter = + TensorStructureFiberIterator::new(&tensor.external_structure(), fiber_position); + let increment = tensor.strides()[fiber_position]; + + TensorFiberIterator { + tensor, + fiber_iter, + skipped: 0, + fiber_dimension: tensor.shape()[fiber_position], + increment, + } + } + + /// Reset the iterator, so that it can be used again. + #[must_use] + pub fn reset(&mut self) -> usize { + self.fiber_iter.reset(); + let skipped = self.skipped; + self.skipped = 0; + skipped + } +} + +impl<'a, T, N> Iterator for TensorFiberIterator<'a, SparseTensor> +where + N: TensorStructure, +{ + type Item = (usize, Vec, Vec<&'a T>); + + fn next(&mut self) -> Option { + if let Some(s) = self.fiber_iter.next() { + let mut out = Vec::new(); + let mut nonzeros = Vec::new(); + for i in 0..self.fiber_dimension.into() { + if let Some(v) = self.tensor.elements.get(&(s + i * self.increment)) { + nonzeros.push(i); + out.push(v); + } + } + if out.is_empty() { + self.skipped += 1; + self.next() + } else { + let skipped = self.skipped; + self.skipped = 0; + Some((skipped, nonzeros, out)) + } + } else { + None + } + } +} + +impl<'a, T, N> Iterator for TensorFiberIterator<'a, DenseTensor> +where + N: TensorStructure, +{ + type Item = Vec<&'a T>; + + fn next(&mut self) -> Option { + if let Some(s) = self.fiber_iter.next() { + let mut out = Vec::with_capacity(self.fiber_dimension.into()); + for i in 0..self.fiber_dimension.into() { + if let Some(v) = self.tensor.get_linear(s + i * self.increment) { + out.push(v); + } + } + + Some(out) + } else { + None + } + } +} + +/// Iterator over all indices of a tensor, fixing a subset of indices +/// +/// The iterator returns a vector of tuples of references to the elements of the tensor along the fixed indices (so called fibers) and bools indicating the sign of the metric. +/// It flattens the indices of the fibers. To accomodate for a permutation of the ordering of the fixed indices, the iterator builds a map between the indices of output vector and the permuted version. +/// +/// This map can be accessed using the `map` field of the iterator. +/// +/// For the sparse tensor, the iterator also returns a vector of references to the non-zero elements of the tensor along the fixed indices, and the amount of skipped indices. +/// For the dense tensor, the iterator only returns the the vector of references. +/// +/// +pub struct TensorMultiFiberMetricIterator<'a, T> +where + T: TensorStructure, +{ + tensor: &'a T, + fiber_iter: TensorStructureMultiFiberMetricIterator, + free_iter: TensorStructureMultiFiberIterator, + skipped: usize, + pub map: Vec, + capacity: usize, +} + +impl<'a, T> TensorMultiFiberMetricIterator<'a, T> +where + T: TensorStructure, +{ + /// Create a new multi fiber metric iterator + /// + /// # Arguments + /// + /// * `tensor` - A reference to the tensor + /// * `fiber_positions` - A boolean array indicating which indices are fixed + /// * `permutation` - A permutation of the fixed indices, that matches another tensor. 
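+    ///
+    /// Sketch (assumed): the permutation is typically obtained from
+    /// `match_indices`, as in the `multi_contract` implementations of this diff:
+    ///
+    /// ```ignore
+    /// let (permutation, self_matches, _other_matches) =
+    ///     self.structure().match_indices(other.structure()).unwrap();
+    /// let mut iter = self.iter_multi_fibers_metric(&self_matches, permutation);
+    /// ```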
+    pub fn new(tensor: &'a T, fiber_positions: &[bool], permutation: Permutation) -> Self {
+        let iters = TensorStructureMultiFiberMetricIterator::new_conjugates(
+            &tensor.external_structure(),
+            fiber_positions,
+            permutation,
+        );
+
+        let mut f = fiber_positions.iter();
+        let mut dims = tensor.shape();
+        dims.retain(|_| !*f.next().unwrap_or_else(|| unreachable!()));
+        let capacity = dims.iter().map(|d| usize::from(*d)).product();
+        TensorMultiFiberMetricIterator {
+            tensor,
+            map: vec![],
+            fiber_iter: iters.0,
+            free_iter: iters.1,
+            skipped: 0,
+            capacity,
+        }
+    }
+
+    /// Reset the iterator, so that it can be used again.
+    #[must_use]
+    pub fn reset(&mut self) -> usize {
+        self.fiber_iter.reset();
+        self.free_iter.reset();
+        let skipped = self.skipped;
+        self.skipped = 0;
+        skipped
+    }
+}
+
+impl<'a, T, N> Iterator for TensorMultiFiberMetricIterator<'a, SparseTensor<T, N>>
+where
+    N: TensorStructure,
+{
+    type Item = (usize, Vec<usize>, Vec<(&'a T, bool)>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(i) = self.free_iter.next() {
+            let mut out = Vec::new();
+            let mut nonzeros = Vec::new();
+            for (pos, ind, met) in self.fiber_iter.by_ref() {
+                if let Some(v) = self.tensor.get_linear(ind + i) {
+                    out.push((v, met));
+                    nonzeros.push(pos);
+                }
+            }
+            // reset once, remembering the permutation map on the first pass
+            if self.map.is_empty() {
+                self.map = self.fiber_iter.reset().to_owned();
+            } else {
+                self.fiber_iter.reset();
+            }
+            if out.is_empty() {
+                self.skipped += 1;
+                self.next()
+            } else {
+                let skipped = self.skipped;
+                self.skipped = 0;
+                Some((skipped, nonzeros, out))
+            }
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a, T, N> Iterator for TensorMultiFiberMetricIterator<'a, DenseTensor<T, N>>
+where
+    N: TensorStructure,
+{
+    type Item = Vec<(&'a T, bool)>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(i) = self.free_iter.next() {
+            let mut out = Vec::with_capacity(self.capacity);
+            for (_, ind, met) in self.fiber_iter.by_ref() {
+                if let Some(v) = self.tensor.get_linear(ind + i) {
+                    out.push((v, met));
+                }
+            }
+            // reset once, remembering the permutation map on the first pass
+            if self.map.is_empty() {
+                self.map = self.fiber_iter.reset().to_owned();
+            } else {
+                self.fiber_iter.reset();
+            }
+            if out.is_empty() {
+                return self.next();
+            }
+            Some(out)
+        } else {
+            None
+        }
+    }
+}
+
+/// Iterator over all indices of a tensor, fixing a subset of indices
+///
+/// The iterator returns a vector of references to the elements of the tensor along the fixed indices (the so-called fibers).
+/// It flattens the indices of the fibers.
+///
+/// For the sparse tensor, the iterator also returns a vector of references to the non-zero elements of the tensor along the fixed indices, and the number of skipped indices.
+/// For the dense tensor, the iterator only returns the vector of references.
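+///
+/// Hedged sketch (the fixed positions are illustrative):
+///
+/// ```ignore
+/// let fixed = [true, false, false]; // fix the first index position
+/// for (skipped, nonzeros, fiber) in sparse.iter_multi_fibers(&fixed) {
+///     // `skipped` counts fibers with no non-zero entries, `nonzeros` the
+///     // positions inside this fiber, and `fiber` the matching references
+/// }
+/// ```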
+/// +pub struct TensorMultiFiberIterator<'a, T> +where + T: TensorStructure, +{ + tensor: &'a T, + fiber_iter: TensorStructureMultiFiberIterator, + free_iter: TensorStructureMultiFiberIterator, + skipped: usize, +} + +impl<'a, T> TensorMultiFiberIterator<'a, T> +where + T: TensorStructure, +{ + pub fn new(tensor: &'a T, fiber_positions: &[bool]) -> Self { + let iters = TensorStructureMultiFiberIterator::new_conjugate( + &tensor.external_structure(), + fiber_positions, + ); + TensorMultiFiberIterator { + tensor, + fiber_iter: iters.0, + free_iter: iters.1, + skipped: 0, + } + } + + #[must_use] + pub fn reset(&mut self) -> usize { + self.fiber_iter.reset(); + self.free_iter.reset(); + let skipped = self.skipped; + self.skipped = 0; + skipped + } +} + +impl<'a, T, N> Iterator for TensorMultiFiberIterator<'a, SparseTensor> +where + N: TensorStructure, +{ + type Item = (usize, Vec, Vec<&'a T>); + + fn next(&mut self) -> Option { + if let Some(i) = self.free_iter.next() { + let mut out = Vec::new(); + let mut nonzeros = Vec::new(); + for (pos, ind) in self.fiber_iter.by_ref().enumerate() { + if let Some(v) = self.tensor.get_linear(ind + i) { + out.push(v); + nonzeros.push(pos); + } + } + self.fiber_iter.reset(); + if out.is_empty() { + self.skipped += 1; + self.next() + } else { + let skipped = self.skipped; + self.skipped = 0; + Some((skipped, nonzeros, out)) + } + } else { + None + } + } +} + +impl<'a, T, N> Iterator for TensorMultiFiberIterator<'a, DenseTensor> +where + N: TensorStructure, +{ + type Item = Vec<&'a T>; + + fn next(&mut self) -> Option { + if let Some(i) = self.free_iter.next() { + let mut out = Vec::new(); + for ind in self.fiber_iter.by_ref() { + if let Some(v) = self.tensor.get_linear(ind + i) { + out.push(v); + } + } + self.fiber_iter.reset(); + Some(out) + } else { + None + } + } +} + +/// Iterator over all the elements of a sparse tensor +/// +/// Returns the expanded indices and the element at that index +pub struct SparseTensorIterator<'a, T, N> { + iter: std::collections::hash_map::Iter<'a, usize, T>, + structure: &'a N, +} + +impl<'a, T, N> SparseTensorIterator<'a, T, N> { + fn new(tensor: &'a SparseTensor) -> Self { + SparseTensorIterator { + iter: tensor.elements.iter(), + structure: &tensor.structure, + } + } +} + +impl<'a, T, N> Iterator for SparseTensorIterator<'a, T, N> +where + N: TensorStructure, +{ + type Item = (Vec, &'a T); + + fn next(&mut self) -> Option { + if let Some((k, v)) = self.iter.next() { + let indices = self.structure.expanded_index(*k).unwrap(); + Some((indices, v)) + } else { + None + } + } +} + +/// Iterator over all the elements of a sparse tensor +/// +/// Returns the flat index and the element at that index + +pub struct SparseTensorLinearIterator<'a, T> { + iter: std::collections::hash_map::Iter<'a, usize, T>, +} + +impl<'a, T> SparseTensorLinearIterator<'a, T> { + pub fn new(tensor: &'a SparseTensor) -> Self { + SparseTensorLinearIterator { + iter: tensor.elements.iter(), + } + } +} + +impl<'a, T> Iterator for SparseTensorLinearIterator<'a, T> { + type Item = (usize, &'a T); + + fn next(&mut self) -> Option { + self.iter.next().map(|(k, v)| (*k, v)) + } +} + +// impl<'a, T, I> IntoIterator for &'a SparseTensor { +// type Item = (&'a Vec, &'a T); +// type IntoIter = SparseTensorIterator<'a, T>; + +// fn into_iter(self) -> Self::IntoIter { +// SparseTensorIterator::new(self) +// } +// } + +/// Iterator over all but two indices of a sparse tensor, where the two indices are traced +/// +/// The iterator next returns the value of the trace 
at the current indices, and the current indices + +pub struct SparseTensorTraceIterator<'a, T, I> { + tensor: &'a SparseTensor, + trace_indices: [usize; 2], + current_indices: Vec, + done: bool, +} + +impl<'a, T, I> SparseTensorTraceIterator<'a, T, I> +where + I: TensorStructure, +{ + /// Create a new trace iterator + /// + /// # Arguments + /// + /// * `tensor` - A reference to the tensor + /// * `trace_indices` - The indices to be traced + /// + /// # Panics + /// + /// Panics if the trace indices do not point to the same dimension + fn new(tensor: &'a SparseTensor, trace_indices: [usize; 2]) -> Self { + //trace positions must point to the same dimension + assert!( + trace_indices + .iter() + .map(|&pos| tensor.external_structure()[pos].representation) + .collect::>() + .iter() + .all(|&sig| sig == tensor.external_structure()[trace_indices[0]].representation), + "Trace indices must point to the same dimension" + ); + SparseTensorTraceIterator { + tensor, + trace_indices, + current_indices: vec![0; tensor.order()], + done: false, + } + } + + fn increment_indices(&mut self) -> bool { + for (i, index) in self + .current_indices + .iter_mut() + .enumerate() + .rev() + .filter(|(pos, _)| !self.trace_indices.contains(pos)) + // Filter out the trace indices + { + *index += 1; + // If the index goes beyond the shape boundary, wrap around to 0 + if index >= &mut usize::from(self.tensor.shape()[i]) { + *index = 0; + continue; // carry over to the next dimension + } + return true; // We've successfully found the next combination + } + false // No more combinations left + } +} + +impl<'a, T, I> Iterator for SparseTensorTraceIterator<'a, T, I> +where + T: for<'c> std::ops::AddAssign<&'c T> + + for<'b> std::ops::SubAssign<&'b T> + + std::ops::Neg + + Clone, + I: TensorStructure + Clone, +{ + type Item = (Vec, T); + fn next(&mut self) -> Option { + if self.done { + return None; + } + + let trace_dimension = + self.tensor.external_structure()[self.trace_indices[0]].representation; + let trace_sign = trace_dimension.negative(); + let mut iter = trace_sign.iter().enumerate(); + let mut indices = self.current_indices.clone(); + let (i, mut sign) = iter.next().unwrap(); //First element (to eliminate the need for default) + + indices[self.trace_indices[0]] = i; + indices[self.trace_indices[1]] = i; + + // Data might not exist at that concrete index usize, we advance it till it does, and if not we skip + + while self.tensor.is_empty_at(&indices) { + let Some((i, signint)) = iter.next() else { + self.done = !self.increment_indices(); + return self.next(); // skip + }; + indices[self.trace_indices[0]] = i; + indices[self.trace_indices[1]] = i; + sign = signint; + } + + let value = (*self.tensor.get(&indices).unwrap()).clone(); //Should now be safe to unwrap + let mut trace = if *sign { value.neg() } else { value }; + + for (i, sign) in iter { + indices[self.trace_indices[0]] = i; + indices[self.trace_indices[1]] = i; + if let Ok(value) = self.tensor.get(&indices) { + if *sign { + trace -= value; + } else { + trace += value; + } + } + } + + //make a vector withouth the trace indices + let trace_indices: Vec = self + .current_indices + .clone() + .into_iter() + .enumerate() + .filter(|&(i, _)| !self.trace_indices.contains(&i)) + .map(|(_, x)| x) + .collect(); + + self.done = !self.increment_indices(); + + Some((trace_indices, trace)) + } +} + +impl SparseTensor +where + I: TensorStructure, +{ + pub fn iter_fiber(&self, fiber_index: usize) -> TensorFiberIterator { + TensorFiberIterator::new(self, fiber_index) + } 
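+
+    // Hedged usage sketch for `iter_trace` below (the traced positions are
+    // illustrative): tracing contracts two slots of the same representation
+    // against each other, weighting each term by the metric sign.
+    //
+    //     for (remaining, value) in t.iter_trace([0, 2]) {
+    //         // `remaining` omits the two traced positions; `value` is the
+    //         // metric-signed sum over the traced dimension
+    //     }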
+ + pub fn iter_trace(&self, trace_indices: [usize; 2]) -> SparseTensorTraceIterator { + SparseTensorTraceIterator::new(self, trace_indices) + } + + pub fn iter(&self) -> SparseTensorIterator { + SparseTensorIterator::new(self) + } + + pub fn iter_flat(&self) -> SparseTensorLinearIterator { + SparseTensorLinearIterator::new(self) + } + + pub fn iter_multi_fibers_metric( + &self, + fiber_positions: &[bool], + permutation: Permutation, + ) -> TensorMultiFiberMetricIterator { + TensorMultiFiberMetricIterator::new(self, fiber_positions, permutation) + } + + pub fn iter_multi_fibers(&self, fiber_positions: &[bool]) -> TensorMultiFiberIterator { + TensorMultiFiberIterator::new(self, fiber_positions) + } +} + +/// Iterator over all the elements of a dense tensor +/// +/// Returns the expanded indices and the element at that index +pub struct DenseTensorIterator<'a, T, I> { + tensor: &'a DenseTensor, + current_flat_index: usize, +} + +impl<'a, T, I> DenseTensorIterator<'a, T, I> { + /// Create a new dense tensor iterator + /// + /// # Arguments + /// + /// * `tensor` - A reference to the tensor + fn new(tensor: &'a DenseTensor) -> Self { + DenseTensorIterator { + tensor, + current_flat_index: 0, + } + } +} + +impl<'a, T, I> Iterator for DenseTensorIterator<'a, T, I> +where + I: TensorStructure, +{ + type Item = (Vec, &'a T); + + fn next(&mut self) -> Option { + if let Ok(indices) = self.tensor.expanded_index(self.current_flat_index) { + let value = self.tensor.get_linear(self.current_flat_index).unwrap(); + + self.current_flat_index += 1; + + Some((indices, value)) + } else { + None + } + } +} + +/// Iterator over all the elements of a dense tensor +/// +/// Returns the flat index and the element at that index + +pub struct DenseTensorLinearIterator<'a, T, I> { + tensor: &'a DenseTensor, + current_flat_index: usize, +} + +impl<'a, T, I> DenseTensorLinearIterator<'a, T, I> { + pub fn new(tensor: &'a DenseTensor) -> Self { + DenseTensorLinearIterator { + tensor, + current_flat_index: 0, + } + } +} + +impl<'a, T, I> Iterator for DenseTensorLinearIterator<'a, T, I> +where + I: TensorStructure, +{ + type Item = (usize, &'a T); + + fn next(&mut self) -> Option { + let value = self.tensor.get_linear(self.current_flat_index)?; + let index = self.current_flat_index; + self.current_flat_index += 1; + Some((index, value)) + } +} + +impl<'a, T, I> IntoIterator for &'a DenseTensor +where + I: TensorStructure, +{ + type Item = (Vec, &'a T); + type IntoIter = DenseTensorIterator<'a, T, I>; + + fn into_iter(self) -> Self::IntoIter { + DenseTensorIterator::new(self) + } +} + +impl IntoIterator for DenseTensor +where + I: TensorStructure, +{ + type Item = (Vec, T); + type IntoIter = DenseTensorIntoIterator; + + fn into_iter(self) -> Self::IntoIter { + DenseTensorIntoIterator::new(self) + } +} + +/// An consuming iterator over the elements of a dense tensor +/// +/// Returns the expanded indices and the element at that index +/// +/// +pub struct DenseTensorIntoIterator { + tensor: DenseTensor, + current_flat_index: usize, +} + +impl DenseTensorIntoIterator { + fn new(tensor: DenseTensor) -> Self { + DenseTensorIntoIterator { + tensor, + current_flat_index: 0, + } + } +} + +impl Iterator for DenseTensorIntoIterator +where + I: TensorStructure, +{ + type Item = (Vec, T); + + fn next(&mut self) -> Option { + if let Ok(indices) = self.tensor.expanded_index(self.current_flat_index) { + let indices = indices.clone(); + let value = self.tensor.data.remove(self.current_flat_index); + + self.current_flat_index += 1; + + 
Some((indices, value)) + } else { + None + } + } +} + +/// Iterator over all indices of a dense tensor, keeping two indices fixed and tracing over them +/// +/// The next method returns the value of the trace at the current indices, and the current indices +pub struct DenseTensorTraceIterator<'a, T, I> { + tensor: &'a DenseTensor, + trace_indices: [usize; 2], + current_indices: Vec, + done: bool, +} + +impl<'a, T, I> DenseTensorTraceIterator<'a, T, I> +where + I: TensorStructure, +{ + /// Create a new trace iterator + /// + /// # Arguments + /// + /// * `tensor` - A reference to the tensor + /// * `trace_indices` - The indices to be traced + /// + fn new(tensor: &'a DenseTensor, trace_indices: [usize; 2]) -> Self { + assert!(trace_indices.len() >= 2, "Invalid trace indices"); + //trace positions must point to the same dimension + assert!( + trace_indices + .iter() + .map(|&pos| tensor.external_structure()[pos].representation) + .collect::>() + .iter() + .all(|&sig| sig == tensor.external_structure()[trace_indices[0]].representation), + "Trace indices must point to the same dimension" + ); + DenseTensorTraceIterator { + tensor, + trace_indices, + current_indices: vec![0; tensor.order()], + done: false, + } + } + + fn increment_indices(&mut self) -> bool { + for (i, index) in self + .current_indices + .iter_mut() + .enumerate() + .rev() + .filter(|(pos, _)| !self.trace_indices.contains(pos)) + { + *index += 1; + // If the index goes beyond the shape boundary, wrap around to 0 + if index >= &mut self.tensor.shape()[i] { + *index = 0; + continue; // carry over to the next dimension + } + return true; // We've successfully found the next combination + } + false // No more combinations left + } +} + +impl<'a, T, I> Iterator for DenseTensorTraceIterator<'a, T, I> +where + T: for<'c> AddAssign<&'c T> + + for<'b> SubAssign<&'b T> + + Neg + + Clone + + std::fmt::Debug, + I: TensorStructure, +{ + type Item = (Vec, T); + fn next(&mut self) -> Option { + if self.done { + return None; + } + + let trace_dimension = + self.tensor.external_structure()[self.trace_indices[0]].representation; + let trace_sign = trace_dimension.negative(); + + let mut iter = trace_sign.iter().enumerate(); + let mut indices = self.current_indices.clone(); + let (_, sign) = iter.next().unwrap(); //First sign + + for pos in self.trace_indices { + indices[pos] = 0; + } + + let value = self.tensor.get(&indices).unwrap().clone(); + + let mut trace = if *sign { value.neg() } else { value }; + + for (i, sign) in iter { + for pos in self.trace_indices { + indices[pos] = i; + } + + if let Ok(value) = self.tensor.get(&indices) { + if *sign { + trace -= value; + } else { + trace += value; + } + } + } + + //make a vector without the trace indices + let trace_indices: Vec = self + .current_indices + .clone() + .into_iter() + .enumerate() + .filter(|&(i, _)| !self.trace_indices.contains(&i)) + .map(|(_, x)| x) + .collect(); + + self.done = !self.increment_indices(); + + Some((trace_indices, trace)) + } +} + +impl DenseTensor +where + I: TensorStructure, +{ + pub fn iter(&self) -> DenseTensorIterator { + DenseTensorIterator::new(self) + } + + pub fn iter_flat(&self) -> DenseTensorLinearIterator { + DenseTensorLinearIterator::new(self) + } + + pub fn iter_fiber(&self, fixedindex: usize) -> TensorFiberIterator { + TensorFiberIterator::new(self, fixedindex) + } + + pub fn iter_trace(&self, trace_indices: [usize; 2]) -> DenseTensorTraceIterator { + DenseTensorTraceIterator::new(self, trace_indices) + } + + pub fn iter_multi_fibers_metric( + 
&self, + fiber_positions: &[bool], + permutation: Permutation, + ) -> TensorMultiFiberMetricIterator { + TensorMultiFiberMetricIterator::new(self, fiber_positions, permutation) + } + + pub fn iter_multi_fibers(&self, fiber_positions: &[bool]) -> TensorMultiFiberIterator { + TensorMultiFiberIterator::new(self, fiber_positions) + } +} diff --git a/src/tensor/network.rs b/src/tensor/network.rs new file mode 100644 index 00000000..e1f763fd --- /dev/null +++ b/src/tensor/network.rs @@ -0,0 +1,787 @@ +use ahash::AHashMap; + +use serde::{Deserialize, Serialize}; +use slotmap::{new_key_type, DenseSlotMap, Key, SecondaryMap}; +use symbolica::{ + domains::float::Complex, + representations::{Atom, AtomView, Symbol}, + state::State, +}; + +use self::parametric::{MixedTensor, MixedTensors}; +use self::structure::HistoryStructure; + +use super::{ + parametric, structure, Contract, DataTensor, HasName, Shadowable, Slot, TensorStructure, + TracksCount, +}; +use smartstring::alias::String; +use std::fmt::{Debug, Display}; + +new_key_type! { + pub struct NodeId; + pub struct HedgeId; +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct HalfEdgeGraph { + pub edges: DenseSlotMap, + pub involution: SecondaryMap, + pub neighbors: SecondaryMap, + pub nodes: DenseSlotMap, + pub nodemap: SecondaryMap, + pub reverse_nodemap: SecondaryMap, +} + +struct IncidentIterator<'a> { + neighbors: &'a SecondaryMap, + current: Option, + start: HedgeId, +} + +impl<'a> Iterator for IncidentIterator<'a> { + type Item = HedgeId; + fn next(&mut self) -> Option { + let current = self.current?; + + self.current = Some(self.neighbors[current]); + + if self.current == Some(self.start) { + self.current = None; + } + + Some(current) + } +} + +impl<'a> IncidentIterator<'a> { + fn new(graph: &'a HalfEdgeGraph, initial: HedgeId) -> Self { + IncidentIterator { + neighbors: &graph.neighbors, + current: Some(initial), + start: initial, + } + } +} +#[allow(dead_code)] +impl HalfEdgeGraph { + fn new() -> Self { + HalfEdgeGraph { + involution: SecondaryMap::new(), + nodemap: SecondaryMap::new(), + neighbors: SecondaryMap::new(), + reverse_nodemap: SecondaryMap::new(), + nodes: DenseSlotMap::with_key(), + edges: DenseSlotMap::with_key(), + } + } + + fn dot(&self) -> std::string::String { + let mut out = "graph {".to_string(); + out.push_str(" node [shape=circle,height=0.1,label=\"\"]; overlap=\"scale\";"); + + // for (i, n) in &self.nodes { + // out.push_str(&format!("\n {}", i.data().as_ffi())); + // } + for (i, _) in &self.neighbors { + match i.cmp(&self.involution[i]) { + std::cmp::Ordering::Greater => { + out.push_str(&format!( + "\n {} -- {}", + self.nodemap[i].data().as_ffi(), + self.nodemap[self.involution[i]].data().as_ffi() + )); + } + std::cmp::Ordering::Equal => { + out.push_str(&format!( + "ext{} [shape=none, label=\"\"];", + i.data().as_ffi() + )); + out.push_str(&format!( + "\n {} -- ext{};", + self.nodemap[i].data().as_ffi(), + i.data().as_ffi() + )); + } + _ => {} + } + } + + out += "}"; + out + } + + fn add_node(&mut self, data: N) -> NodeId { + self.nodes.insert(data) + } + + fn node_indices(&self) -> slotmap::dense::Keys<'_, NodeId, N> { + self.nodes.keys() + } + + /// Add a node with a list of edget with associated data. Matches edges by equality. 
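+    ///
+    /// Hedged sketch (edge labels are illustrative; compare the `merge` test
+    /// below): equal, still-unpaired edge data is matched into an internal edge.
+    ///
+    /// ```ignore
+    /// let a = graph.add_node_with_edges(1, &[1, 2, 3]);
+    /// let b = graph.add_node_with_edges(2, &[1, 2, 6]);
+    /// // labels 1 and 2 pair up into internal edges between `a` and `b`;
+    /// // labels 3 and 6 remain external (self-involutive half-edges)
+    /// ```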
+ fn add_node_with_edges(&mut self, data: N, edges: &[E]) -> NodeId + where + E: Eq + Clone, + { + let idx = self.add_node(data); + for e in edges { + let mut found_match = false; + for (i, other_e) in &self.edges { + if *e == *other_e && self.involution[i] == i { + found_match = true; + let eid = self.edges.insert(e.clone()); + self.involution.insert(eid, i); + self.involution.insert(i, eid); + self.nodemap.insert(eid, idx); + if let Some(prev_eid) = self.reverse_nodemap.insert(idx, eid) { + let next_eid = self.neighbors.insert(prev_eid, eid).unwrap(); + self.neighbors.insert(eid, next_eid); + } else { + self.neighbors.insert(eid, eid); + } + break; + } + } + if !found_match { + let eid = self.edges.insert(e.clone()); + self.involution.insert(eid, eid); + self.nodemap.insert(eid, idx); + if let Some(prev_eid) = self.reverse_nodemap.insert(idx, eid) { + let next_eid = self.neighbors.insert(prev_eid, eid).unwrap(); + self.neighbors.insert(eid, next_eid); + } else { + self.neighbors.insert(eid, eid); + } + } + } + + idx + } + + pub fn validate_neighbors(&self) -> bool { + for (i, n) in &self.reverse_nodemap { + for j in IncidentIterator::new(self, *n) { + if self.nodemap[j] != i { + return false; + } + } + } + true + } + + fn node_labels(&self) -> String + where + N: Display, + { + let mut out = String::new(); + for (i, n) in &self.nodes { + out.push_str(&format!("{}[label= \"{}\"]\n", i.data().as_ffi(), n)); + } + out + } + + #[allow(clippy::too_many_lines)] + fn merge_nodes(&mut self, a: NodeId, b: NodeId, data: N) -> NodeId { + let c = self.nodes.insert(data); + + // New initial edge for reverse_nodemap, that does not link to b + // if none is found, all incident edges are link to b and must be removed from the neighbors list + let mut new_initial_a = self + .edges_incident(a) + .find(|x| self.nodemap[self.involution[*x]] != b && self.involution[*x] != *x); + + if new_initial_a.is_none() { + new_initial_a = self + .edges_incident(a) + .find(|x| self.nodemap[self.involution[*x]] != b); + } + + let mut a_edge = new_initial_a; + + if a_edge.is_none() { + // all edges link to b, and must be removed + let initial = self.reverse_nodemap[a]; + let mut current = Some(initial); + loop { + if current.is_none() { + break; + } + let next = self.neighbors.remove(current.unwrap()); + + if next == Some(initial) { + current = None; + } else { + current = next; + } + } + } else { + loop { + let mut next = self.neighbors[a_edge.unwrap()]; + + while self.nodemap[self.involution[next]] == b { + next = self.neighbors.remove(next).unwrap(); + } + + self.nodemap.insert(a_edge.unwrap(), c); + self.neighbors.insert(a_edge.unwrap(), next); + + if new_initial_a == Some(next) { + break; + } + + a_edge = Some(next); + } + } + + let mut new_initial_b = self + .edges_incident(b) + .find(|x| self.nodemap[self.involution[*x]] != a && self.involution[*x] != *x); + + if new_initial_b.is_none() { + new_initial_b = self + .edges_incident(b) + .find(|x| self.nodemap[self.involution[*x]] != a); + } + let mut b_edge = new_initial_b; + + if b_edge.is_none() { + let initial = self.reverse_nodemap[b]; + let mut current = Some(initial); + loop { + if current.is_none() { + break; + } + let next = self.neighbors.remove(current.unwrap()); + + if next == Some(initial) { + current = None; + } else { + current = next; + } + } + } else { + loop { + let mut next = self.neighbors[b_edge.unwrap()]; + + while self.nodemap[self.involution[next]] == a { + next = self.neighbors.remove(next).unwrap(); + } + + self.nodemap.insert(b_edge.unwrap(), 
c); + self.neighbors.insert(b_edge.unwrap(), next); + + if new_initial_b == Some(next) { + break; + } + + b_edge = Some(next); + } + } + + match (new_initial_a, new_initial_b) { + (Some(new_edge_a), Some(new_edge_b)) => { + self.reverse_nodemap.insert(c, new_edge_a); + self.reverse_nodemap.remove(a); + self.reverse_nodemap.remove(b); + let old_neig = self.neighbors.insert(new_edge_a, new_edge_b).unwrap(); + self.neighbors.insert(b_edge.unwrap(), old_neig); + } + (Some(new_edge_a), None) => { + self.reverse_nodemap.insert(c, new_edge_a); + self.reverse_nodemap.remove(a); + self.reverse_nodemap.remove(b); + } + (None, Some(new_edge_b)) => { + self.reverse_nodemap.insert(c, new_edge_b); + self.reverse_nodemap.remove(a); + self.reverse_nodemap.remove(b); + } + (None, None) => { + self.reverse_nodemap.remove(b); + self.reverse_nodemap.remove(a); + } + } + self.nodes.remove(a); + self.nodes.remove(b); + c + } + + /// Add an internal edge between two nodes. + fn add_edge(&mut self, a: NodeId, b: NodeId, data: E) -> HedgeId + where + E: Clone, + { + let hedge_id_a = self.edges.insert(data.clone()); + let hedge_id_b = self.edges.insert(data); + self.involution.insert(hedge_id_a, hedge_id_b); + self.involution.insert(hedge_id_b, hedge_id_a); + self.nodemap.insert(hedge_id_a, a); + if let Some(prev_eid) = self.reverse_nodemap.insert(a, hedge_id_a) { + let next_eid = self.neighbors.insert(prev_eid, hedge_id_a).unwrap(); + self.neighbors.insert(hedge_id_a, next_eid).unwrap(); + } else { + self.neighbors.insert(hedge_id_a, hedge_id_a); + } + self.nodemap.insert(hedge_id_b, b); + if let Some(prev_eid) = self.reverse_nodemap.insert(b, hedge_id_b) { + let next_eid = self.neighbors.insert(prev_eid, hedge_id_b).unwrap(); + self.neighbors.insert(hedge_id_b, next_eid).unwrap(); + } else { + self.neighbors.insert(hedge_id_b, hedge_id_b); + } + hedge_id_a + } + + /// Add external, as a fixed point involution half edge. 
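+    ///
+    /// The involution pairs each half-edge with its partner; an external (dangling)
+    /// half-edge is its own partner, i.e. `involution[id] == id`.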
+ fn add_external(&mut self, a: NodeId, data: E) -> HedgeId { + let id = self.edges.insert(data); + self.involution.insert(id, id); + self.nodemap.insert(id, a); + if let Some(prev_eid) = self.reverse_nodemap.insert(a, id) { + let next_eid = self.neighbors.insert(prev_eid, id).unwrap(); + self.neighbors.insert(id, next_eid).unwrap(); + } else { + self.neighbors.insert(id, id); + } + id + } + + fn edges_incident(&self, node: NodeId) -> impl Iterator + '_ { + IncidentIterator::new(self, self.reverse_nodemap[node]) + } + + fn edges_between(&self, a: NodeId, b: NodeId) -> impl Iterator + '_ { + self.edges_incident(a) + .filter(move |&i| self.nodemap[self.involution[i]] == b) + } + + fn internal_edges_incident(&self, node: NodeId) -> impl Iterator + '_ { + self.edges_incident(node) + .filter(move |&i| self.nodemap[self.involution[i]] != node) + } + + fn external_edges_incident(&self, node: NodeId) -> impl Iterator + '_ { + self.edges_incident(node) + .filter(move |&i| self.nodemap[self.involution[i]] == node) + } + + fn degree(&self, node: NodeId) -> usize { + self.edges_incident(node).collect::>().len() + } + + fn neighbors(&self, node: NodeId) -> impl Iterator + '_ { + self.edges_incident(node) + .map(move |i| self.nodemap[self.involution[i]]) + } + + // fn map_nodes(&self, f: F) -> HalfEdgeGraph + // where + // F: Fn(&N) -> U, + // E: Clone, + // { + // let edges = self.edges.clone(); + // let involution = self.involution.clone(); + + // let mut nodes = DenseSlotMap::with_key(); + // let mut nodemap = SecondaryMap::new(); + + // for n in &self.nodes { + // let nid = nodes.insert(f(n.1)); + // for e in self.edges_incident(n.0) { + // nodemap.insert(e, nid); + // } + // } + + // HalfEdgeGraph { + // edges, + // involution, + // nodes, + // nodemap, + // } + // } +} + +#[test] +fn merge() { + let mut graph = HalfEdgeGraph::new(); + let a = graph.add_node_with_edges(1, &[1, 2, 3, 4, 5]); + let b = graph.add_node_with_edges(2, &[1, 2, 6, 7, 8]); + let c = graph.add_node_with_edges(4, &[4, 6, 9, 10, 11]); + + println!("{}", graph.dot()); + println!("{}", graph.degree(a)); + println!("{}", graph.degree(b)); + + for (i, n) in &graph.neighbors { + println!("{} {}", graph.edges[i], graph.edges[*n]); + } + + let d = graph.merge_nodes(a, b, 3); + + // for (i, n) in &graph.neighbors { + // println!("{} {}", graph.edges[i], graph.edges[*n]); + // } + + println!("{}", graph.dot()); + println!("{}", graph.degree(c)); + println!("{}", graph.neighbors.len()); + + let e = graph.merge_nodes(c, d, 5); + + println!("{}", graph.dot()); + println!("{}", graph.degree(e)); + println!("{}", graph.neighbors.len()); + + let mut graph = HalfEdgeGraph::new(); + let a = graph.add_node_with_edges("a", &[10, 2, 3]); + let b = graph.add_node_with_edges("b", &[20, 3, 4]); + let c = graph.add_node_with_edges("c", &[30, 4, 2]); + let d = graph.add_node_with_edges("d", &[20]); + let e = graph.add_node_with_edges("e", &[30]); + + println!("Test {}", graph.dot()); + println!("{}", graph.degree(a)); + println!("{}", graph.degree(b)); + + for (i, n) in &graph.neighbors { + println!("{} {}", graph.edges[i], graph.edges[*n]); + } + + let d = graph.merge_nodes(d, b, "bd"); + + // for (i, n) in &graph.neighbors { + // println!("{} {}", graph.edges[i], graph.edges[*n]); + // } + + println!("{}", graph.degree(c)); + println!("{}", graph.neighbors.len()); + + println!("{}", graph.dot()); + + let e = graph.merge_nodes(c, e, "ce"); + + if graph.validate_neighbors() { + println!("valid"); + } else { + println!("invalid"); + } + + 
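+    // `a` and `b` share the edge data 1 and 2, `a` and `c` share 4, and `b` and
+    // `c` share 6, so those half-edges are paired up as internal edges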
println!("{}", graph.dot()); + let f = graph.merge_nodes(d, e, "de"); + + if graph.validate_neighbors() { + println!("valid"); + } else { + println!("invalid"); + } + + println!("{}", graph.dot()); + println!("{}", graph.node_labels()); + println!("{}", graph.degree(a)); + println!("{}", graph.neighbors.len()); + + let g = graph.merge_nodes(a, f, "af"); + + if graph.validate_neighbors() { + println!("valid"); + } else { + println!("invalid"); + } + + println!("{}", graph.dot()); + println!("{}", graph.neighbors.len()); + println!("{}", graph.degree(g)); + + // println!("{}", graph.degree(b)); +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TensorNetwork { + pub graph: HalfEdgeGraph, + pub params: Vec, +} + +impl TensorNetwork { + fn edge_to_min_degree_node(&self) -> Option { + let mut neighs = self.graph.reverse_nodemap.clone(); + if neighs.is_empty() { + return None; + } + + loop { + let mut all_ext = true; + for (node, initial) in &mut neighs { + *initial = self.graph.neighbors[*initial]; + let start = self.graph.reverse_nodemap[node]; + + if self.graph.involution[start] != start { + all_ext = false; + if *initial == start { + return Some(start); + } + } + } + if all_ext { + return None; + } + } + } + + pub fn to_vec(&self) -> Vec<&T> { + self.graph.nodes.values().collect() + } +} + +impl TensorNetwork> +where + N: Debug + TensorStructure, +{ + pub fn to_symbolic_tensor_vec(mut self) -> Vec> { + self.graph + .nodes + .drain() + .filter(|(_, n)| n.is_symbolic()) + .map(|(_, n)| n.try_into_symbolic().unwrap()) + .collect() + } + + pub fn evaluate_float<'a>(&'a mut self, const_map: &AHashMap, f64>) + where + N: Clone, + { + for (_, n) in &mut self.graph.nodes { + n.evaluate_float(const_map); + } + } + + pub fn evaluate_complex<'a>(&'a mut self, const_map: &AHashMap, Complex>) + where + N: Clone, + { + for (_, n) in &mut self.graph.nodes { + n.evaluate_complex(const_map); + } + } +} + +impl From> for TensorNetwork +where + T: TensorStructure, +{ + fn from(tensors: Vec) -> Self { + TensorNetwork { + graph: Self::generate_network_graph(tensors), + params: Vec::new(), + } + } +} + +impl Default for TensorNetwork +where + T: TensorStructure, +{ + fn default() -> Self { + Self::new() + } +} + +impl TensorNetwork +where + T: TensorStructure, +{ + pub fn new() -> Self { + TensorNetwork { + graph: HalfEdgeGraph::new(), + params: Vec::new(), + } + } + + pub fn push(&mut self, tensor: T) -> NodeId { + let slots = tensor.external_structure().to_vec(); + self.graph.add_node_with_edges(tensor, &slots) + } + + fn generate_network_graph(tensors: Vec) -> HalfEdgeGraph { + let mut graph = HalfEdgeGraph::::new(); + + for tensor in tensors { + let slots = tensor.external_structure().to_vec(); + graph.add_node_with_edges(tensor, &slots); + } + + graph + } + + pub fn edge_to_min_degree_node_with_depth(&self, depth: usize) -> Option + where + T: TracksCount, + { + let mut neighs: SecondaryMap = self + .graph + .reverse_nodemap + .clone() + .into_iter() + .filter(|(n, _e)| self.graph.nodes[*n].contractions_num() < depth) + .collect(); + if neighs.is_empty() { + return None; + } + + loop { + let mut all_ext = true; + for (node, initial) in &mut neighs { + *initial = self.graph.neighbors[*initial]; + let start = self.graph.reverse_nodemap[node]; + + if self.graph.involution[start] != start + && self.graph.nodes[self.graph.nodemap[self.graph.involution[start]]] + .contractions_num() + < depth + { + all_ext = false; + if *initial == start { + return Some(start); + } + } + } + if all_ext { + return 
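+                // no internal edge whose endpoints are both below the depth
+                // limit remains, so there is nothing left to contract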
None; + } + } + } +} +impl TensorNetwork +where + T: Clone, +{ + pub fn result(&self) -> T { + self.graph.nodes.iter().next().unwrap().1.clone() + } +} + +impl TensorNetwork { + pub fn dot(&self) -> std::string::String { + self.graph.dot() + } +} + +impl TensorNetwork> +where + I: TensorStructure + Clone, +{ + pub fn generate_params(&mut self) { + for (_i, n) in &self.graph.nodes { + if n.is_symbolic() { + self.params.push(n.clone()); + } + } + } +} + +impl TensorNetwork +where + T: TensorStructure> + Clone, +{ + pub fn symbolic_shadow(&mut self, name: &str) -> TensorNetwork { + { + for (i, n) in &mut self.graph.nodes { + n.mut_structure().set_name( + &State::get_or_insert_fn(format!("{}{}", name, i.data().as_ffi()), None) + .unwrap(), + ); + } + } + + let edges = self.graph.edges.clone(); + let involution = self.graph.involution.clone(); + let neighbors = self.graph.neighbors.clone(); + + let mut nodes = DenseSlotMap::with_key(); + let mut nodemap = SecondaryMap::new(); + let mut reverse_nodemap = SecondaryMap::new(); + let mut params = Vec::new(); + + for (i, n) in &self.graph.nodes { + let node = n.structure().clone().shadow().unwrap(); + + let nid = nodes.insert(MixedTensor::>::from(node.clone())); + + params.push(node.into()); + let mut first = true; + for e in self.graph.edges_incident(i) { + if first { + reverse_nodemap.insert(nid, e); + first = false; + } + nodemap.insert(e, nid); + } + } + + let g = HalfEdgeGraph { + edges, + involution, + reverse_nodemap, + neighbors, + nodes, + nodemap, + }; + + TensorNetwork { graph: g, params } + } +} + +impl TensorNetwork +where + T: HasName, +{ + pub fn name(&mut self, name: T::Name) + where + T::Name: From + Display, + { + for (id, n) in &mut self.graph.nodes { + n.set_name(&format!("{}{}", name, id.data().as_ffi()).into()); + } + } +} + +impl TensorNetwork +where + T: HasName, +{ + pub fn namesym(&mut self, name: &str) { + for (id, n) in &mut self.graph.nodes { + n.set_name( + &State::get_or_insert_fn(format!("{}{}", name, id.data().as_ffi()), None).unwrap(), + ); + } + } +} + +impl TensorNetwork +where + T: Contract + TensorStructure, +{ + pub fn contract_algo(&mut self, edge_choice: fn(&TensorNetwork) -> Option) { + if let Some(e) = edge_choice(self) { + self.contract_edge(e); + // println!("{}", self.dot()); + self.contract_algo(edge_choice); + } + } + fn contract_edge(&mut self, edge_idx: HedgeId) { + let a = self.graph.nodemap[edge_idx]; + let b = self.graph.nodemap[self.graph.involution[edge_idx]]; + + let ai = self.graph.nodes.get(a).unwrap(); + let bi = self.graph.nodes.get(b).unwrap(); + + let f = ai.contract(bi).unwrap(); + + self.graph.merge_nodes(a, b, f); + } + + pub fn contract(&mut self) { + self.contract_algo(Self::edge_to_min_degree_node) + } +} diff --git a/src/tensor/parametric.rs b/src/tensor/parametric.rs new file mode 100644 index 00000000..d5b6a481 --- /dev/null +++ b/src/tensor/parametric.rs @@ -0,0 +1,295 @@ +use ahash::{AHashMap, HashMap}; +use enum_try_as_inner::EnumTryAsInner; + +use symbolica::{ + domains::float::Complex, + evaluate::EvaluationFn, + representations::{Atom, AtomView, Symbol}, +}; + +use super::{ + Contract, DataIterator, DataTensor, DenseTensor, HasName, HistoryStructure, Slot, SparseTensor, + StructureContract, TensorStructure, TracksCount, VecStructure, +}; + +#[derive(Clone, Debug, EnumTryAsInner)] +#[derive_err(Debug)] +pub enum MixedTensor { + Float(DataTensor), + Complex(DataTensor, T>), + Symbolic(DataTensor), +} + +impl<'a, I: TensorStructure + Clone + 'a> MixedTensor { + pub fn 
evaluate_float<'b>(&mut self, const_map: &'b HashMap, f64>) + where + 'b: 'a, + { + let content = match self { + MixedTensor::Symbolic(x) => Some(x), + _ => None, + }; + + if let Some(x) = content { + *self = MixedTensor::Float(x.evaluate(const_map)); + } + } + + pub fn evaluate_complex<'b>(&mut self, const_map: &'b HashMap, Complex>) + where + 'b: 'a, + { + let content = match self { + MixedTensor::Symbolic(x) => Some(x), + _ => None, + }; + + if let Some(x) = content { + *self = MixedTensor::Complex(x.evaluate(const_map)); + } + } +} + +impl DataTensor +where + I: Clone + TensorStructure, +{ + pub fn evaluate<'a, 'b, T>(&self, const_map: &'b HashMap, T>) -> DataTensor + where + T: symbolica::domains::float::Real + + for<'c> std::convert::From<&'c symbolica::domains::rational::Rational>, + 'a: 'b, + { + match self { + DataTensor::Dense(x) => DataTensor::Dense(x.evaluate(const_map)), + DataTensor::Sparse(x) => DataTensor::Sparse(x.evaluate(const_map)), + } + } +} + +impl SparseTensor +where + I: Clone, +{ + pub fn evaluate<'a, T>(&self, const_map: &HashMap, T>) -> SparseTensor + where + T: symbolica::domains::float::Real + + for<'d> std::convert::From<&'d symbolica::domains::rational::Rational>, + { + let fn_map: HashMap<_, EvaluationFn<_>> = HashMap::default(); + let mut cache = HashMap::default(); + let structure = self.structure.clone(); + let data = self + .elements + .iter() + .map(|(idx, x)| { + ( + *idx, + x.as_view().evaluate::(const_map, &fn_map, &mut cache), + ) + }) + .collect::>(); + + SparseTensor { + elements: data, + structure, + } + } +} + +impl DenseTensor +where + I: Clone, +{ + pub fn evaluate<'a, T>(&'a self, const_map: &HashMap, T>) -> DenseTensor + where + T: symbolica::domains::float::Real + + for<'b> std::convert::From<&'b symbolica::domains::rational::Rational>, + { + let fn_map: HashMap<_, EvaluationFn<_>> = HashMap::default(); + let mut cache = HashMap::default(); + let structure = self.structure.clone(); + let data = self + .data + .iter() + .map(|x| x.as_view().evaluate::(const_map, &fn_map, &mut cache)) + .collect::>(); + + DenseTensor { data, structure } + } + + pub fn append_const_map<'a, 'b, T>( + &'a self, + data: &DenseTensor, + const_map: &mut HashMap, T>, + ) where + I: TensorStructure, + T: Copy, + 'a: 'b, + { + for ((i, a), (j, v)) in self.flat_iter().zip(data.flat_iter()) { + assert_eq!(i, j); + const_map.insert(a.as_view(), *v); + } + } +} + +impl TensorStructure for MixedTensor +where + T: TensorStructure, +{ + type Structure = T; + + fn structure(&self) -> &Self::Structure { + match self { + MixedTensor::Float(t) => t.structure(), + MixedTensor::Complex(t) => t.structure(), + MixedTensor::Symbolic(t) => t.structure(), + } + } + + fn mut_structure(&mut self) -> &mut Self::Structure { + match self { + MixedTensor::Float(t) => t.mut_structure(), + MixedTensor::Complex(t) => t.mut_structure(), + MixedTensor::Symbolic(t) => t.mut_structure(), + } + } + fn external_structure(&self) -> &[Slot] { + match self { + MixedTensor::Float(t) => t.external_structure(), + MixedTensor::Complex(t) => t.external_structure(), + MixedTensor::Symbolic(t) => t.external_structure(), + } + } +} + +impl HasName for MixedTensor +where + T: HasName + TensorStructure, +{ + type Name = T::Name; + + fn name(&self) -> Option::Name>> { + match self { + MixedTensor::Float(t) => t.name(), + MixedTensor::Complex(t) => t.name(), + MixedTensor::Symbolic(t) => t.name(), + } + } + + fn set_name(&mut self, name: &Self::Name) { + match self { + MixedTensor::Float(t) => t.set_name(name), + 
MixedTensor::Complex(t) => t.set_name(name), + MixedTensor::Symbolic(t) => t.set_name(name), + } + } +} + +impl TracksCount for MixedTensor +where + T: TracksCount + TensorStructure, +{ + fn contractions_num(&self) -> usize { + match self { + MixedTensor::Float(t) => t.contractions_num(), + MixedTensor::Complex(t) => t.contractions_num(), + MixedTensor::Symbolic(t) => t.contractions_num(), + } + } +} + +pub type MixedTensors = MixedTensor>; + +impl From> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: DenseTensor) -> Self { + MixedTensor::::Float(DataTensor::Dense(other)) + } +} + +impl From> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: SparseTensor) -> Self { + MixedTensor::::Float(DataTensor::Sparse(other)) + } +} + +impl From, I>> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: DenseTensor, I>) -> Self { + MixedTensor::::Complex(DataTensor::Dense(other)) + } +} + +impl From, I>> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: SparseTensor, I>) -> Self { + MixedTensor::::Complex(DataTensor::Sparse(other)) + } +} + +impl From> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: DenseTensor) -> Self { + MixedTensor::::Symbolic(DataTensor::Dense(other)) + } +} + +impl From> for MixedTensor +where + I: TensorStructure, +{ + fn from(other: SparseTensor) -> Self { + MixedTensor::::Symbolic(DataTensor::Sparse(other)) + } +} + +impl Contract> for MixedTensor +where + I: TensorStructure + Clone + StructureContract, +{ + type LCM = MixedTensor; + fn contract(&self, other: &MixedTensor) -> Option { + match (self, other) { + (MixedTensor::::Float(s), MixedTensor::::Float(o)) => { + Some(MixedTensor::::Float(s.contract(o)?)) + } + (MixedTensor::::Float(s), MixedTensor::::Complex(o)) => { + Some(MixedTensor::::Complex(s.contract(o)?)) + } + (MixedTensor::::Float(s), MixedTensor::::Symbolic(o)) => { + Some(MixedTensor::::Symbolic(s.contract(o)?)) + } + (MixedTensor::::Complex(s), MixedTensor::::Float(o)) => { + Some(MixedTensor::::Complex(s.contract(o)?)) + } + (MixedTensor::::Complex(s), MixedTensor::::Complex(o)) => { + Some(MixedTensor::::Complex(s.contract(o)?)) + } + (MixedTensor::::Complex(s), MixedTensor::::Symbolic(o)) => { + Some(MixedTensor::::Symbolic(s.contract(o)?)) + } + (MixedTensor::::Symbolic(s), MixedTensor::::Float(o)) => { + Some(MixedTensor::::Symbolic(s.contract(o)?)) + } + (MixedTensor::::Symbolic(s), MixedTensor::::Complex(o)) => { + Some(MixedTensor::::Symbolic(s.contract(o)?)) + } + (MixedTensor::::Symbolic(s), MixedTensor::::Symbolic(o)) => { + Some(MixedTensor::::Symbolic(s.contract(o)?)) + } + } + } +} diff --git a/src/tensor/structure.rs b/src/tensor/structure.rs new file mode 100644 index 00000000..3bd710d9 --- /dev/null +++ b/src/tensor/structure.rs @@ -0,0 +1,1731 @@ +use ahash::AHashMap; +use derive_more::Add; +use derive_more::AddAssign; +use derive_more::Display; +use derive_more::From; +use derive_more::Into; +use duplicate::duplicate; +use indexmap::IndexMap; +use serde::Deserialize; +use serde::Serialize; +use smartstring::LazyCompact; +use smartstring::SmartString; +use std::borrow::Cow; +use std::fmt::Debug; +use symbolica::representations::ListIterator; + +use std::i64; + +use std::ops::Range; + +use symbolica::coefficient::CoefficientView; + +use symbolica::representations::AtomView; + +use permutation::Permutation; + +use symbolica::representations::{AsAtomView, Atom, FunctionBuilder, Symbol}; +use symbolica::state::{State, Workspace}; + +use std::collections::HashSet; +use 
std::{cmp::Ordering, collections::HashMap}; + +use super::ufo; +use super::DenseTensor; +use super::MixedTensor; +use super::TensorStructureIndexIterator; +use smartstring::alias::String; +/// A type that represents the name of an index in a tensor. +#[derive( + Debug, + Copy, + Clone, + Ord, + PartialOrd, + Eq, + PartialEq, + Hash, + Serialize, + Deserialize, + From, + Into, + Display, + Add, + AddAssign, +)] +#[display(fmt = "id{}", _0)] +pub struct AbstractIndex(pub usize); + +/// A Dimension +#[derive( + Debug, + Copy, + Clone, + Ord, + PartialOrd, + Eq, + PartialEq, + Hash, + Serialize, + Deserialize, + From, + Into, + Add, + Display, +)] +#[into(owned, ref, ref_mut)] +#[display(fmt = "{}", _0)] +pub struct Dimension(pub usize); + +impl PartialEq for Dimension { + fn eq(&self, other: &usize) -> bool { + self.0 == *other + } +} + +impl PartialEq for usize { + fn eq(&self, other: &Dimension) -> bool { + *self == other.0 + } +} + +impl PartialOrd for Dimension { + fn partial_cmp(&self, other: &usize) -> Option { + self.0.partial_cmp(other) + } +} + +impl PartialOrd for usize { + fn partial_cmp(&self, other: &Dimension) -> Option { + self.partial_cmp(&other.0) + } +} + +/// A concrete index, i.e. the concrete usize/index of the corresponding abstract index + +pub type ConcreteIndex = usize; + +/// A Representation/Dimension of the index. +#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub enum Representation { + /// Represents a Euclidean space of the given dimension, with metric diag(1,1,1,1,...) + Euclidean(Dimension), + /// Represents a Minkowski space of the given dimension, with metric diag(1,-1,-1,-1,...) + Lorentz(Dimension), + /// Represents a Spinor space of the given dimension + Spin(Dimension), + /// Represents a Color Fundamental space of the given dimension + ColorFundamental(Dimension), + /// Represents a Color Anti-Fundamental space of the given dimension + ColorAntiFundamental(Dimension), + /// Represents a Color Adjoint space of the given dimension + ColorAdjoint(Dimension), + /// Represents a Color Sextet space of the given dimension + ColorSextet(Dimension), + /// Represents a Color Anti-Sextet space of the given dimension + ColorAntiSextet(Dimension), +} + +impl Representation { + #[inline] + // this could be implemented directly in the fiberiterator. + /// gives the vector of booleans, saying which concrete index along a Dimension/Abstract Index should have a minus sign during contraction. 
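+    ///
+    /// For `Lorentz(d)` this is `[false, true, ..., true]`, matching the mostly-minus
+    /// metric diag(1,-1,...,-1); all other representations yield all `false`.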
+ /// + /// # Example + /// ``` + /// # use _gammaloop::tensor::Representation; + /// # use _gammaloop::tensor::Dimension; + /// let spin = Representation::Spin(Dimension(5)); + /// + /// let metric_diag = spin.negative(); + /// + /// let mut agree= true; + /// + /// for (i,r) in metric_diag.iter().enumerate(){ + /// if (r ^ spin.is_neg(i)) { + /// agree = false; + /// } + /// } + /// + /// assert!(agree); + /// ``` + #[must_use] + pub fn negative(&self) -> Vec { + match *self { + Self::Lorentz(value) => std::iter::once(false) + .chain(std::iter::repeat(true).take(value.0 - 1)) + .collect::>(), + Self::Euclidean(value) + | Self::Spin(value) + | Self::ColorAdjoint(value) + | Self::ColorFundamental(value) + | Self::ColorAntiFundamental(value) + | Self::ColorSextet(value) + | Self::ColorAntiSextet(value) => vec![false; value.into()], + } + } + + /// for the given concrete index, says whether it should have a minus sign during contraction + /// + /// for example see [`Self::negative`] + #[inline] + #[must_use] + pub const fn is_neg(&self, i: usize) -> bool { + match self { + Self::Lorentz(_) => i > 0, + _ => false, + } + } + + /// yields a function builder for the representation, adding a first variable: the dimension. + /// + /// for example see [`Slot::to_symbolic`] + #[allow(clippy::cast_possible_wrap)] + pub fn to_fnbuilder<'a, 'b: 'a>(&'a self) -> FunctionBuilder { + let (value, id) = match *self { + Self::Euclidean(value) => (value, State::get_or_insert_fn("euc", None)), + Self::Lorentz(value) => (value, State::get_or_insert_fn("lor", None)), + Self::Spin(value) => (value, State::get_or_insert_fn("spin", None)), + Self::ColorAdjoint(value) => (value, State::get_or_insert_fn("CAdj", None)), + Self::ColorFundamental(value) => (value, State::get_or_insert_fn("CF", None)), + Self::ColorAntiFundamental(value) => (value, State::get_or_insert_fn("CAF", None)), + Self::ColorSextet(value) => (value, State::get_or_insert_fn("CS", None)), + Self::ColorAntiSextet(value) => (value, State::get_or_insert_fn("CAS", None)), + }; + + let mut value_builder = FunctionBuilder::new(id.unwrap_or_else(|_| unreachable!())); + + value_builder = + value_builder.add_arg(Atom::new_num(usize::from(value) as i64).as_atom_view()); + + value_builder + } + + /// Finishes the function builder into an Atom + /// + /// # Example + /// + /// ``` + /// # use symbolica::state::{State, Workspace}; + /// # use _gammaloop::tensor::Representation; + /// # use _gammaloop::tensor::Dimension; + /// + /// let mink = Representation::Lorentz(Dimension(4)); + /// + /// assert_eq!("lor(4)",format!("{}",mink.to_symbolic())); + /// assert_eq!("l4",format!("{}",mink)); + /// ``` + pub fn to_symbolic(&self) -> Atom { + self.to_fnbuilder().finish() + } +} + +impl From for Representation { + fn from(value: Dimension) -> Self { + Self::Euclidean(value) + } +} + +impl From for Representation { + fn from(value: usize) -> Self { + Self::Euclidean(value.into()) + } +} + +impl<'a> std::iter::FromIterator<&'a Representation> for Vec { + fn from_iter>(iter: T) -> Self { + iter.into_iter() + .map(|&rep| -> Dimension { (&rep).into() }) + .collect() + } +} + +impl From<&Representation> for Dimension { + fn from(rep: &Representation) -> Self { + match rep { + Representation::Euclidean(value) + | Representation::Lorentz(value) + | Representation::Spin(value) + | Representation::ColorAdjoint(value) + | Representation::ColorFundamental(value) + | Representation::ColorAntiFundamental(value) + | Representation::ColorSextet(value) + | 
Representation::ColorAntiSextet(value) => *value,
+        }
+    }
+}
+
+impl From<&Representation> for usize {
+    fn from(value: &Representation) -> Self {
+        usize::from(Dimension::from(value))
+    }
+}
+
+impl From<Representation> for Dimension {
+    fn from(rep: Representation) -> Self {
+        match rep {
+            Representation::Euclidean(value)
+            | Representation::Lorentz(value)
+            | Representation::Spin(value)
+            | Representation::ColorAdjoint(value)
+            | Representation::ColorFundamental(value)
+            | Representation::ColorAntiFundamental(value)
+            | Representation::ColorSextet(value)
+            | Representation::ColorAntiSextet(value) => value,
+        }
+    }
+}
+
+impl From<Representation> for usize {
+    fn from(value: Representation) -> Self {
+        usize::from(Dimension::from(value))
+    }
+}
+
+impl std::fmt::Display for Representation {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Euclidean(value) => write!(f, "e{value}"),
+            Self::Lorentz(value) => write!(f, "l{value}"),
+            Self::Spin(value) => write!(f, "s{value}"),
+            Self::ColorAdjoint(value) => write!(f, "cad{value}"),
+            Self::ColorFundamental(value) => write!(f, "cf{value}"),
+            Self::ColorAntiFundamental(value) => write!(f, "caf{value}"),
+            Self::ColorSextet(value) => write!(f, "cs{value}"),
+            Self::ColorAntiSextet(value) => write!(f, "cas{value}"),
+        }
+    }
+}
+
+/// A [`Slot`] is an index, identified by a `usize` and a [`Representation`].
+///
+/// A vector of slots thus identifies the shape and type of the tensor.
+/// Two indices are considered matching only if *both* the `usize` and the [`Representation`] match.
+///
+/// # Example
+///
+/// It can be built from a tuple of `AbstractIndex` and `Representation`
+/// ```
+/// # use _gammaloop::tensor::{Representation,Slot,Dimension,AbstractIndex};
+/// let mink = Representation::Lorentz(Dimension(4));
+/// let mu = Slot::from((AbstractIndex(0),mink));
+///
+/// assert_eq!("id0l4",format!("{}",mu));
+/// ```
+///
+/// It can also be built from a tuple of `usize` and `usize`, where we default to `Representation::Euclidean`
+/// ```
+/// # use _gammaloop::tensor::{Representation,Slot};
+/// let mu = Slot::from((0,4));
+/// assert_eq!("id0e4",format!("{}",mu));
+/// ```
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Slot {
+    index: AbstractIndex,
+    pub representation: Representation,
+}
+
+/// Can possibly construct a Slot from an `AtomView`, if it is of the form: `<representation>(<dimension>,<index>)`
+///
+/// # Example
+///
+/// ```
+/// # use _gammaloop::tensor::{Representation,Slot,Dimension,AbstractIndex};
+/// # use symbolica::representations::AtomView;
+/// let mink = Representation::Lorentz(Dimension(4));
+/// let mu = Slot::from((AbstractIndex(0), mink));
+/// let atom = mu.to_symbolic();
+/// let slot = Slot::try_from(atom.as_view()).unwrap();
+/// assert_eq!(slot, mu);
+/// ```
+impl TryFrom<AtomView<'_>> for Slot {
+    type Error = &'static str;
+
+    fn try_from(value: AtomView<'_>) -> Result<Self, Self::Error> {
+        fn extract_num(iter: &mut ListIterator) -> Result<i64, &'static str> {
+            if let Some(a) = iter.next() {
+                if let AtomView::Num(n) = a {
+                    if let CoefficientView::Natural(n, 1) = n.get_coeff_view() {
+                        return Ok(n);
+                    }
+                    return Err("Argument is not a natural number");
+                }
+                Err("Argument is not a number")
+            } else {
+                Err("No more arguments")
+            }
+        }
+
+        let mut iter = if let AtomView::Fun(f) = value {
+            f.iter()
+        } else {
+            return Err("Not a slot, is composite");
+        };
+
+        let dim: Dimension = usize::try_from(extract_num(&mut iter)?)
+            .or(Err("Dimension too large"))?
+            .into();
+        let index: AbstractIndex = usize::try_from(extract_num(&mut iter)?)
+            .or(Err("Index too large"))?
+            .into();
+
+        if extract_num(&mut iter).is_ok() {
+            return Err("Too many arguments");
+        }
+
+        let euc = State::get_or_insert_fn("euc", None).unwrap();
+        let lor = State::get_or_insert_fn("lor", None).unwrap();
+        let spin = State::get_or_insert_fn("spin", None).unwrap();
+        let cadj = State::get_or_insert_fn("CAdj", None).unwrap();
+        let cf = State::get_or_insert_fn("CF", None).unwrap();
+        let caf = State::get_or_insert_fn("CAF", None).unwrap();
+        let cs = State::get_or_insert_fn("CS", None).unwrap();
+        let cas = State::get_or_insert_fn("CAS", None).unwrap();
+
+        let representation = if let AtomView::Fun(f) = value {
+            let sym = f.get_symbol();
+            match sym {
+                _ if sym == euc => Representation::Euclidean(dim),
+                _ if sym == lor => Representation::Lorentz(dim),
+                _ if sym == spin => Representation::Spin(dim),
+                _ if sym == cadj => Representation::ColorAdjoint(dim),
+                _ if sym == cf => Representation::ColorFundamental(dim),
+                _ if sym == caf => Representation::ColorAntiFundamental(dim),
+                _ if sym == cs => Representation::ColorSextet(dim),
+                _ if sym == cas => Representation::ColorAntiSextet(dim),
+                _ => return Err("Not a slot, isn't a representation"),
+            }
+        } else {
+            return Err("Not a slot, is composite");
+        };
+
+        Ok(Slot {
+            index,
+            representation,
+        })
+    }
+}
+
+impl PartialOrd for Slot {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for Slot {
+    fn cmp(&self, other: &Self) -> Ordering {
+        match self.representation.cmp(&other.representation) {
+            Ordering::Equal => self.index.cmp(&other.index),
+            other => other,
+        }
+    }
+}
+
+impl From<(AbstractIndex, Representation)> for Slot {
+    fn from(value: (AbstractIndex, Representation)) -> Self {
+        Self {
+            index: value.0,
+            representation: value.1,
+        }
+    }
+}
+
+impl From<(usize, usize)> for Slot {
+    fn from(value: (usize, usize)) -> Self {
+        Self {
+            index: value.0.into(),
+            representation: value.1.into(),
+        }
+    }
+}
+
+#[allow(clippy::cast_possible_wrap)]
+impl Slot {
+    /// Using the function builder of the representation, add the abstract index as an argument, and finish it into an Atom.
+    /// # Example
+    ///
+    /// ```
+    /// # use symbolica::state::{State, Workspace};
+    /// # use _gammaloop::tensor::{Representation,Slot,Dimension,AbstractIndex};
+    /// let mink = Representation::Lorentz(Dimension(4));
+    /// let mu = Slot::from((AbstractIndex(0),mink));
+    ///
+    /// assert_eq!("lor(4,0)",format!("{}",mu.to_symbolic()));
+    /// assert_eq!("id0l4",format!("{}",mu));
+    /// ```
+    pub fn to_symbolic(&self) -> Atom {
+        let mut value_builder = self.representation.to_fnbuilder();
+        value_builder =
+            value_builder.add_arg(Atom::new_num(usize::from(self.index) as i64).as_atom_view());
+        value_builder.finish()
+    }
+}
+
+impl std::fmt::Display for Slot {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}{}", self.index, self.representation)
+    }
+}
+
+/// A trait for any struct that functions as a tensor structure.
+/// Only three methods are required to be implemented, the rest are default implementations.
+///
+/// The associated type `Structure` is the type of the structure. This is useful for containers of structures, like a `DataTensor`.
+/// The two methods `structure` and `mut_structure` are used to get a reference to the structure, and a mutable reference to the structure.
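+///
+/// A minimal implementor only needs to supply those three methods; a sketch for a
+/// hypothetical newtype around `Vec<Slot>`:
+///
+/// ```ignore
+/// struct MyStructure(Vec<Slot>);
+///
+/// impl TensorStructure for MyStructure {
+///     type Structure = Self;
+///     fn structure(&self) -> &Self::Structure {
+///         self
+///     }
+///     fn mut_structure(&mut self) -> &mut Self::Structure {
+///         self
+///     }
+///     fn external_structure(&self) -> &[Slot] {
+///         &self.0
+///     }
+/// }
+/// ```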
+/// +pub trait TensorStructure { + type Structure; + /// returns the list of slots that are the external indices of the tensor + fn external_structure(&self) -> &[Slot]; + + fn structure(&self) -> &Self::Structure; + + fn mut_structure(&mut self) -> &mut Self::Structure; + + /// checks if the tensor has the same exact structure as another tensor + fn same_content(&self, other: &Self) -> bool { + self.same_external(other) + } + + /// Given two [`TensorStructure`]s, returns the index of the first matching slot in each external index list, along with a boolean indicating if there is a single match + fn match_index(&self, other: &Self) -> Option<(bool, usize, usize)> { + let posmap = self + .external_structure() + .iter() + .enumerate() + .map(|(i, slot)| (slot, i)) + .collect::>(); + + let mut first_pair: Option<(usize, usize)> = None; + + for (j, slot) in other.external_structure().iter().enumerate() { + if let Some(&i) = posmap.get(slot) { + if let Some((i, j)) = first_pair { + // Found a second match, return early with false indicating non-unique match + return Some((false, i, j)); + } + first_pair = Some((i, j)); + } + } + + first_pair.map(|(i, j)| (true, i, j)) // Maps the found pair to Some with true indicating a unique match, or None if no match was found + } + + /// Given two [`TensorStructure`]s, returns the index of the first matching slot in each external index list + fn match_indices(&self, other: &Self) -> Option<(Permutation, Vec, Vec)> { + let mut self_matches = vec![false; self.order()]; + let mut perm = Vec::new(); + let mut other_matches = vec![false; other.order()]; + + let posmap = self + .external_structure() + .iter() + .enumerate() + .map(|(i, slot)| (slot, i)) + .collect::>(); + + for (j, slot_other) in other.external_structure().iter().enumerate() { + if let Some(&i) = posmap.get(slot_other) { + self_matches[i] = true; + other_matches[j] = true; + perm.push(j); + } + } + + if perm.is_empty() { + None + } else { + let p: Permutation = permutation::sort(&mut perm); + Some((p, self_matches, other_matches)) + } + } + /// Identify the repeated slots in the external index list + fn traces(&self) -> Vec<[usize; 2]> { + let mut positions = HashMap::new(); + + // Track the positions of each element + for (index, &value) in self.external_structure().iter().enumerate() { + positions.entry(value).or_insert_with(Vec::new).push(index); + } + + // Collect only the positions of repeated elements + positions + .into_iter() + .filter_map(|(_, indices)| { + if indices.len() == 2 { + Some([indices[0], indices[1]]) + } else { + None + } + }) + .collect() + } + + /// yields the (outwards facing) shape of the tensor as a list of dimensions + fn shape(&self) -> Vec { + self.external_structure() + .iter() + .map(|slot| &slot.representation) + .collect() + } + + fn reps(&self) -> Vec { + self.external_structure() + .iter() + .map(|slot| slot.representation) + .collect() + } + + /// yields the order/total valence of the tensor, i.e. 
the number of indices + /// (or misnamed : rank) + fn order(&self) -> usize { + //total valence (or misnamed : rank) + self.external_structure().len() + } + + /// checks if externally, the two tensors are the same + fn same_external(&self, other: &Self) -> bool { + let set1: HashSet<_> = self.external_structure().iter().collect(); + let set2: HashSet<_> = other.external_structure().iter().collect(); + set1 == set2 + } + + /// find the permutation of the external indices that would make the two tensors the same + fn find_permutation(&self, other: &Self) -> Option> { + if self.external_structure().len() != other.external_structure().len() { + return None; + } + + let mut index_map = HashMap::new(); + for (i, item) in other.external_structure().iter().enumerate() { + index_map.entry(item).or_insert_with(Vec::new).push(i); + } + + let mut permutation = Vec::with_capacity(self.external_structure().len()); + let mut used_indices = HashSet::new(); + for item in self.external_structure() { + if let Some(indices) = index_map.get_mut(item) { + // Find an index that hasn't been used yet + if let Some(&index) = indices.iter().find(|&&i| !used_indices.contains(&i)) { + permutation.push(index); + used_indices.insert(index); + } else { + // No available index for this item + return None; + } + } else { + // Item not found in other + return None; + } + } + + Some(permutation) + } + + /// yields the strides of the tensor in column major order + fn strides_column_major(&self) -> Vec { + let mut strides: Vec = vec![1; self.order()]; + + if self.order() == 0 { + return strides; + } + + for i in 0..self.order() - 1 { + strides[i + 1] = strides[i] * usize::from(self.external_structure()[i].representation); + } + + strides + } + + /// yields the strides of the tensor in row major order + fn strides_row_major(&self) -> Vec { + let mut strides = vec![1; self.order()]; + if self.order() == 0 { + return strides; + } + + for i in (0..self.order() - 1).rev() { + strides[i] = + strides[i + 1] * usize::from(self.external_structure()[i + 1].representation); + } + + strides + } + + /// By default, the strides are row major + fn strides(&self) -> Vec { + self.strides_row_major() + } + + /// Verifies that the list of indices provided are valid for the tensor + /// + /// # Errors + /// + /// `Mismatched order` = if the length of the indices is different from the order of the tensor, + /// + /// `Index out of bounds` = if the index is out of bounds for the dimension of that index + /// + fn verify_indices(&self, indices: &[ConcreteIndex]) -> Result<(), String> { + if indices.len() != self.order() { + return Err("Mismatched order".into()); + } + + for (i, &dim_len) in self + .external_structure() + .iter() + .map(|slot| &slot.representation) + .enumerate() + { + if indices[i] >= usize::from(dim_len) { + return Err(format!( + "Index {} out of bounds for dimension {} of size {}", + indices[i], + i, + usize::from(dim_len) + ) + .into()); + } + } + Ok(()) + } + + /// yields the flat index of the tensor given a list of indices + /// + /// # Errors + /// + /// Same as [`Self::verify_indices`] + fn flat_index(&self, indices: &[ConcreteIndex]) -> Result { + let strides = self.strides(); + self.verify_indices(indices)?; + + let mut idx = 0; + for (i, &index) in indices.iter().enumerate() { + idx += index * strides[i]; + } + Ok(idx) + } + + /// yields the expanded index of the tensor given a flat index + /// + /// # Errors + /// + /// `Index out of bounds` = if the flat index is out of bounds for the tensor + fn expanded_index(&self, 
flat_index: usize) -> Result, String> { + let mut indices = vec![]; + let mut index = flat_index; + for &stride in &self.strides() { + indices.push(index / stride); + index %= stride; + } + if flat_index < self.size() { + Ok(indices) + } else { + Err(format!("Index {flat_index} out of bounds").into()) + } + } + + /// yields an iterator over the indices of the tensor + fn index_iter(&self) -> TensorStructureIndexIterator { + TensorStructureIndexIterator::new(self.external_structure()) + } + + /// if the tensor has no (external) indices, it is a scalar + fn is_scalar(&self) -> bool { + self.order() == 0 + } + + /// get the metric along the i-th index + fn get_ith_metric(&self, i: usize) -> Option> { + Some(self.external_structure().get(i)?.representation.negative()) + } + + /// yields the size of the tensor, i.e. the product of the dimensions. This is the length of the vector of the data in a dense tensor + fn size(&self) -> usize { + self.shape().iter().map(|x| usize::from(*x)).product() + } + + fn shadow_with(self, f_id: Symbol) -> DenseTensor + where + Self: std::marker::Sized, + Self::Structure: Clone, + { + let mut data = vec![]; + for index in self.index_iter() { + data.push(atomic_expanded_label_id(&index, f_id)); + } + + DenseTensor { + data, + structure: self.structure().clone(), + } + } + + fn to_explicit_rep(self, f_id: Symbol) -> MixedTensor + where + Self: std::marker::Sized, + Self::Structure: Clone + TensorStructure, + { + let id = State::get_or_insert_fn("id", None).unwrap(); + let gamma = State::get_or_insert_fn("γ", None).unwrap(); + let gamma5 = State::get_or_insert_fn("γ5", None).unwrap(); + let proj_m = State::get_or_insert_fn("ProjM", None).unwrap(); + let proj_p = State::get_or_insert_fn("ProjP", None).unwrap(); + let sigma = State::get_or_insert_fn("σ", None).unwrap(); + + match f_id { + _ if f_id == id => { + ufo::identity_data::(self.structure().clone()).into() + } + + _ if f_id == gamma => ufo::gamma_data(self.structure().clone()).into(), + _ if f_id == gamma5 => ufo::gamma5_data(self.structure().clone()).into(), + _ if f_id == proj_m => ufo::proj_m_data(self.structure().clone()).into(), + _ if f_id == proj_p => ufo::proj_p_data(self.structure().clone()).into(), + _ if f_id == sigma => ufo::sigma_data(self.structure().clone()).into(), + name => self.shadow_with(name).into(), + } + } +} + +impl<'a> TensorStructure for &'a [Slot] { + type Structure = &'a [Slot]; + + fn external_structure(&self) -> &[Slot] { + self + } + + fn structure(&self) -> &Self::Structure { + self + } + + fn mut_structure(&mut self) -> &mut Self::Structure { + self + } +} + +impl TensorStructure for Vec { + type Structure = Self; + + fn structure(&self) -> &Self::Structure { + self + } + fn mut_structure(&mut self) -> &mut Self::Structure { + self + } + fn external_structure(&self) -> &[Slot] { + self + } +} + +/// A trait for a structure that can be traced and merged, during a contraction. 
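+///
+/// As a sketch of the intended semantics: merging the slot lists `[id0l4, id1l4]`
+/// and `[id1l4, id2l4]` pairs up the matching slot `id1l4`, removes it from both
+/// sides, and keeps the remainder, yielding `[id0l4, id2l4]`.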
+pub trait StructureContract { + fn trace(&mut self, i: usize, j: usize); + + fn trace_out(&mut self); + + fn merge(&mut self, other: &Self) -> Option; + + #[must_use] + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self; +} + +impl StructureContract for Vec { + fn trace(&mut self, i: usize, j: usize) { + if i < j { + self.trace(j, i); + return; + } + let a = self.remove(i); + let b = self.remove(j); + assert_eq!(a, b); + } + + fn trace_out(&mut self) { + let mut positions = IndexMap::new(); + + // Track the positions of each element + for (index, &value) in self.iter().enumerate() { + positions.entry(value).or_insert_with(Vec::new).push(index); + } + // Collect only the positions of non- repeated elements + + *self = positions + .into_iter() + .filter_map(|(value, indices)| { + if indices.len() == 1 { + Some(value) + } else { + None + } + }) + .collect(); + } + + fn merge(&mut self, other: &Self) -> Option { + let mut positions = IndexMap::new(); + let mut i = 0; + + self.retain(|x| { + let e = positions.get(x); + if e.is_some() { + return false; + } + positions.insert(*x, (Some(i), None)); + i += 1; + true + }); + + let mut first = true; + let mut first_other = 0; + + for (index, &value) in self.iter().enumerate() { + positions.entry(value).or_insert((Some(index), None)); + } + + for (index, &value) in other.iter().enumerate() { + let e = positions.get(&value); + if let Some((Some(selfi), None)) = e { + positions.insert(value, (Some(*selfi), Some(index))); + } else { + positions.insert(value, (None, Some(index))); + self.push(value); + } + } + + let mut i = 0; + + self.retain(|x| { + let pos = positions.get(x).unwrap(); + if pos.1.is_none() { + i += 1; + return true; + } + if pos.0.is_none() { + if first { + first = false; + first_other = i; + } + return true; + } + false + }); + + if first { + None + } else { + Some(first_other) + } + } + + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + let mut slots_b = other.clone(); + let mut slots_a = self.clone(); + + slots_a.remove(positions.0); + slots_b.remove(positions.1); + + slots_a.append(&mut slots_b); + slots_a + } +} + +/// A trait for a structure that can be traced and merged, during a contraction, maybe using symbolic state and workspace. 
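+///
+/// A blanket implementation forwards every method to [`StructureContract`],
+/// simply ignoring the `State` and `Workspace` arguments.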
+ +pub trait SymbolicStructureContract { + fn trace_sym(&mut self, i: usize, j: usize, state: &State, ws: &Workspace); + + fn trace_out_sym(&mut self, state: &State, ws: &Workspace); + + fn merge_sym(&mut self, other: &Self, state: &State, ws: &Workspace); + + #[must_use] + fn merge_at_sym( + &self, + other: &Self, + positions: (usize, usize), + state: &State, + ws: &Workspace, + ) -> Self; +} + +impl SymbolicStructureContract for T +where + T: StructureContract, +{ + fn trace_sym(&mut self, i: usize, j: usize, _state: &State, _ws: &Workspace) { + self.trace(i, j); + } + + fn trace_out_sym(&mut self, _state: &State, _ws: &Workspace) { + self.trace_out(); + } + + fn merge_sym(&mut self, other: &Self, _state: &State, _ws: &Workspace) { + self.merge(other); + } + + fn merge_at_sym( + &self, + other: &Self, + positions: (usize, usize), + _state: &State, + _ws: &Workspace, + ) -> Self { + self.merge_at(other, positions) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub struct VecStructure { + pub structure: Vec, +} + +impl FromIterator for VecStructure { + fn from_iter>(iter: T) -> Self { + Self { + structure: iter.into_iter().collect(), + } + } +} + +impl VecStructure { + pub fn new(structure: Vec) -> Self { + Self { structure } + } + + pub fn to_named(self, name: &str) -> NamedStructure { + NamedStructure::from_slots(self.structure, name) + } +} + +impl From for VecStructure { + fn from(structure: ContractionCountStructure) -> Self { + Self { + structure: structure.structure, + } + } +} + +impl From for ContractionCountStructure { + fn from(structure: VecStructure) -> Self { + Self { + structure: structure.structure, + contractions: 0, + } + } +} + +impl From> for VecStructure { + fn from(structure: Vec) -> Self { + Self { structure } + } +} + +impl From for Vec { + fn from(structure: VecStructure) -> Self { + structure.structure + } +} + +// const IDPRINTER: Lazy> = Lazy::new(|| BlockId::new(Alphabet::alphanumeric(), 1, 1)); + +impl std::fmt::Display for VecStructure { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + for (index, item) in self.structure.iter().enumerate() { + if index != 0 { + // To avoid a newline at the start + writeln!(f)?; + } + write!( + f, + "{:<3} ({})", + usize::from(item.index), + // IDPRINTER + // .encode_string(usize::from(item.index) as u64) + // .unwrap(), + item.representation + )?; + } + Ok(()) + } +} + +impl TensorStructure for VecStructure { + type Structure = VecStructure; + fn structure(&self) -> &Self::Structure { + self + } + fn mut_structure(&mut self) -> &mut Self::Structure { + self + } + fn external_structure(&self) -> &[Slot] { + &self.structure + } +} + +impl StructureContract for VecStructure { + fn merge(&mut self, other: &Self) -> Option { + self.structure.merge(&other.structure) + } + + fn trace_out(&mut self) { + self.structure.trace_out(); + } + + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + Self { + structure: self.structure.merge_at(&other.structure, positions), + } + } + + fn trace(&mut self, i: usize, j: usize) { + self.structure.trace(i, j); + } +} + +/// A named structure is a structure with a global name, and a list of slots +/// +/// It is useful when you want to shadow tensors, to nest tensor network contraction operations. 
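+///
+/// A small usage sketch (illustrative values):
+///
+/// ```ignore
+/// let p = NamedStructure::from_integers(&[(AbstractIndex(0), Dimension(4))], "p");
+/// assert_eq!(p.structure.len(), 1);
+/// ```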
+#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub struct NamedStructure { + pub structure: Vec, + pub global_name: Option>, +} + +impl NamedStructure { + /// Constructs a new [`NamedStructure`] from a list of tuples of indices and dimension (assumes they are all euclidean), along with a name + #[must_use] + pub fn from_integers(slots: &[(AbstractIndex, Dimension)], name: &str) -> Self { + let slots: Vec<(AbstractIndex, Representation)> = slots + .iter() + .map(|(index, dim)| (*index, Representation::Euclidean(*dim))) + .collect(); + Self::new(&slots, name) + } + /// Constructs a new [`NamedStructure`] from a list of tuples of indices and representations, along with a name + #[must_use] + pub fn new(slots: &[(AbstractIndex, Representation)], name: &str) -> Self { + let structure: Vec = slots + .iter() + .map(|(index, representation)| Slot::from((*index, *representation))) + .collect(); + + Self { + structure, + global_name: Some(name.into()), + } + } + + pub fn from_slots(slots: Vec, name: &str) -> Self { + Self { + structure: slots, + global_name: Some(name.into()), + } + } +} + +/// A trait for a structure that has a name +pub trait HasName { + type Name: Clone; + fn name(&self) -> Option>; + fn set_name(&mut self, name: &Self::Name); +} + +impl HasName for NamedStructure { + type Name = SmartString; + fn name(&self) -> Option> { + self.global_name.as_ref().map(Cow::Borrowed) + } + fn set_name(&mut self, name: &Self::Name) { + self.global_name = Some(name.clone()); + } +} + +impl TensorStructure for NamedStructure { + type Structure = Self; + fn structure(&self) -> &Self::Structure { + self + } + fn mut_structure(&mut self) -> &mut Self::Structure { + self + } + fn external_structure(&self) -> &[Slot] { + &self.structure + } +} + +impl StructureContract for NamedStructure { + fn merge(&mut self, other: &Self) -> Option { + self.structure.merge(&other.structure) + } + + fn trace_out(&mut self) { + self.structure.trace_out(); + } + + /// when merging two named structures, the global name is lost + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + Self { + structure: self.structure.merge_at(&other.structure, positions), + global_name: None, + } + } + + fn trace(&mut self, i: usize, j: usize) { + self.structure.trace(i, j); + } +} + +/// A contraction count structure +/// +/// Useful for tensor network contraction algorithm. 
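+///
+/// Each merge adds the two counts and increments by one, so `contractions`
+/// records how many pairwise contractions produced the tensor.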
+#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub struct ContractionCountStructure { + pub structure: Vec, + pub contractions: usize, +} + +impl FromIterator for ContractionCountStructure { + fn from_iter>(iter: T) -> Self { + Self { + structure: iter.into_iter().collect(), + contractions: 0, + } + } +} + +impl ContractionCountStructure { + /// Constructs a new [`ContractionCountStructure`] from a list of tuples of indices and dimension (assumes they are all euclidean), along with a name + #[must_use] + pub fn from_integers(slots: &[(AbstractIndex, Dimension)]) -> Self { + let slots: Vec<(AbstractIndex, Representation)> = slots + .iter() + .map(|(index, dim)| (*index, Representation::Euclidean(*dim))) + .collect(); + Self::new(&slots) + } + /// Constructs a new [`ContractionCountStructure`] from a list of tuples of indices and representations, along with a name + #[must_use] + pub fn new(slots: &[(AbstractIndex, Representation)]) -> Self { + let structure: Vec = slots + .iter() + .map(|(index, representation)| Slot::from((*index, *representation))) + .collect(); + + Self { + structure, + contractions: 0, + } + } + + pub fn from_slots(slots: Vec) -> Self { + Self { + structure: slots, + contractions: 0, + } + } +} + +pub trait TracksCount { + fn contractions_num(&self) -> usize; + + fn is_composite(&self) -> bool { + self.contractions_num() > 0 + } +} + +impl TracksCount for ContractionCountStructure { + fn contractions_num(&self) -> usize { + self.contractions + } +} + +impl TensorStructure for ContractionCountStructure { + type Structure = ContractionCountStructure; + fn structure(&self) -> &Self::Structure { + self + } + fn mut_structure(&mut self) -> &mut Self::Structure { + self + } + fn external_structure(&self) -> &[Slot] { + &self.structure + } +} + +impl StructureContract for ContractionCountStructure { + fn merge(&mut self, other: &Self) -> Option { + self.contractions += other.contractions + 1; + self.structure.merge(&other.structure) + } + + fn trace_out(&mut self) { + self.structure.trace_out(); + } + + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + Self { + structure: self.structure.merge_at(&other.structure, positions), + contractions: self.contractions + other.contractions + 1, + } + } + + fn trace(&mut self, i: usize, j: usize) { + self.structure.trace(i, j); + } +} + +/// A structure to enable smart shadowing of tensors in a tensor network contraction algorithm. 
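+///
+/// It combines a global name with a contraction count, so that a contraction
+/// algorithm can decide when a composite tensor should be replaced by a named shadow.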
+#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
+pub struct SmartShadowStructure {
+    pub structure: Vec<Slot>,
+    pub contractions: usize,
+    pub global_name: Option<SmartString<LazyCompact>>,
+}
+
+impl SmartShadowStructure {
+    /// Constructs a new [`SmartShadowStructure`] from a list of tuples of indices and dimension (assumes they are all euclidean), along with a name
+    #[must_use]
+    pub fn from_integers(slots: &[(AbstractIndex, Dimension)], name: &str) -> Self {
+        let slots: Vec<(AbstractIndex, Representation)> = slots
+            .iter()
+            .map(|(index, dim)| (*index, Representation::Euclidean(*dim)))
+            .collect();
+        Self::new(&slots, name)
+    }
+    /// Constructs a new [`SmartShadowStructure`] from a list of tuples of indices and representations, along with a name
+    #[must_use]
+    pub fn new(slots: &[(AbstractIndex, Representation)], name: &str) -> Self {
+        let structure: Vec<Slot> = slots
+            .iter()
+            .map(|(index, representation)| Slot::from((*index, *representation)))
+            .collect();
+
+        SmartShadowStructure {
+            structure,
+            contractions: 0,
+            global_name: Some(name.into()),
+        }
+    }
+}
+
+impl HasName for SmartShadowStructure {
+    type Name = SmartString<LazyCompact>;
+    fn name(&self) -> Option<Cow<SmartString<LazyCompact>>> {
+        self.global_name.as_ref().map(Cow::Borrowed)
+    }
+    fn set_name(&mut self, name: &SmartString<LazyCompact>) {
+        self.global_name = Some(name.clone());
+    }
+}
+
+impl TracksCount for SmartShadowStructure {
+    fn contractions_num(&self) -> usize {
+        self.contractions
+    }
+}
+
+impl TensorStructure for SmartShadowStructure {
+    type Structure = SmartShadowStructure;
+    fn structure(&self) -> &Self::Structure {
+        self
+    }
+    fn mut_structure(&mut self) -> &mut Self::Structure {
+        self
+    }
+    fn external_structure(&self) -> &[Slot] {
+        &self.structure
+    }
+}
+
+impl StructureContract for SmartShadowStructure {
+    fn merge(&mut self, other: &Self) -> Option<usize> {
+        self.contractions += other.contractions;
+        self.structure.merge(&other.structure)
+    }
+
+    fn trace_out(&mut self) {
+        self.structure.trace_out();
+    }
+
+    fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self {
+        SmartShadowStructure {
+            structure: self.structure.merge_at(&other.structure, positions),
+            contractions: self.contractions + other.contractions,
+            global_name: None,
+        }
+    }
+
+    fn trace(&mut self, i: usize, j: usize) {
+        self.structure.trace(i, j);
+    }
+}
+
+/// A tracking structure
+///
+/// It contains two vecs of [`Slot`]s, one for the internal structure, simply extended during each contraction, and one external, corresponding to all the free indices
+///
+/// It enables keeping track of the contraction history of the tensor, mostly for debugging and display purposes.
+/// A [`SymbolicTensor`] can also be used in this way, however it needs a symbolica state and workspace during contraction.
+#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
+pub struct HistoryStructure<N> {
+    internal: Vec<Slot>,
+    pub external: Vec<Slot>,
+    pub names: AHashMap<Range<usize>, N>, // ideally this is a named partition.. maybe a btreemap, and the range is from previous to next
+    pub global_name: Option<N>,
+}
+
+impl<N> HistoryStructure<N> {
+    /// Constructs a new [`HistoryStructure`] from a list of tuples of indices and dimension (assumes they are all euclidean), along with a name
+    pub fn from_integers(slots: &[(AbstractIndex, Dimension)], name: N) -> Self
+    where
+        N: Clone,
+    {
+        let slots: Vec<(AbstractIndex, Representation)> = slots
+            .iter()
+            .map(|(index, dim)| (*index, Representation::Euclidean(*dim)))
+            .collect();
+        Self::new(&slots, name)
+    }
+    /// Constructs a new [`HistoryStructure`] from a list of tuples of indices and representations, along with a name
+    pub fn new(slots: &[(AbstractIndex, Representation)], name: N) -> Self
+    where
+        N: Clone,
+    {
+        let structure: Vec<Slot> = slots
+            .iter()
+            .map(|(index, representation)| Slot::from((*index, *representation)))
+            .collect();
+
+        let name_map = AHashMap::from([(0..structure.len(), name.clone())]);
+
+        HistoryStructure {
+            internal: structure.clone(),
+            external: structure,
+            names: name_map,
+            global_name: Some(name),
+        }
+    }
+
+    /// Makes the indices in the internal index list of `self` independent from the indices in the internal index list of `other`.
+    /// This is done by shifting the indices in the internal index list of `self` by the maximum index present.
+    pub fn independentize_internal(&mut self, other: &Self) {
+        let internal_set: HashSet<Slot> = self
+            .internal
+            .clone()
+            .into_iter()
+            .filter(|s| self.external.contains(s))
+            .collect();
+
+        let other_set: HashSet<Slot> = other.internal.clone().into_iter().collect();
+
+        let mut replacement_value = internal_set
+            .union(&other_set)
+            .map(|s| s.index)
+            .max()
+            .unwrap_or(0.into())
+            + 1.into();
+
+        for item in &mut self.internal {
+            if other_set.contains(item) {
+                item.index = replacement_value;
+                replacement_value += 1.into();
+            }
+        }
+    }
+}
+
+impl<N> HasName for HistoryStructure<N>
+where
+    N: Clone,
+{
+    type Name = N;
+    fn name(&self) -> Option<Cow<N>> {
+        self.global_name.as_ref().map(|name| Cow::Borrowed(name))
+    }
+    fn set_name(&mut self, name: &N) {
+        self.global_name = Some(name.clone());
+    }
+}
+
+impl<N> TracksCount for HistoryStructure<N> {
+    /// Since each contraction merges the name maps, the number of contractions is the size of the name map.
+    /// This function returns the number of contractions thus computed.
+    fn contractions_num(&self) -> usize {
+        self.names.len()
+    }
+}
+
+impl<N> TensorStructure for HistoryStructure<N> {
+    type Structure = HistoryStructure<N>;
+
+    fn structure(&self) -> &Self::Structure {
+        self
+    }
+
+    fn mut_structure(&mut self) -> &mut Self::Structure {
+        self
+    }
+    fn external_structure(&self) -> &[Slot] {
+        &self.external
+    }
+    /// checks if internally, the two tensors are the same.
This implies that the external indices are the same
+    fn same_content(&self, other: &Self) -> bool {
+        let set1: HashSet<_> = self.internal.iter().collect();
+        let set2: HashSet<_> = other.internal.iter().collect();
+        set1 == set2
+        // TODO: check names
+    }
+}
+
+// impl TensorStructure for [Slot] {
+//     type Structure = [Slot];
+
+//     fn external_structure(&self) -> &[Slot] {
+//         self
+//     }
+// }
+
+impl StructureContract for HistoryStructure
+where
+    N: Clone,
+{
+    /// Removes the repeated indices from the external index list
+    fn trace_out(&mut self) {
+        let mut positions = IndexMap::new();
+
+        // Track the positions of each element
+        for (index, &value) in self.external.iter().enumerate() {
+            positions.entry(value).or_insert_with(Vec::new).push(index);
+        }
+
+        // Keep only the elements that occur exactly once
+        self.external = positions
+            .into_iter()
+            .filter_map(|(value, indices)| {
+                if indices.len() == 1 {
+                    Some(value)
+                } else {
+                    None
+                }
+            })
+            .collect();
+    }
+
+    /// Removes the given pair of indices from the external index list
+    fn trace(&mut self, i: usize, j: usize) {
+        if i < j {
+            self.trace(j, i);
+            return;
+        }
+        let a = self.external.remove(i);
+        let b = self.external.remove(j);
+        assert_eq!(a, b);
+    }
+
+    /// Essentially a contraction: merges the name maps (shifted appropriately), traces out repeated indices, and appends the internal structure of `other`.
+    fn merge(&mut self, other: &Self) -> Option {
+        let shift = self.internal.len();
+        for (range, name) in &other.names {
+            self.names
+                .insert((range.start + shift)..(range.end + shift), name.clone());
+        }
+        self.trace_out();
+        self.independentize_internal(other);
+        self.internal.append(&mut other.internal.clone());
+        self.external.merge(&other.external)
+    }
+
+    /// Merges two [`HistoryStructure`]s at the given positions of the external index list. Ideally the internal index lists should be independentized before merging.
+    /// This is essentially a contraction of only one index. The name maps are merged and shifted accordingly. 
The global name is lost, since the resulting tensor is composite + /// The global name can be set again with the [`Self::set_global_name`] function + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + let mut slots_other = other.external.clone(); + let mut slots_self: Vec = self.external.clone(); + + slots_self.remove(positions.0); + slots_other.remove(positions.1); + + let mut slots_self_int = self.internal.clone(); + let mut slots_other_int = other.internal.clone(); + slots_self_int.append(&mut slots_other_int); + + let mut names = self.names.clone(); + let shift = self.internal.len(); + for (range, name) in &other.names { + names.insert((range.start + shift)..(range.end + shift), name.clone()); + } + slots_self.append(&mut slots_other); + HistoryStructure { + internal: slots_self_int, + external: slots_self, + names, + global_name: None, + } + } +} + +pub fn atomic_expanded_label( + indices: &[ConcreteIndex], + name: I, + _state: &mut State, + _ws: &Workspace, +) -> Atom { + let id = name.into_id(); + atomic_expanded_label_id(indices, id) +} + +pub fn atomic_flat_label(index: usize, name: I) -> Atom { + let id = name.into_id(); + atomic_flat_label_id(index, id) +} + +#[allow(clippy::cast_possible_wrap)] +pub fn atomic_flat_label_id(index: usize, id: Symbol) -> Atom { + let mut value_builder = FunctionBuilder::new(id); + value_builder = value_builder.add_arg(Atom::new_num(index as i64).as_atom_view()); + value_builder.finish() +} + +#[allow(clippy::cast_possible_wrap)] +pub fn atomic_expanded_label_id(indices: &[ConcreteIndex], id: Symbol) -> Atom { + let mut value_builder = FunctionBuilder::new(id); + for &index in indices { + value_builder = value_builder.add_arg(Atom::new_num(index as i64).as_atom_view()); + } + value_builder.finish() +} +pub trait IntoId { + fn into_id(self) -> Symbol; +} + +impl IntoId for SmartString { + fn into_id(self) -> Symbol { + State::get_or_insert_fn(self, None).unwrap() + } +} + +impl IntoId for Symbol { + fn into_id(self) -> Symbol { + self + } +} + +impl IntoId for &str { + fn into_id(self) -> Symbol { + State::get_or_insert_fn(self, None).unwrap() + } +} + +impl IntoId for std::string::String { + fn into_id(self) -> Symbol { + State::get_or_insert_fn(self, None).unwrap() + } +} + +/// Trait that enables shadowing of a tensor +/// +/// This creates a dense tensor of atoms, where the atoms are the expanded indices of the tensor, with the global name as the name of the labels. 
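+///
+/// A minimal sketch of the intended use, mirroring the `evaluate` test in
+/// `tests.rs` (the `test_structure` helper, `to_named`, and the name `"a"`
+/// are illustrative assumptions taken from the tests, not part of this trait):
+///
+/// ```ignore
+/// // Any named structure can be shadowed into a dense tensor of atoms.
+/// let structure = test_structure(3, 1).to_named("a");
+/// let shadowed = structure.shadow().unwrap();
+/// // Each entry is now a symbolic label over the expanded indices of "a".
+/// ```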
+pub trait Shadowable: TensorStructure {
+    type Name: IntoId + Clone;
+    fn shadow(self) -> Option>
+    where
+        Self: std::marker::Sized + HasName::Name>,
+        Self::Structure: Clone,
+    {
+        let name = self.name()?.into_owned();
+
+        Some(self.shadow_with(name.into_id()))
+    }
+
+    fn smart_shadow(self) -> Option>
+    where
+        Self: std::marker::Sized + HasName::Name>,
+        Self::Structure: Clone + TensorStructure,
+    {
+        let name = self.name()?.into_owned();
+        Some(self.to_explicit_rep(name.into_id()))
+    }
+
+    fn to_symbolic(&self) -> Option
+    where
+        Self: HasName::Name>,
+    {
+        Some(self.to_symbolic_with(self.name()?.into_owned()))
+    }
+
+    fn to_symbolic_with(&self, name: Self::Name) -> Atom {
+        let atoms = self
+            .external_structure()
+            .iter()
+            .map(|slot| slot.to_symbolic())
+            .collect::>();
+
+        let mut value_builder = FunctionBuilder::new(name.into_id());
+        for atom in atoms {
+            value_builder = value_builder.add_arg(atom.as_atom_view());
+        }
+        value_builder.finish()
+    }
+}
+
+impl Shadowable for N
+where
+    N: TensorStructure + HasName,
+    N::Name: IntoId + Clone,
+{
+    type Name = N::Name;
+}
+
+duplicate! {[
+  N;
+[HistoryStructure];
+[HistoryStructure>];
+]
+impl std::fmt::Display for N
+{
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        let mut string = String::new();
+        if let Some(global_name) = self.name() {
+            string.push_str(&format!("{global_name}:"));
+        }
+        for (range, name) in self
+            .names
+            .iter()
+            .filter(|(r, _)| *r != &(0..self.internal.len()) || !self.is_composite())
+        {
+            string.push_str(&format!("{name}("));
+            for slot in &self.internal[range.clone()] {
+                string.push_str(&format!("{slot},"));
+            }
+            string.pop();
+            string.push(')');
+        }
+        write!(f, "{string}")
+    }
+}
+}
+impl HistoryStructure {
+    #[must_use]
+    pub fn to_string(&self, _state: &State) -> String {
+        let mut string = String::new();
+        if let Some(global_name) = self.name() {
+            string.push_str(&format!("{:?}:", global_name));
+        }
+        for (range, name) in self
+            .names
+            .iter()
+            .filter(|(r, _)| *r != &(0..self.internal.len()) || !self.is_composite())
+        {
+            string.push_str(&format!("{:?}(", name));
+            for slot in &self.internal[range.clone()] {
+                string.push_str(&format!("{slot},"));
+            }
+            string.pop();
+            string.push(')');
+        }
+        string
+    }
+}
diff --git a/src/tensor/symbolic.rs b/src/tensor/symbolic.rs
new file mode 100644
index 00000000..085f3700
--- /dev/null
+++ b/src/tensor/symbolic.rs
@@ -0,0 +1,157 @@
+use super::{
+    Contract, HasName, IntoId, MixedTensor, Shadowable, Slot, StructureContract, TensorNetwork,
+    TensorStructure, VecStructure,
+};
+
+use symbolica::representations::{Atom, AtomView, Symbol};
+
+/// A fully symbolic tensor, with no concrete values.
+///
+/// This tensor is used to represent the structure of a tensor, and is used to perform symbolic contraction.
+/// Currently contraction is just a multiplication of the atoms, but in the future this will ensure that internal indices are independent across the contraction.
+///
+/// Additionally, this can also be used as a tensor structure that tracks the history, much like [`HistoryStructure`].
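+///
+/// A minimal construction sketch, following the `symbolic_contract` test
+/// below (the name `"T"` and the index/dimension pairs are illustrative):
+///
+/// ```ignore
+/// let structure = HistoryStructure::from_integers(
+///     &[(1, 2), (4, 3)].map(|(a, d)| (a.into(), d.into())),
+///     "T".to_string(),
+/// );
+/// let t = SymbolicTensor::from_named(&structure).unwrap();
+/// // t.get_atom() now holds the atom T(euc(2,1),euc(3,4)).
+/// ```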
+#[derive(Debug)] +pub struct SymbolicTensor { + structure: VecStructure, + expression: symbolica::representations::Atom, +} + +impl TensorStructure for SymbolicTensor { + type Structure = VecStructure; + + fn structure(&self) -> &Self::Structure { + self.structure.structure() + } + + fn mut_structure(&mut self) -> &mut Self::Structure { + self.structure.mut_structure() + } + fn external_structure(&self) -> &[Slot] { + self.structure.external_structure() + } +} + +impl StructureContract for SymbolicTensor { + fn merge_at(&self, other: &Self, positions: (usize, usize)) -> Self { + let structure = self.structure.merge_at(&other.structure, positions); + // let mut out: Atom = Atom::new(); + // other.expression.mul(state, ws, &self.expression, &mut out); + + SymbolicTensor { + structure, + expression: &other.expression * &self.expression, + } + } + + fn merge(&mut self, other: &Self) -> Option { + self.expression = &other.expression * &self.expression; + self.structure.merge(&other.structure) + } + + fn trace_out(&mut self) { + self.structure.trace_out(); + } + + fn trace(&mut self, i: usize, j: usize) { + self.structure.trace(i, j); + } +} + +impl SymbolicTensor { + pub fn from_named(structure: &N) -> Option + where + N: TensorStructure + HasName, + N::Name: IntoId + Clone, + { + Some(SymbolicTensor { + expression: structure.to_symbolic()?, + structure: structure.external_structure().to_vec().into(), + }) + } + + #[must_use] + pub fn get_atom(&self) -> &Atom { + &self.expression + } + + pub fn to_mixed(self) -> MixedTensor { + self.smart_shadow().unwrap() + } + + pub fn to_network(self) -> Result>, &'static str> { + let mut network: TensorNetwork> = TensorNetwork::new(); + + if let AtomView::Mul(m) = self.expression.as_view() { + for atom in m.iter() { + if let AtomView::Fun(f) = atom { + let mut structure: Vec = vec![]; + let f_id = f.get_symbol(); + + for arg in f.iter() { + structure.push(arg.try_into()?); + } + let s: VecStructure = structure.into(); + network.push(s.to_explicit_rep(f_id)); + } + } + } + + Ok(network) + } +} + +impl TryFrom> for VecStructure { + type Error = &'static str; + fn try_from(value: AtomView) -> Result { + let mut structure: Vec = vec![]; + if let AtomView::Fun(f) = value { + for arg in f.iter() { + structure.push(arg.try_into()?); + } + } + Ok(structure.into()) + } +} + +impl TryFrom for SymbolicTensor { + type Error = &'static str; + fn try_from(value: Atom) -> Result { + Ok(SymbolicTensor { + structure: value.as_view().try_into()?, + expression: value, + }) + } +} + +impl HasName for SymbolicTensor { + type Name = Symbol; + fn name(&self) -> Option> { + if let AtomView::Fun(f) = self.expression.as_view() { + Some(std::borrow::Cow::Owned(f.get_symbol())) + } else { + None + } + } + + fn set_name(&mut self, _name: &Self::Name) { + unimplemented!("Cannot set name of a symbolic tensor") + } +} + +/// Symbolic contraction of two symbolic tensors is just a multiplication of the atoms. 
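+/// A short sketch of the result, taken from the `symbolic_contract` test
+/// (with `a` and `b` the symbolic tensors `T` and `P` built there):
+///
+/// ```ignore
+/// let f = a.contract(&b).unwrap();
+/// assert_eq!(
+///     *f.get_atom(),
+///     Atom::parse("T(euc(2,1),euc(3,4))*P(euc(2,3),euc(3,2))").unwrap()
+/// );
+/// ```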
+/// +impl Contract for SymbolicTensor { + type LCM = SymbolicTensor; + fn contract(&self, other: &SymbolicTensor) -> Option { + let mut new_structure = self.structure.clone(); + + let expression = &other.expression * &self.expression; + println!("expression: {}", expression); + new_structure.merge(&other.structure); + Some(SymbolicTensor { + expression, + structure: new_structure, + }) + } +} diff --git a/src/tensor/tests.rs b/src/tensor/tests.rs index 54ca6dcb..3c4a7a7f 100644 --- a/src/tensor/tests.rs +++ b/src/tensor/tests.rs @@ -1,92 +1,886 @@ -use std::collections::BTreeMap; +use crate::tensor::{ + ufo::mink_four_vector, Contract, DenseTensor, FallibleAddAssign, FallibleMul, FallibleSub, + GetTensorData, HasTensorData, MixedTensor, Representation, SparseTensor, StructureContract, + TensorStructure, +}; +use ahash::{HashMap, HashMapExt}; -use crate::tensor::{SparseTensor, VecSlotExt}; +use indexmap::{IndexMap, IndexSet}; +use rand::{distributions::Uniform, Rng, SeedableRng}; +use rand_xoshiro::Xoroshiro64Star; +use smartstring::alias::String; +use symbolica::domains::float::Complex; +use symbolica::{ + representations::Atom, + state::{State, Workspace}, +}; -use super::{DenseTensor, HasTensorStructure, TensorStructure}; +use super::{ + symbolic::SymbolicTensor, ufo, AbstractIndex, DataTensor, Dimension, HistoryStructure, + NamedStructure, NumTensor, SetTensorData, Shadowable, Slot, TensorNetwork, TryIntoUpgrade, + VecStructure, +}; + +trait Average { + fn mean(a: Self, b: Self) -> Self; +} + +fn test_tensor(structure: S, seed: u64, range: Option<(D, D)>) -> SparseTensor +where + S: TensorStructure, + D: rand::distributions::uniform::SampleUniform, + Uniform: Copy, + + rand::distributions::Standard: rand::distributions::Distribution, +{ + let mut rng: Xoroshiro64Star = Xoroshiro64Star::seed_from_u64(seed); + + let mut tensor = SparseTensor::empty(structure); + + let density = rng.gen_range(0..tensor.size()); + + if let Some((low, high)) = range { + let multipliable = Uniform::new(low, high); + for _ in 0..density { + tensor + .set_flat(rng.gen_range(0..tensor.size()), rng.sample(multipliable)) + .unwrap(); + } + } else { + for _ in 0..density { + tensor + .set_flat(rng.gen_range(0..tensor.size()), rng.gen()) + .unwrap(); + } + } + + tensor +} + +fn test_structure(length: usize, seed: u64) -> VecStructure { + let mut rng = Xoroshiro64Star::seed_from_u64(seed); + let mut s = IndexSet::new(); + + let rank = length; + while s.len() < rank { + let rep = rng.gen_range(0..=1); + let dim = Dimension(rng.gen_range(1..=9)); + let id = AbstractIndex(rng.gen_range(0..256)); + let rep = match rep { + 0 => Representation::Euclidean(dim), + _ => Representation::Lorentz(dim), + }; + + s.insert((id, rep).into()); + } + + s.into_iter().collect() +} + +fn test_structure_with_dims(dims: &[usize], seed: u64) -> VecStructure { + let mut s = IndexSet::new(); + let mut rng = Xoroshiro64Star::seed_from_u64(seed); + + for d in dims { + loop { + let dim: Dimension = (*d).into(); + let rep = rng.gen_range(0..=1); + let id = AbstractIndex(rng.gen_range(0..256)); + + let rep = match rep { + 0 => Representation::Euclidean(dim), + _ => Representation::Lorentz(dim), + }; + + if s.insert((id, rep).into()) { + break; + } + } + } + + s.into_iter().collect() +} #[test] -fn indexflatten() { - let a = TensorStructure::from_integers(&[1, 2, 3], &[3, 4, 5]); +fn rng_is_deterministic() { + let valid = IndexMap::from([ + (vec![3, 0, 3], 53), + (vec![1, 1, 0], 45), + (vec![1, 1, 2], -99), + (vec![2, 1, 0], -59), + (vec![0, 1, 
1], -93),
+        (vec![2, 0, 1], 105),
+        (vec![4, 1, 0], 125),
+        (vec![1, 0, 0], -118),
+        (vec![0, 0, 3], -26),
+        (vec![4, 0, 0], 59),
+        (vec![3, 1, 2], 84),
+        (vec![3, 1, 0], -13),
+        (vec![1, 0, 3], 119),
+        (vec![0, 1, 2], 48),
+        (vec![1, 0, 2], 17),
+        (vec![0, 0, 0], 34),
+        (vec![3, 0, 2], 20),
+        (vec![4, 0, 2], -3),
+        (vec![3, 1, 3], 69),
+        (vec![4, 0, 1], 125),
+    ]);
+    for _ in 0..10 {
+        let a = test_structure(3, 11);
+
+        let a: SparseTensor = test_tensor(a, 1, None);
+
+        assert_eq!(a.hashmap(), valid);
+    }
+}
 
-    let idx = vec![1, 2, 3];
+#[test]
+fn indexflatten() {
+    let a = test_structure(4, 31);
+    let idx = vec![1, 2, 3, 1];
 
     let flatidx = a.flat_index(&idx).unwrap();
-    println!("{:?}", a.strides());
+    assert_eq!(idx, a.expanded_index(flatidx).unwrap());
+}
+
+#[test]
+fn trace() {
+    let structura =
+        HistoryStructure::from_integers(&[(1, 5), (1, 5)].map(|(a, d)| (a.into(), d.into())), "a");
+    let a = test_tensor::(structura, 3, None);
+    let f = a.internal_contract();
 
-    println!("{}", flatidx);
-    println!("{:?}", a.expanded_index(flatidx).unwrap());
+    assert!(f.is_scalar());
+    assert_eq!(f.data(), vec![79]);
 }
 
 #[test]
 fn construct_dense_tensor() {
-    let a = TensorStructure::from_integers(&[1, 2, 3], &[2, 3, 4]);
-
+    let a = test_structure(4, 32);
     let data = vec![1.0; a.size()];
-    let a = super::DenseTensor::from_data(&data, a).unwrap();
-    println!("{:?}", a);
+    let tensor = super::DenseTensor::from_data(&data, a).unwrap();
+    let num_tensor: NumTensor = tensor.clone().into();
+    let data_tensor: DataTensor = tensor.clone().into();
+    let mixed_tensor: MixedTensor<_> = tensor.clone().into();
+
+    assert_eq!(mixed_tensor.try_as_float().unwrap().data(), data);
+    assert_eq!(data_tensor.data(), data);
+    assert_eq!(num_tensor.try_as_float().unwrap().data(), data);
 }
 
 #[test]
 fn construct_sparse_tensor() -> Result<(), String> {
-    let structure = TensorStructure::from_integers(&[1, 2, 3], &[2, 3, 4]);
-
-    let mut a: SparseTensor = SparseTensor::empty(structure);
-    a.set(&[1, 2, 1], 1)?;
-    a.set(&[0, 2, 3], 2)?;
-    a.set(&[1, 2, 3], 3)?;
-    a.set(&[1, 0, 3], 4)?;
-    println!("{:?}", a);
+    let structure = test_structure(3, 11);
+    println!("{:?}", structure);
+
+    let mut a = SparseTensor::empty(structure);
+    a.set(&[1, 0, 1], 1.)?;
+    a.set(&[0, 0, 2], 2.)?;
+    a.set(&[1, 1, 2], 3.)?;
+    a.set(&[1, 0, 2], 4.)?;
+
+    let num_tensor: NumTensor = a.clone().into();
+    let data_tensor: DataTensor = a.clone().into();
+    let mixed_tensor: MixedTensor<_> = a.clone().into();
+
+    assert_eq!(
+        num_tensor.try_as_float().unwrap().hashmap(),
+        data_tensor.hashmap()
+    );
+    assert_eq!(data_tensor.hashmap(), a.hashmap());
+    assert_eq!(mixed_tensor.try_as_float().unwrap().hashmap(), a.hashmap());
+    Ok(())
 }
 
 #[test]
-fn dense_tensor_shape() {
-    let a = TensorStructure::from_integers(&[1, 2, 3], &[2, 3, 4]);
+fn tensor_structure_forwarding() {
+    let a = test_structure(6, 1);
+    let range = Some((-1000, 1000));
 
-    let data = vec![1.0; a.size()];
-    let a = super::DenseTensor::from_data(&data, a).unwrap();
-    assert_eq!(a.shape(), vec![2, 3, 4]);
+    let sparse: SparseTensor = test_tensor(a.clone(), 1, range);
+    let dense: DenseTensor = test_tensor(a.clone(), 2, range).to_dense();
+
+    assert_eq!(a.strides(), sparse.strides());
+    assert_eq!(dense.reps(), a.reps());
 }
 
 #[test]
-fn contract_densor() {
-    let structur_a = TensorStructure::from_integers(&[1, 3], &[2, 2]);
-    let structur_b = TensorStructure::from_integers(&[3, 4], &[2, 2]);
+fn scalar_and_dim1_contract() {
+    let common = test_structure_with_dims(&[1, 3, 1, 2], 6);
+    let mut structa 
= test_structure(1, 32); + structa.merge(&common); + let mut structb = test_structure(1, 22); + structb.merge(&common); + let range = Some((-100, 100)); + + let mut tensor_1: SparseTensor = test_tensor(structa, 3, range); + tensor_1.set_flat(0, 45).unwrap(); + let mut tensor_2: SparseTensor = test_tensor(structb, 2, range); + tensor_2.set_flat(0, 2).unwrap(); + let f = tensor_1.contract(&tensor_2).unwrap(); + + let valid = IndexMap::from([ + (vec![0, 3], 5908), + (vec![2, 1], 2491), + (vec![3, 1], -1081), + (vec![0, 0], 90), + (vec![3, 0], -1200), + (vec![3, 3], -788), + (vec![1, 1], 2961), + (vec![2, 3], -4004), + (vec![2, 0], 160), + (vec![0, 1], -987), + ]); + + assert_eq!(f.hashmap(), valid); +} +#[test] +fn contract_with_rank_one_in_middle() { + let s = 12; + let common = test_structure(3, s); + let mut structa: VecStructure = test_structure(2, s + 1); + structa.merge(&common); + let mut structb: VecStructure = test_structure(1, s + 2); + structb.merge(&common); + + // println!("seed: {s}"); + + // println!("--"); + // println!("{structa}"); + // println!("--"); + // println!("{structb}"); + let range = Some((-1000, 1000)); + let tensor_a: SparseTensor = test_tensor(structa, s + 3, range); + let dense_a: DenseTensor = tensor_a.to_dense(); + let tensor_b: SparseTensor = test_tensor(structb, s + 4, range); + let dense_b: DenseTensor = tensor_b.to_dense(); + + let f = tensor_b.contract(&tensor_a).unwrap().to_dense(); + let g = dense_b.contract(&dense_a).unwrap(); + + assert_eq!(f.data, g.data); +} + +fn test_structure_with_id(ids: T, seed: u64) -> Vec +where + T: Iterator, +{ + let mut rng = Xoroshiro64Star::seed_from_u64(seed); + let mut s = Vec::new(); + + for id in ids { + let rep = rng.gen_range(0..=1); + let dim = Dimension(rng.gen_range(1..=9)); + let id = AbstractIndex(id); + let rep = match rep { + 0 => Representation::Euclidean(dim), + _ => Representation::Lorentz(dim), + }; + + s.push((id, rep).into()); + } + s +} + +#[test] +fn single_contract() { + let s = 18; + let range = Some((-1000, 1000)); + let common = test_structure_with_id(0..1, s); + let mut structa = test_structure_with_id(1..2, s); + let mut structb = test_structure_with_id(2..3, s); + let mut rng = Xoroshiro64Star::seed_from_u64(s); + + structa.insert(rng.gen_range(0..structa.len()), common[0]); + structb.insert(rng.gen_range(0..structb.len()), common[0]); + structa.sort(); + let structa: VecStructure = structa.into(); + let structb: VecStructure = structb.into(); + + let spensor_a: SparseTensor = test_tensor(structa.clone(), s + 3, range); + + let densor_a: DenseTensor = spensor_a.to_dense(); + // println!("A={:?}", densor_a); + + let spensor_b: SparseTensor = test_tensor(structb.clone(), s + 4, range); + let densor_b: DenseTensor = spensor_b.to_dense(); + // println!("B={:?}", densor_b); + + let dense_dense = densor_b.contract(&densor_a).unwrap(); + // println!("A*B {:?}", dense_dense); + let sparse_sparse = spensor_b.contract(&spensor_a).unwrap().to_dense(); + let dense_sparse = densor_b.contract(&spensor_a).unwrap(); + let sparse_dense = spensor_b.contract(&densor_a).unwrap(); + + assert_eq!( + dense_dense.data(), + sparse_sparse.data(), + "S-S not match at seed: {s}" + ); + assert_eq!( + dense_dense.data(), + dense_sparse.data(), + "D-S not match at seed: {s}" + ); + assert_eq!( + dense_dense.data(), + sparse_dense.data(), + "S-D not match at seed: {s}" + ); +} + +#[test] +fn all_single_contractions() { + let range = Some((-1000, 1000)); + + let mut dseq = vec![]; + let mut sseq = vec![]; + let mut sdeq = 
vec![]; + + for s in 0..1000 { + let common = test_structure_with_id(0..1, s); + let mut structa = test_structure_with_id(1..2, s); + let mut structb = test_structure_with_id(2..3, s); + let mut rng = Xoroshiro64Star::seed_from_u64(s); + + structa.insert(rng.gen_range(0..structa.len()), common[0]); + structb.insert(rng.gen_range(0..structb.len()), common[0]); + structa.sort(); + let structa: VecStructure = structa.into(); + let structb: VecStructure = structb.into(); + + let spensor_a: SparseTensor = test_tensor(structa.clone(), s + 3, range); + let densor_a: DenseTensor = spensor_a.to_dense(); + let spensor_b: SparseTensor = test_tensor(structb.clone(), s + 4, range); + let densor_b: DenseTensor = spensor_b.to_dense(); + + let dense_dense = densor_b.contract(&densor_a).unwrap(); + // println!("{}", dense_dense.structure()); + let sparse_sparse = spensor_b.contract(&spensor_a).unwrap().to_dense(); + let dense_sparse = densor_b.contract(&spensor_a).unwrap(); + let sparse_dense = spensor_b.contract(&densor_a).unwrap(); + + if dense_dense.data() != sparse_sparse.data() { + sseq.push(s); + } + if dense_dense.data() != dense_sparse.data() { + dseq.push(s); + } + if dense_dense.data() != sparse_dense.data() { + sdeq.push(s); + } + } + + assert_eq!(sseq.len(), 0, "Sparse-Sparse failed at seeds {sseq:?}"); + assert_eq!(dseq.len(), 0, "Dense-Sparse failed at seeds {dseq:?}"); + assert_eq!(sdeq.len(), 0, "Sparse-Dense failed at seeds {sdeq:?}"); +} + +#[test] +fn multi_contract() { + let range = Some((-1000, 1000)); + let s = 18; + let mut rng = Xoroshiro64Star::seed_from_u64(s); + let ncommon = rng.gen_range(2..5); + + let common = test_structure_with_id(0..ncommon, s); + let mut structa = test_structure_with_id(ncommon..ncommon + 1, s); + let mut structb = test_structure_with_id(ncommon + 1..ncommon + 2, s); + + for c in common { + structa.insert(rng.gen_range(0..structa.len()), c); + structb.insert(rng.gen_range(0..structb.len()), c); + } + structa.sort(); + let structa: VecStructure = structa.into(); + let structb: VecStructure = structb.into(); + + let spensor_a: SparseTensor = test_tensor(structa.clone(), s + 3, range); + let densor_a: DenseTensor = spensor_a.to_dense(); + let spensor_b: SparseTensor = test_tensor(structb.clone(), s + 4, range); + let densor_b: DenseTensor = spensor_b.to_dense(); + + let dense_dense = densor_b.contract(&densor_a).unwrap(); + // println!("{}", dense_dense.structure()); + let sparse_sparse = spensor_b.contract(&spensor_a).unwrap().to_dense(); + let dense_sparse = densor_b.contract(&spensor_a).unwrap(); + let sparse_dense = spensor_b.contract(&densor_a).unwrap(); + + assert_eq!( + dense_dense.data(), + sparse_sparse.data(), + "S-S not match at seed: {s}" + ); + assert_eq!( + dense_dense.data(), + dense_sparse.data(), + "D-S not match at seed: {s}" + ); + assert_eq!( + dense_dense.data(), + sparse_dense.data(), + "S-D not match at seed: {s}" + ); +} + +#[test] +fn all_multi_contractions() { + let _seeds = [48, 50, 118, 225, 234, 310]; + let range = Some((-1000, 1000)); + + let mut dseq = vec![]; + let mut sseq = vec![]; + let mut sdeq = vec![]; + for s in 0..1000 { + let mut rng = Xoroshiro64Star::seed_from_u64(s); + let ncommon = rng.gen_range(2..5); + + let common = test_structure_with_id(0..ncommon, s); + let mut structa = test_structure_with_id(ncommon..ncommon + 1, s); + let mut structb = test_structure_with_id(ncommon + 1..ncommon + 2, s); + + for c in common { + structa.insert(rng.gen_range(0..structa.len()), c); + 
structb.insert(rng.gen_range(0..structb.len()), c); + } + structa.sort(); + let structa: VecStructure = structa.into(); + let structb: VecStructure = structb.into(); + + let spensor_a: SparseTensor = test_tensor(structa.clone(), s + 3, range); + let densor_a: DenseTensor = spensor_a.to_dense(); + let spensor_b: SparseTensor = test_tensor(structb.clone(), s + 4, range); + let densor_b: DenseTensor = spensor_b.to_dense(); + + let dense_dense = densor_b.contract(&densor_a).unwrap(); + // println!("{}", dense_dense.structure()); + let sparse_sparse = spensor_b.contract(&spensor_a).unwrap().to_dense(); + let dense_sparse = densor_b.contract(&spensor_a).unwrap(); + let sparse_dense = spensor_b.contract(&densor_a).unwrap(); + + if dense_dense.data() != sparse_sparse.data() { + sseq.push(s); + } + if dense_dense.data() != dense_sparse.data() { + dseq.push(s); + } + if dense_dense.data() != sparse_dense.data() { + sdeq.push(s); + } + } + assert_eq!(sseq.len(), 0, "Sparse-Sparse failed at seeds {sseq:?}"); + assert_eq!(dseq.len(), 0, "Dense-Sparse failed at seeds {dseq:?}"); + assert_eq!(sdeq.len(), 0, "Sparse-Dense failed at seeds {sdeq:?}"); +} + +#[test] +fn gamma() { + let g1: SparseTensor> = ufo::gamma(0.into(), (0.into(), 1.into())); + let g2: SparseTensor> = ufo::gamma(1.into(), (1.into(), 2.into())); + let g3: SparseTensor> = ufo::gamma(2.into(), (2.into(), 0.into())); + + let c = g1.contract(&g2).unwrap().contract(&g3).unwrap(); + assert_eq!( + Vec::>::new(), + c.data(), + "Odd traces must vanish" + ); + + let d: SparseTensor> = + ufo::gamma(0.into(), (0.into(), 0.into())).internal_contract(); + + assert_eq!(Vec::>::new(), d.data(), "Gammas are traceless"); +} + +#[test] +fn matches() { + let structur_a = HistoryStructure::new( + &[ + (3.into(), Representation::Lorentz(2.into())), + (2.into(), Representation::Lorentz(3.into())), + (2.into(), Representation::Euclidean(2.into())), + (1.into(), Representation::Lorentz(2.into())), + ], + "a", + ); + let structur_b = HistoryStructure::new( + &[ + (1.into(), Representation::Lorentz(2.into())), + (3.into(), Representation::Lorentz(2.into())), + (2.into(), Representation::Lorentz(2.into())), + (1.into(), Representation::Euclidean(2.into())), + ], + "b", + ); + + let a = structur_a.match_index(&structur_b); + + assert_eq!(Some((false, 3, 0)), a); +} + +#[test] +fn mixed_tensor_contraction() { + let im = Complex::new(1.5, 1.25); + let data_a = [(vec![0, 0], 1.0), (vec![1, 1], 2.0)]; + + let structur_a = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); + + let a = SparseTensor::from_data(&data_a, structur_a.clone()).unwrap(); + + let structur_b = + HistoryStructure::from_integers(&[(2, 2), (4, 2)].map(|(a, d)| (a.into(), d.into())), "b"); + + let b = DenseTensor::from_data( + &[ + im.mul_fallible(1.0).unwrap(), + 2.0.mul_fallible(im).unwrap(), + 3.0.mul_fallible(im).unwrap(), + 4.0.mul_fallible(im).unwrap(), + ], + structur_b.clone(), + ) + .unwrap(); + + let f = b.contract(&a).unwrap(); + + assert_eq!( + f.data, + [ + 1.0.mul_fallible(im).unwrap(), + 6.0.mul_fallible(im).unwrap(), + 2.0.mul_fallible(im).unwrap(), + 8.0.mul_fallible(im).unwrap() + ] + ); + + let data_a = [ + (vec![0, 0], 1.0.mul_fallible(im).unwrap()), + (vec![1, 1], 2.0.mul_fallible(im).unwrap()), + ]; + + let a = SparseTensor::from_data(&data_a, structur_a).unwrap(); - let a = DenseTensor::from_data(&[1.0, 2.0, 3.0, 4.0], structur_a).unwrap(); let b = DenseTensor::from_data(&[1.0, 2.0, 3.0, 4.0], structur_b).unwrap(); - let f = 
a.contract_with_dense(&b).unwrap(); - assert_eq!(f.data, [7.0, 10.0, 15.0, 22.0]); + + let f = a.contract(&b).unwrap(); + assert_eq!( + f.data, + [ + 1.0.mul_fallible(im).unwrap(), + 2.0.mul_fallible(im).unwrap(), + 6.0.mul_fallible(im).unwrap(), + 8.0.mul_fallible(im).unwrap() + ] + ); +} + +#[test] +fn tensor_net() { + let a: NumTensor = ufo::gamma(1.into(), (2.into(), 3.into())).into(); + let b: NumTensor = ufo::gamma(2.into(), (3.into(), 4.into())).into(); + let c: NumTensor = ufo::gamma(3.into(), (4.into(), 5.into())).into(); + let d: NumTensor = ufo::gamma(4.into(), (5.into(), 2.into())).into(); + let p: NumTensor = mink_four_vector(1.into(), &[2., 3., 2., 1.]).into(); + let q: NumTensor = mink_four_vector(2.into(), &[2., 3., 2., 1.]).into(); + let r: NumTensor = mink_four_vector(3.into(), &[2., 3., 2., 1.]).into(); + let s: NumTensor = mink_four_vector(4.into(), &[2., 3., 2., 1.]).into(); + + let mut n = TensorNetwork::from(vec![a, b, c, p, q, d, r, s]); + + assert!(n.graph.validate_neighbors()); + + // println!("{}", n.dot()); + + assert_eq!(16, n.graph.neighbors.len()); + + n.contract(); + + assert_eq!(0, n.graph.neighbors.len()); + assert_eq!( + Complex::new(-400., 0.), + n.result().try_as_complex().unwrap().data()[0] + ) } #[test] fn contract_spensor() { let data_a = [(vec![0, 0], 1.0), (vec![1, 1], 2.0)]; + let structur_a = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); + + let a = SparseTensor::from_data(&data_a, structur_a).unwrap(); + + let data_b = [(vec![1, 0], 1.0), (vec![0, 1], 2.0)]; + let structur_b = + HistoryStructure::from_integers(&[(1, 2), (3, 2)].map(|(a, d)| (a.into(), d.into())), "b"); + + let b = SparseTensor::from_data(&data_b, structur_b).unwrap(); + + let f = a.contract(&b).unwrap(); + + let result = IndexMap::from([(vec![0, 1], 2.0), (vec![1, 0], 2.0)]); + + assert_eq!(f.hashmap(), result) +} + +#[test] +fn sparse_addition() { + let data_a = [(vec![1, 0], 1.0), (vec![0, 1], 2.0)]; + let structur_a = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); - let a = SparseTensor::from_data(&data_a, &[2, 1]).unwrap(); + let a = SparseTensor::from_data(&data_a, structur_a).unwrap(); let data_b = [(vec![1, 0], 1.0), (vec![0, 1], 2.0)]; + let structur_b = + HistoryStructure::from_integers(&[(1, 2), (2, 2)].map(|(a, d)| (a.into(), d.into())), "b"); - let b = SparseTensor::from_data(&data_b, &[1, 3]).unwrap(); + let b = SparseTensor::from_data(&data_b, structur_b).unwrap(); - let f = a.contract_with_sparse(&b).unwrap(); + let f = a + b; - let result = BTreeMap::from([(vec![0, 1], 2.0), (vec![1, 0], 2.0)]); + let result = IndexMap::from([(vec![0, 1], 3.0), (vec![1, 0], 3.0)]); - assert_eq!(f.elements, result) + assert_eq!(f.hashmap(), result) +} + +#[test] +fn sparse_sub() { + let data_a = [(vec![1, 0], 1.0), (vec![0, 1], 2.0)]; + let structur_a = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); + + let a = SparseTensor::from_data(&data_a, structur_a).unwrap(); + + let data_b = [(vec![1, 0], 1.0), (vec![0, 1], 3.0)]; + + let structur_b = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); + + let b = SparseTensor::from_data(&data_b, structur_b).unwrap(); + + let f = a - b; + + let result = IndexMap::from([(vec![0, 1], -1.0), (vec![1, 0], 0.0)]); + assert_eq!(f.hashmap(), result); + // println!("{:?}", f); } #[test] fn contract_densor_with_spensor() { let data_a = [(vec![0, 0], 1.0), (vec![1, 1], 
2.0)]; - let a = SparseTensor::from_data(&data_a, &[2, 1]).unwrap(); - println!("{:?}", a); + let structur_a = + HistoryStructure::from_integers(&[(2, 2), (1, 2)].map(|(a, d)| (a.into(), d.into())), "a"); + + let a = SparseTensor::from_data(&data_a, structur_a).unwrap(); let data_b = [1.0, 2.0, 3.0, 4.0]; - let structur_b = TensorStructure::from_integers(&[1, 4], &[2, 2]); + let structur_b = + HistoryStructure::from_integers(&[(1, 2), (4, 2)].map(|(a, d)| (a.into(), d.into())), "b"); + + let b = DenseTensor::from_data(&data_b, structur_b).unwrap(); + + let f = a.contract(&b).unwrap(); + + assert_eq!(f.data, [1.0, 2.0, 6.0, 8.0]); +} + +// #[test] +// fn symbolic_zeros() { +// let mut state = State::get_global_state().write().unwrap(); +// let ws = Workspace::new(); +// let structure = TensorSkeleton::from_integers(&[(1, 2), (3, 2)], "a"); + +// let sym_zeros = DenseTensor::symbolic_zeros(structure.clone()); + +// let zeros: DenseTensor = DenseTensor::default(structure); + +// assert_eq!(sym_zeros, zeros.to_symbolic(&ws, &mut state)); +// } + +#[test] +fn evaluate() { + let structure = test_structure(3, 1).to_named("a"); + + let a = structure.clone().shadow().unwrap(); + + let adata = test_tensor(structure, 1, Some((-100., 100.))).to_dense(); + + let mut const_map = HashMap::new(); + + a.append_const_map(&adata, &mut const_map); + let aev: DenseTensor = a.evaluate(&const_map); + + assert_eq!(aev.data(), adata.data()); +} + +#[test] +fn convert_sym() { + let _ws = Workspace::new(); + let i = Complex::new(0.0, 1.0); + let mut data_b = vec![i * Complex::from(5.0), Complex::from(2.6) + i]; + data_b.append( + &mut [3.34, -17.125, 5.0, 6.0] + .iter() + .map(|x| Complex::from(*x)) + .collect::>(), + ); + let structur_b = + HistoryStructure::from_integers(&[(1, 2), (4, 3)].map(|(a, d)| (a.into(), d.into())), "b"); let b = DenseTensor::from_data(&data_b, structur_b).unwrap(); - println!("{:?}", b); - let f = a.contract_with_dense(&b).unwrap(); - println!("{:?}", f); + + let symb: DenseTensor = b.try_into_upgrade().unwrap(); + + let expected_data: Vec = [ + "5*𝑖", + "𝑖+5854679515581645/2251799813685248", + "940126422213591/281474976710656", + "-137/8", + "5", + "6", + ] + .iter() + .map(|x| Atom::parse(x).unwrap()) + .collect(); + + assert_eq!( + symb.iter().map(|(_, x)| x.clone()).collect::>(), + expected_data + ); +} + +// #[test] +// fn symbolic_matrix_mult() { +// let mut state = State::get_global_state().write().unwrap(); +// let ws = Workspace::new(); + +// let structura = TensorStructure::from_integers(&[1, 4], &[2, 3]); +// let aatom = DenseTensor::symbolic_labels("a", structura, &ws, &mut state); +// let structurb = TensorStructure::from_integers(&[4, 1], &[3, 2]); +// let _batom = DenseTensor::symbolic_labels("b", structurb.clone(), &ws, &mut state); + +// let data_b = [1.5, 2.25, 3.5, -17.125, 5.0, 6.0]; +// let b = DenseTensor::from_data(&data_b, structurb).unwrap(); + +// let symb = b.to_symbolic(&ws, &mut state); + +// let f = aatom +// .builder(&state, &ws) +// .contract(&symb.builder(&state, &ws)); + +// assert_eq!( +// *f.unwrap().finish().get(&[]).unwrap(), +// Atom::parse( +// "3/2*a_0_0+7/2*a_0_1+5*a_0_2+9/4*a_1_0-137/8*a_1_1+6*a_1_2", +// &mut state, +// &ws +// ) +// .unwrap() +// ); /// let state = State: + +// // symb.contract_with_dense(&a); +// // let structurb = TensorStructure::from_integers(&[2, 4], &[2, 3]); +// // let b = DenseTensor::symbolic_labels("b", structurb, &ws, &mut state); +// } + +#[test] +fn empty_densor() { + let empty_structure = Vec::::new(); + + let 
empty: DenseTensor = DenseTensor::default(empty_structure.into()); + + assert_eq!(*empty.get(&[]).unwrap(), 0.0); +} + +#[test] +fn complex() { + let _structur = test_structure(2, 1); + + let _r = Complex::new(1.0, 2.0); + let _p = Complex::new(3.0, 4.0); +} + +#[test] +fn symbolic_contract() { + let structura = HistoryStructure::from_integers( + &[(1, 2), (4, 3)].map(|(a, d)| (a.into(), d.into())), + "T".to_string(), + ); + + let structurb = HistoryStructure::from_integers( + &[(3, 2), (2, 3)].map(|(a, d)| (a.into(), d.into())), + "P".to_string(), + ); + + let a = SymbolicTensor::from_named(&structura).unwrap(); + let b = SymbolicTensor::from_named(&structurb).unwrap(); + let f = a.contract(&b).unwrap(); + + assert_eq!( + *f.get_atom(), + Atom::parse("T(euc(2,1),euc(3,4))*P(euc(2,3),euc(3,2))").unwrap() + ); + + let a = f.to_network().unwrap(); + + // let syms = a.to_symbolic_tensor_vec(); + + // for s in syms { + // println!("{:?}", s.structure()); + // } + + println!("{}", a.dot()); +} + +#[test] +fn test_fallible_mul() { + let a: i32 = 4; + let b: f64 = 4.; + let mut c = a.mul_fallible(b).unwrap(); + c.add_assign_fallible(&a); + let d: Option = b.mul_fallible(&a); + let a: &i32 = &a; + let e: Option = a.mul_fallible(&b); + assert_eq!(c, 20.); + assert_eq!(d, Some(16.)); + assert_eq!(e, Some(16.)); + + let a = &Atom::parse("a(2)").unwrap(); + let b = &Atom::parse("b(1)").unwrap(); + + let mut f = a.mul_fallible(4.).unwrap(); + f.add_assign_fallible(b); + + let i = Atom::new_var(State::I); + + f.add_assign_fallible(&i); + + let function_map = HashMap::new(); + let mut cache = HashMap::new(); + + let mut const_map = HashMap::new(); + const_map.insert(i.as_view(), Complex::::new(0., 1.)); + + const_map.insert(a.as_view(), Complex::::new(3., 1.)); + + const_map.insert(b.as_view(), Complex::::new(3., 1.)); + + let ev = f.as_view().evaluate(&const_map, &function_map, &mut cache); + + println!("{}", ev); + // print!("{}", f.unwrap()); + + let g = Complex::new(0.1, 3.); + + let mut h = a.sub_fallible(g).unwrap(); + + h.add_assign_fallible(a); + let _f = a.mul_fallible(a); + + Atom::default(); + + println!("{}", h); } diff --git a/src/tensor/ufo.rs b/src/tensor/ufo.rs new file mode 100644 index 00000000..8634e124 --- /dev/null +++ b/src/tensor/ufo.rs @@ -0,0 +1,567 @@ +use super::{ + AbstractIndex, DenseTensor, HistoryStructure, IntoId, + Representation::{self, Euclidean, Lorentz}, + SetTensorData, Shadowable, Slot, SparseTensor, TensorStructure, +}; + +use num::{NumCast, One, Zero}; + +use symbolica::{ + domains::float::{Complex, Real}, + representations::{Atom, Symbol}, + state::State, +}; + +// pub fn init_state() { +// assert!(EUC == State::get_or_insert_fn("euc", None).unwrap()); +// assert!(LOR == State::get_or_insert_fn("lor", None).unwrap()); +// assert!(SPIN == State::get_or_insert_fn("spin", None).unwrap()); +// assert!(CADJ == State::get_or_insert_fn("CAdj", None).unwrap()); +// assert!(CF == State::get_or_insert_fn("CF", None).unwrap()); +// assert!(CAF == State::get_or_insert_fn("CAF", None).unwrap()); +// assert!(CS == State::get_or_insert_fn("CS", None).unwrap()); +// assert!(CAS == State::get_or_insert_fn("CAS", None).unwrap()); + +// assert!(ID == State::get_or_insert_fn("id", None).unwrap()); +// assert!(GAMMA == State::get_or_insert_fn("γ", None).unwrap()); +// assert!(GAMMA5 == State::get_or_insert_fn("γ5", None).unwrap()); +// assert!(PROJM == State::get_or_insert_fn("ProjM", None).unwrap()); +// assert!(PROJP == State::get_or_insert_fn("ProjP", None).unwrap()); +// 
assert!(SIGMA == State::get_or_insert_fn("σ", None).unwrap()); +// } + +#[allow(dead_code)] +#[must_use] +pub fn identity( + indices: (AbstractIndex, AbstractIndex), + signature: Representation, +) -> SparseTensor, I> +where + T: Real, + I: TensorStructure + FromIterator, +{ + //TODO: make it just swap indices + let structure = [(indices.0, signature), (indices.1, signature)] + .into_iter() + .map(Slot::from) + .collect(); + let mut identity = SparseTensor::empty(structure); + for i in 0..signature.into() { + identity + .set(&[i, i], Complex::::new(T::one(), T::zero())) + .unwrap_or_else(|_| unreachable!()); + } + identity +} + +/// Create a rank 2 identity tensor +/// +/// # Arguments +/// +/// * `structure` - The structure of the tensor +/// +/// # Panics +/// +/// * If the structure is not rank 2 +/// * If the structure has different indices + +pub fn identity_data(structure: N) -> SparseTensor +where + T: One, + N: TensorStructure, +{ + assert!(structure.order() == 2, "Identity tensor must be rank 2"); + + assert!( + structure.reps()[0] == structure.reps()[1], + "Identity tensor must have equal indices" + ); + + let mut identity = SparseTensor::empty(structure); + + for i in 0..identity.shape()[0].into() { + identity + .set(&[i, i], T::one()) + .unwrap_or_else(|_| unreachable!()); + } + identity +} + +#[allow(dead_code)] +#[must_use] +pub fn lorentz_identity( + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, I> +where + T: One + Zero + Real, + I: TensorStructure + FromIterator, +{ + // IdentityL(1,2) (Lorentz) Kronecker delta δ^μ1_μ1 + let signature = Lorentz(4.into()); + identity(indices, signature) +} + +pub fn mink_four_vector(index: AbstractIndex, p: &[T; 4]) -> DenseTensor +where + T: Clone, + I: TensorStructure + FromIterator, +{ + DenseTensor::from_data( + p, + [Slot::from((index, Lorentz(4.into())))] + .into_iter() + .collect(), + ) + .unwrap_or_else(|_| unreachable!()) +} + +pub fn mink_four_vector_sym( + index: AbstractIndex, + p: &[T; 4], +) -> DenseTensor> +where + T: Clone, +{ + DenseTensor::from_data( + p, + HistoryStructure::new( + &[(index, Lorentz(4.into()))], + State::get_or_insert_fn("p", None).unwrap_or_else(|_| unreachable!()), + ), + ) + .unwrap_or_else(|_| unreachable!()) +} + +pub fn euclidean_four_vector(index: AbstractIndex, p: &[T; 4]) -> DenseTensor +where + T: Clone, + I: TensorStructure + FromIterator, +{ + DenseTensor::from_data( + p, + [Slot::from((index, Euclidean(4.into())))] + .into_iter() + .collect(), + ) + .unwrap_or_else(|_| unreachable!()) +} + +pub fn euclidean_four_vector_sym( + index: AbstractIndex, + p: &[T; 4], +) -> DenseTensor> +where + T: Clone, +{ + DenseTensor::from_data( + p, + HistoryStructure::new( + &[(index, Euclidean(4.into()))], + State::get_or_insert_fn("p", None).unwrap_or_else(|_| unreachable!()), + ), + ) + .unwrap_or_else(|_| unreachable!()) +} + +pub fn param_mink_four_vector( + index: AbstractIndex, + name: N, +) -> DenseTensor> +where + N: Clone + IntoId, +{ + HistoryStructure::new(&[(index, Lorentz(4.into()))], name) + .shadow() + .unwrap_or_else(|| unreachable!()) +} + +pub fn param_euclidean_four_vector( + index: AbstractIndex, + name: N, +) -> DenseTensor> +where + N: Clone + IntoId, +{ + HistoryStructure::new(&[(index, Euclidean(4.into()))], name) + .shadow() + .unwrap_or_else(|| unreachable!()) +} + +#[allow(dead_code)] +#[must_use] +pub fn euclidean_identity( + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, I> +where + T: One + Zero + Real, + I: TensorStructure + FromIterator, +{ + // 
Identity(1,2) (Spinorial) Kronecker delta δ_s1_s2 + let signature = Euclidean(4.into()); + identity(indices, signature) +} + +#[allow(dead_code)] +pub fn gamma( + minkindex: AbstractIndex, + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, I> +where + T: One + Zero + Copy + Real + std::ops::Neg + Real, + I: TensorStructure + FromIterator, +{ + // Gamma(1,2,3) Dirac matrix (γ^μ1)_s2_s3 + let structure = [ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + (minkindex, Lorentz(4.into())), + ] + .into_iter() + .map(Slot::from) + .collect(); + + gamma_data(structure) +} + +pub fn gammasym( + minkindex: AbstractIndex, + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, HistoryStructure> +where + T: One + Zero + Copy + Real + std::ops::Neg + Real, +{ + let structure = HistoryStructure::new( + &[ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + (minkindex, Lorentz(4.into())), + ], + State::get_or_insert_fn("γ", None).unwrap_or_else(|_| unreachable!()), + ); + + gamma_data(structure) +} + +#[allow(clippy::similar_names)] +pub fn gamma_data(structure: N) -> SparseTensor, N> +where + T: Real, + N: TensorStructure, +{ + let c1 = Complex::::new(T::one(), T::zero()); + let cn1 = Complex::::new(-T::one(), T::zero()); + let ci = Complex::::new(T::zero(), T::one()); + let cni = Complex::::new(T::zero(), -T::one()); + let mut gamma = SparseTensor::empty(structure); + + // dirac gamma matrices + + gamma.set(&[0, 0, 0], c1).unwrap(); + gamma.set(&[1, 1, 0], c1).unwrap(); + gamma.set(&[2, 2, 0], cn1).unwrap(); + gamma.set(&[3, 3, 0], cn1).unwrap(); + + gamma.set(&[0, 3, 1], c1).unwrap(); + gamma.set(&[1, 2, 1], c1).unwrap(); + gamma.set(&[2, 1, 1], cn1).unwrap(); + gamma.set(&[3, 0, 1], cn1).unwrap(); + + gamma.set(&[0, 3, 2], cni).unwrap(); + gamma.set(&[1, 2, 2], ci).unwrap(); + gamma.set(&[2, 1, 2], ci).unwrap(); + gamma.set(&[3, 0, 2], cni).unwrap(); + + gamma.set(&[0, 2, 3], c1).unwrap(); + gamma.set(&[1, 3, 3], cn1).unwrap(); + gamma.set(&[2, 0, 3], cn1).unwrap(); + gamma.set(&[3, 1, 3], c1).unwrap(); + + gamma //.to_dense() +} + +pub fn gamma5(indices: (AbstractIndex, AbstractIndex)) -> SparseTensor, I> +where + T: One + Zero + Copy + Real, + I: TensorStructure + FromIterator, +{ + let structure = [ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ] + .into_iter() + .map(Slot::from) + .collect(); + + gamma5_data(structure) +} + +pub fn gamma5sym( + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, HistoryStructure> +where + T: One + Zero + Copy + Real, +{ + let structure = HistoryStructure::new( + &[ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ], + State::get_or_insert_fn("γ5", None).unwrap_or_else(|_| unreachable!()), + ); + + gamma5_data(structure) +} + +pub fn gamma5_data(structure: N) -> SparseTensor, N> +where + T: Real, + N: TensorStructure, +{ + let c1 = Complex::::new(T::one(), T::zero()); + + let mut gamma5 = SparseTensor::empty(structure); + + gamma5.set(&[0, 2], c1).unwrap(); + gamma5.set(&[1, 3], c1).unwrap(); + gamma5.set(&[2, 0], c1).unwrap(); + gamma5.set(&[3, 1], c1).unwrap(); + + gamma5 +} + +pub fn proj_m(indices: (AbstractIndex, AbstractIndex)) -> SparseTensor, I> +where + T: Real + NumCast, + I: TensorStructure + FromIterator, +{ + // ProjM(1,2) Left chirality projector (( 1−γ5)/ 2 )_s1_s2 + let structure = [ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ] + .into_iter() + .map(Slot::from) + .collect(); + + 
proj_m_data(structure) +} + +pub fn proj_msym( + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, HistoryStructure> +where + T: Real + NumCast, +{ + let structure = HistoryStructure::new( + &[ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ], + State::get_or_insert_fn("ProjM", None).unwrap_or_else(|_| unreachable!()), + ); + + proj_m_data(structure) +} + +#[allow(clippy::similar_names)] +pub fn proj_m_data(structure: N) -> SparseTensor, N> +where + T: Real + NumCast, + N: TensorStructure, +{ + // ProjM(1,2) Left chirality projector (( 1−γ5)/ 2 )_s1_s2 + let chalf = Complex::::new(T::from(0.5).unwrap(), T::zero()); + let cnhalf = Complex::::new(T::from(-0.5).unwrap(), T::zero()); + + let mut proj_m = SparseTensor::empty(structure); + + proj_m.set(&[0, 0], chalf).unwrap(); + proj_m.set(&[1, 1], chalf).unwrap(); + proj_m.set(&[2, 2], chalf).unwrap(); + proj_m.set(&[3, 3], chalf).unwrap(); + + proj_m.set(&[0, 2], cnhalf).unwrap(); + proj_m.set(&[1, 3], cnhalf).unwrap(); + proj_m.set(&[2, 0], cnhalf).unwrap(); + proj_m.set(&[3, 1], cnhalf).unwrap(); + + proj_m +} + +pub fn proj_p(indices: (AbstractIndex, AbstractIndex)) -> SparseTensor, I> +where + T: Real + NumCast, + I: TensorStructure + FromIterator, +{ + // ProjP(1,2) Right chirality projector (( 1+γ5)/ 2 )_s1_s2 + let structure = [ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ] + .into_iter() + .map(Slot::from) + .collect(); + + proj_p_data(structure) +} + +pub fn proj_psym( + indices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, HistoryStructure> +where + T: Real + NumCast, +{ + let structure = HistoryStructure::new( + &[ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + ], + State::get_or_insert_fn("ProjP", None).unwrap_or_else(|_| unreachable!()), + ); + + proj_p_data(structure) +} + +pub fn proj_p_data(structure: N) -> SparseTensor, N> +where + T: Real + NumCast, + N: TensorStructure, +{ + // ProjP(1,2) Right chirality projector (( 1+γ5)/ 2 )_s1_s2 + let chalf = Complex::::new(T::from(0.5).unwrap_or_else(|| unreachable!()), T::zero()); + + let mut proj_p = SparseTensor::empty(structure); + + proj_p + .set(&[0, 0], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[1, 1], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[2, 2], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[3, 3], chalf) + .unwrap_or_else(|_| unreachable!()); + + proj_p + .set(&[0, 2], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[1, 3], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[2, 0], chalf) + .unwrap_or_else(|_| unreachable!()); + proj_p + .set(&[3, 1], chalf) + .unwrap_or_else(|_| unreachable!()); + + proj_p +} + +pub fn sigma( + indices: (AbstractIndex, AbstractIndex), + minkdices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, I> +where + T: Copy + Real, + I: TensorStructure + FromIterator, +{ + let structure = [ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + (minkdices.0, Lorentz(4.into())), + (minkdices.1, Lorentz(4.into())), + ] + .into_iter() + .map(Slot::from) + .collect(); + + sigma_data(structure) +} + +pub fn sigmasym( + indices: (AbstractIndex, AbstractIndex), + minkdices: (AbstractIndex, AbstractIndex), +) -> SparseTensor, HistoryStructure> +where + T: Copy + Real, +{ + let structure = HistoryStructure::new( + &[ + (indices.0, Euclidean(4.into())), + (indices.1, Euclidean(4.into())), + (minkdices.0, Lorentz(4.into())), + (minkdices.1, 
Lorentz(4.into())), + ], + State::get_or_insert_fn("σ", None).unwrap_or_else(|_| unreachable!()), + ); + + sigma_data(structure) +} + +#[allow(clippy::similar_names)] +pub fn sigma_data(structure: N) -> SparseTensor, N> +where + T: Copy + Real, + N: TensorStructure, +{ + let c1 = Complex::::new(T::one(), T::zero()); + let cn1 = Complex::::new(-T::one(), T::zero()); + let ci = Complex::::new(T::zero(), T::one()); + let cni = Complex::::new(T::zero(), -T::one()); + + let mut sigma = SparseTensor::empty(structure); + sigma.set(&[0, 2, 0, 1], c1).unwrap(); + sigma.set(&[0, 2, 3, 0], c1).unwrap(); + sigma.set(&[0, 3, 1, 2], c1).unwrap(); + sigma.set(&[1, 0, 2, 2], c1).unwrap(); + sigma.set(&[1, 1, 1, 2], c1).unwrap(); + sigma.set(&[1, 3, 0, 2], c1).unwrap(); + sigma.set(&[2, 2, 1, 0], c1).unwrap(); + sigma.set(&[2, 2, 2, 1], c1).unwrap(); + sigma.set(&[2, 3, 3, 2], c1).unwrap(); + sigma.set(&[3, 0, 0, 2], c1).unwrap(); + sigma.set(&[3, 3, 2, 2], c1).unwrap(); + sigma.set(&[3, 1, 3, 2], c1).unwrap(); + sigma.set(&[0, 1, 3, 0], ci).unwrap(); + sigma.set(&[0, 3, 1, 1], ci).unwrap(); + sigma.set(&[0, 3, 2, 0], ci).unwrap(); + sigma.set(&[1, 0, 3, 3], ci).unwrap(); + sigma.set(&[1, 1, 0, 3], ci).unwrap(); + sigma.set(&[1, 1, 2, 0], ci).unwrap(); + sigma.set(&[2, 1, 1, 0], ci).unwrap(); + sigma.set(&[2, 3, 0, 0], ci).unwrap(); + sigma.set(&[2, 3, 3, 1], ci).unwrap(); + sigma.set(&[3, 0, 1, 3], ci).unwrap(); + sigma.set(&[3, 1, 0, 0], ci).unwrap(); + sigma.set(&[3, 1, 2, 3], ci).unwrap(); + sigma.set(&[0, 0, 3, 2], cn1).unwrap(); + sigma.set(&[0, 1, 0, 2], cn1).unwrap(); + sigma.set(&[0, 2, 1, 3], cn1).unwrap(); + sigma.set(&[1, 2, 0, 3], cn1).unwrap(); + sigma.set(&[1, 2, 1, 1], cn1).unwrap(); + sigma.set(&[1, 2, 2, 0], cn1).unwrap(); + sigma.set(&[2, 0, 1, 2], cn1).unwrap(); + sigma.set(&[2, 1, 2, 2], cn1).unwrap(); + sigma.set(&[2, 2, 3, 3], cn1).unwrap(); + sigma.set(&[3, 2, 0, 0], cn1).unwrap(); + sigma.set(&[3, 2, 2, 3], cn1).unwrap(); + sigma.set(&[3, 2, 3, 1], cn1).unwrap(); + sigma.set(&[0, 0, 2, 3], cni).unwrap(); + sigma.set(&[0, 0, 3, 1], cni).unwrap(); + sigma.set(&[0, 1, 1, 3], cni).unwrap(); + sigma.set(&[1, 0, 2, 1], cni).unwrap(); + sigma.set(&[1, 3, 0, 1], cni).unwrap(); + sigma.set(&[1, 3, 3, 0], cni).unwrap(); + sigma.set(&[2, 0, 0, 3], cni).unwrap(); + sigma.set(&[2, 0, 1, 1], cni).unwrap(); + sigma.set(&[2, 1, 3, 3], cni).unwrap(); + sigma.set(&[3, 0, 0, 1], cni).unwrap(); + sigma.set(&[3, 3, 1, 0], cni).unwrap(); + sigma.set(&[3, 3, 2, 1], cni).unwrap(); + + sigma +} diff --git a/src/tensor/upgrading_arithmetic.rs b/src/tensor/upgrading_arithmetic.rs new file mode 100644 index 00000000..350bf547 --- /dev/null +++ b/src/tensor/upgrading_arithmetic.rs @@ -0,0 +1,426 @@ +use std::borrow::Cow; +use std::ops::Mul; + +use duplicate::duplicate; + +use symbolica::domains::float::Complex; + +use symbolica::domains::float::Real; +use symbolica::representations::Atom; + +use symbolica::state::State; + +#[macro_export] +macro_rules! 
forward_ref_bino { + (impl $imp:ident, $method:ident for $t:ty, $u:ty,$out:ty) => { + impl<'a> $imp<$u> for &'a $t { + type Output = $out; + + #[inline] + fn $method(self, other: $u) -> Option { + $imp::$method(self, &other) + } + } + + impl $imp<&$u> for $t { + type Output = $out; + + #[inline] + fn $method(self, other: &$u) -> Option { + $imp::$method(&self, other) + } + } + + impl $imp<$u> for $t { + type Output = $out; + + #[inline] + fn $method(self, other: $u) -> Option { + $imp::$method(&self, &other) + } + } + }; +} +pub trait SmallestUpgrade { + type LCM; + fn upgrade(self) -> Self::LCM; +} + +pub trait TryFromUpgrade { + fn try_from_upgrade(value: T) -> Option + where + Self: Sized; +} + +pub trait TrySmallestUpgrade { + type LCM; + + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone; +} + +impl TryFromUpgrade for U +where + T: TrySmallestUpgrade, + U: Clone, +{ + fn try_from_upgrade(value: T) -> Option { + let cow = value.try_upgrade()?; + Some(cow.into_owned()) + } +} + +pub trait TryIntoUpgrade { + fn try_into_upgrade(self) -> Option; +} + +impl TryIntoUpgrade for T +where + U: TryFromUpgrade, +{ + fn try_into_upgrade(self) -> Option { + U::try_from_upgrade(self) + } +} + +// duplicate! { +// [ num; +// [f64] ; +// [i32] ;] + +// impl TrySmallestUpgrade for Complex { +// type LCM = Complex; + +// fn try_upgrade(&self) -> Option> +// where +// Self::LCM: Clone { +// Some(Cow::Borrowed(self)) +// } +// } + +// impl TrySmallestUpgrade> for num { +// type LCM = Complex; + +// fn try_upgrade(&self) -> Option> +// where +// Self::LCM: Clone { +// Some(Cow::Owned(Complex::from(*self))) +// } +// } +// } + +// impl TrySmallestUpgrade for T +// where +// T: Borrow, +// U: Borrow, +// { +// type LCM = T; + +// fn try_upgrade(&self) -> Option> +// where +// Self::LCM: Clone, +// { +// Some(Cow::Borrowed(self)) +// } +// } can't do this because of future impls GRR. + +duplicate! { + [smaller larger; + [i16] [i16]; + [i32] [i32]; + [f64] [f64]; + [Complex] [Complex]; + [f64] [Complex]; + [f64] [Atom]; + [Atom] [Atom]; + [Complex] [Atom]; + [f32] [f64]; + [i32] [f64];] + +impl TrySmallestUpgrade for larger { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + Some(Cow::Borrowed(self)) + } +} + +impl<'a> TrySmallestUpgrade<&'a smaller> for larger { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + Some(Cow::Borrowed(self)) + } +} + +impl<'a,'b> TrySmallestUpgrade<&'a smaller> for &'b larger { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + Some(Cow::Borrowed(self)) + } +} + +impl<'b> TrySmallestUpgrade for &'b larger { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + Some(Cow::Borrowed(self)) + } +} +} + +duplicate! 
{ + [smaller larger; + [f32] [f64]; + [i32] [f64];] + +impl TrySmallestUpgrade for smaller { + type LCM = larger; + + + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + Some(Cow::Owned(larger::from(*self))) + } +} + +} + +impl TrySmallestUpgrade> for T +where + T: Real, +{ + type LCM = Complex; + + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone, + { + let new = Complex::new(*self, T::zero()); + Some(Cow::Owned(new)) + } +} + +impl TrySmallestUpgrade for f64 { + type LCM = Atom; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone, + { + let rugrat = rug::Rational::from_f64(*self)?; + let natrat = symbolica::domains::rational::Rational::from_large(rugrat); + let symrat = Atom::new_num(symbolica::coefficient::Coefficient::from(natrat)); + + Some(Cow::Owned(symrat)) + } +} + +impl TrySmallestUpgrade for Complex { + type LCM = Atom; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone, + { + let real: Cow<'_, Atom> = >::try_upgrade(&self.re)?; + let imag: Cow<'_, Atom> = >::try_upgrade(&self.im)?; + let i = Atom::new_var(State::I); + let symrat = (i * imag.as_ref()) + real.as_ref(); + + Some(Cow::Owned(symrat)) + } +} + +duplicate! { + [smaller larger; + [f64] [Atom]; + [Complex] [Atom]; + [f64][Complex]; + [f32] [f64]; + [i32] [f64];] +impl<'a> TrySmallestUpgrade<&'a larger> for smaller { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + >::try_upgrade(self) + } +} + +impl<'a,'b> TrySmallestUpgrade<&'a larger> for &'b smaller { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + >::try_upgrade(*self) + }} + +impl<'b> TrySmallestUpgrade for &'b smaller { + type LCM = larger; + fn try_upgrade(&self) -> Option> + where + Self::LCM: Clone { + >::try_upgrade(*self) + } +} + +} + +pub trait FallibleMul { + type Output; + fn mul_fallible(self, rhs: T) -> Option; +} +impl FallibleMul for U +where + U: TrySmallestUpgrade, + T: TrySmallestUpgrade>::LCM>, + U::LCM: Clone, + for<'a, 'b> &'a U::LCM: std::ops::Mul<&'b U::LCM, Output = U::LCM>, +{ + type Output = U::LCM; + + fn mul_fallible(self, rhs: T) -> Option { + let lhs = self.try_upgrade()?; + let rhs = rhs.try_upgrade()?; + Some(lhs.as_ref().mul(rhs.as_ref())) + } +} +pub trait FallibleAdd { + type Output; + fn add_fallible(self, rhs: T) -> Option; +} + +impl FallibleAdd for U +where + U: TrySmallestUpgrade, + T: TrySmallestUpgrade>::LCM>, + U::LCM: Clone, + for<'a, 'b> &'a U::LCM: std::ops::Add<&'b U::LCM, Output = U::LCM>, +{ + type Output = U::LCM; + + fn add_fallible(self, rhs: T) -> Option { + let lhs = self.try_upgrade()?; + let rhs = rhs.try_upgrade()?; + Some(lhs.as_ref() + rhs.as_ref()) + } +} + +pub trait FallibleSub { + type Output; + fn sub_fallible(self, rhs: T) -> Option; +} + +impl FallibleSub for U +where + U: TrySmallestUpgrade, + T: TrySmallestUpgrade>::LCM>, + U::LCM: Clone, + for<'a, 'b> &'a U::LCM: std::ops::Sub<&'b U::LCM, Output = U::LCM>, +{ + type Output = U::LCM; + + fn sub_fallible(self, rhs: T) -> Option { + let lhs = self.try_upgrade()?; + let rhs = rhs.try_upgrade()?; + Some(lhs.as_ref() - rhs.as_ref()) + } +} + +pub trait FallibleAddAssign { + fn add_assign_fallible(&mut self, rhs: T); +} + +impl FallibleAddAssign for U +where + U: TrySmallestUpgrade, + T: TrySmallestUpgrade, + U::LCM: Clone, + for<'a, 'b> &'a U::LCM: std::ops::Add<&'b U::LCM, Output = U::LCM>, +{ + fn add_assign_fallible(&mut self, rhs: T) { + let lhs = self.try_upgrade().unwrap(); + let rhs = rhs.try_upgrade().unwrap(); + let out = 
+pub trait FallibleAddAssign<T> {
+    fn add_assign_fallible(&mut self, rhs: T);
+}
+
+impl<T, U> FallibleAddAssign<T> for U
+where
+    U: TrySmallestUpgrade<T, LCM = U>,
+    T: TrySmallestUpgrade<U, LCM = U>,
+    U::LCM: Clone,
+    for<'a, 'b> &'a U::LCM: std::ops::Add<&'b U::LCM, Output = U::LCM>,
+{
+    fn add_assign_fallible(&mut self, rhs: T) {
+        let lhs = self.try_upgrade().unwrap();
+        let rhs = rhs.try_upgrade().unwrap();
+        let out = lhs.as_ref() + rhs.as_ref();
+        *self = out;
+    }
+}
+
+pub trait FallibleSubAssign<T> {
+    fn sub_assign_fallible(&mut self, rhs: T);
+}
+
+impl<T, U> FallibleSubAssign<T> for U
+where
+    U: TrySmallestUpgrade<T, LCM = U>,
+    T: TrySmallestUpgrade<U, LCM = U>,
+    U::LCM: Clone,
+    for<'a, 'b> &'a U::LCM: std::ops::Sub<&'b U::LCM, Output = U::LCM>,
+{
+    fn sub_assign_fallible(&mut self, rhs: T) {
+        let lhs = self.try_upgrade().unwrap();
+        let rhs = rhs.try_upgrade().unwrap();
+        let out = lhs.as_ref() - rhs.as_ref();
+        *self = out;
+    }
+}
+
+// impl<T, U> SmallestUpgrade<U> for T
+// where
+//     U: From<T>,
+// {
+//     type LCM = U;
+//     fn upgrade(self) -> Self::LCM {
+//         U::from(self)
+//     }
+// }
+
+// impl<T, U> SmallestUpgrade<Up<T>> for Up<U>
+// where
+//     T: From<U>,
+// {
+//     type LCM = T;
+//     fn upgrade(self) -> Self::LCM {
+//         T::from(self.up)
+//     }
+// } We can't do this because of possible future impls; it would forbid any specialization.
+
+// impl<T> SmallestUpgrade<T> for T {
+//     type LCM = T;
+//     fn upgrade(self) -> Self::LCM {
+//         self
+//     }
+// } We don't want this, so that we can specialize binary operations.
+
+// impl<T, U> SmallestUpgrade<T> for U
+// where
+//     T: SmallestUpgrade<U, LCM = U>,
+// {
+//     type LCM = U;
+//     fn upgrade(self) -> Self::LCM {
+//         self
+//     }
+// } This should work but doesn't.
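+// Sketch of the assign variants above (not a doctest): because the result is
+// written back into `self`, both sides must already share `self`'s type as
+// their LCM; a genuine upgrade (e.g. `f64 += Atom`) does not fit these bounds.
+//
+//     let mut x: f64 = 1.0;
+//     x.add_assign_fallible(2.5_f64);
+//     assert_eq!(x, 3.5);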
diff --git a/src/tests_from_pytest.rs b/src/tests_from_pytest.rs
index 5903071b..22b413c8 100644
--- a/src/tests_from_pytest.rs
+++ b/src/tests_from_pytest.rs
@@ -18,29 +18,21 @@ use symbolica;
 #[allow(unused)]
 const LTD_COMPARISON_TOLERANCE: f64 = 1.0e-12;
 
-pub fn load_amplitude_output(
-    output_path: String,
-    sb_state: &mut symbolica::state::State,
-    sb_workspace: &symbolica::state::Workspace,
-) -> (Model, Amplitude) {
+pub fn load_amplitude_output(output_path: String) -> (Model, Amplitude) {
     let path = Path::new(&output_path);
     let output_meta_data: OutputMetaData =
         serde_yaml::from_reader(File::open(path.join("output_metadata.yaml")).unwrap()).unwrap();
     assert_eq!(output_meta_data.output_type, OutputType::Amplitudes);
     assert_eq!(output_meta_data.contents.len(), 1);
-    let model = Model::from_file(
-        String::from(
-            path.join(format!(
-                "sources/model/{}.yaml",
-                output_meta_data.model_name
-            ))
-            .to_str()
-            .unwrap(),
-        ),
-        sb_state,
-        sb_workspace,
-    )
+    let model = Model::from_file(String::from(
+        path.join(format!(
+            "sources/model/{}.yaml",
+            output_meta_data.model_name
+        ))
+        .to_str()
+        .unwrap(),
+    ))
     .unwrap();
     let amplitude = Amplitude::from_file(
         &model,
@@ -72,13 +64,9 @@ mod tests_scalar_massless_triangle {
    #[ignore] // Important since this test will only run successfully when called from within pytest where the massless_triangle_generation fixture will be run
    fn pytest_massless_scalar_triangle() {
        assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
-        let mut sb_state = symbolica::state::State::new();
-        let sb_workspace = symbolica::state::Workspace::new();
-        let (model, amplitude) = load_amplitude_output(
-            env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-            &mut sb_state,
-            &sb_workspace,
-        );
+
+        let (model, amplitude) =
+            load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
        assert_eq!(model.name, "scalars");
        assert!(amplitude.amplitude_graphs.len() == 1);
@@ -160,13 +148,9 @@ mod tests_scalar_massless_triangle {
 #[ignore] // Important since this test will only run successfully when called from within pytest where the massless_triangle_generation fixture will be run
 fn pytest_scalar_fishnet_2x2() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -279,13 +263,9 @@ fn pytest_scalar_fishnet_2x2() {
 #[ignore]
 fn pytest_scalar_sunrise() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -331,13 +311,9 @@ fn pytest_scalar_sunrise() {
 #[ignore] // Important since this test will only run successfully when called from within pytest where the massless_triangle_generation fixture will be run
 fn pytest_scalar_fishnet_2x3() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, mut amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+
+    let (model, mut amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -413,13 +389,8 @@ fn pytest_scalar_fishnet_2x3() {
 fn pytest_scalar_cube() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -490,13 +461,8 @@ fn pytest_scalar_cube() {
 fn pytest_scalar_bubble() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -540,13 +506,8 @@ fn pytest_scalar_bubble() {
 fn pytest_massless_scalar_box() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -622,13 +583,8 @@ fn pytest_massless_scalar_box() {
 fn pytest_scalar_double_triangle() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -697,13 +653,8 @@ fn pytest_scalar_double_triangle() {
 fn pytest_scalar_mercedes() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -766,13 +717,8 @@ fn pytest_scalar_mercedes() {
 fn pytest_scalar_triangle_box() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
@@ -845,13 +791,8 @@ fn pytest_scalar_triangle_box() {
 fn pytest_scalar_isopod() {
     assert!(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").is_ok());
 
-    let mut sb_state = symbolica::state::State::new();
-    let sb_workspace = symbolica::state::Workspace::new();
-    let (model, amplitude) = load_amplitude_output(
-        env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap(),
-        &mut sb_state,
-        &sb_workspace,
-    );
+    let (model, amplitude) =
+        load_amplitude_output(env::var("PYTEST_OUTPUT_PATH_FOR_RUST").unwrap());
 
     assert_eq!(model.name, "scalars");
     assert!(amplitude.amplitude_graphs.len() == 1);
diff --git a/src/utils.rs b/src/utils.rs
index 69e7f249..57f29c9a 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -3,10 +3,10 @@ use colored::Colorize;
 use hyperdual::Hyperdual;
 use itertools::{izip, Itertools};
 use lorentz_vector::{Field, LorentzVector, RealNumberLike};
+use num::traits::{Float, FloatConst, FromPrimitive, Num, NumAssign, NumCast, Signed};
+use num::traits::{Inv, One, Zero};
 use num::Complex;
 use num::ToPrimitive;
-use num_traits::{Float, FloatConst, FromPrimitive, Num, NumAssign, NumCast, Signed};
-use num_traits::{Inv, One, Zero};
 use serde::{Deserialize, Serialize};
 use smartstring::{LazyCompact, SmartString};
 use statrs::function::gamma::{gamma, gamma_lr, gamma_ur};
@@ -152,18 +152,14 @@ where
 {
 }
 
-pub fn parse_python_expression(
-    expression: &str,
-    sb_state: &mut symbolica::state::State,
-    sb_workspace: &symbolica::state::Workspace,
-) -> Atom {
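+/// Parses a Python-flavoured math expression into a Symbolica `Atom`, first
+/// rewriting `**` to `^` and the `cmath.*`/`math.*` names to their Symbolica
+/// equivalents. Illustrative sketch of the intended use (not a doctest):
+///
+/// ```ignore
+/// // "2*math.pi + x**2" parses as the atom 2*pi + x^2.
+/// let atom = parse_python_expression("2*math.pi + x**2");
+/// ```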
+pub fn parse_python_expression(expression: &str) -> Atom {
     let processed_string = String::from(expression)
         .replace("**", "^")
         .replace("cmath.sqrt", "sqrt")
         .replace("cmath.pi", "pi")
         .replace("math.sqrt", "sqrt")
         .replace("math.pi", "pi");
-    Atom::parse(processed_string.as_str(), sb_state, sb_workspace)
+    Atom::parse(processed_string.as_str())
         .map_err(|e| {
             format!(
                 "Failed to parse expression: '{}'\nError: {}",
                 expression, e
             )
         })
@@ -173,10 +169,7 @@ pub fn parse_python_expression(
     .unwrap()
 }
 
-pub fn to_str_expression(
-    expression: &Atom,
-    sb_state: &symbolica::state::State,
-) -> SmartString<LazyCompact> {
+pub fn to_str_expression(expression: &Atom) -> SmartString<LazyCompact> {
     format!(
         "{}",
         AtomPrinter::new_with_options(
             expression.as_view(),
             PrintOptions {
                 terms_on_new_line: false,
                 color_top_level_sum: false,
                 color_builtin_functions: false,
                 print_finite_field: false,
                 explicit_rational_polynomial: false,
+                symmetric_representation_for_finite_field: false,
                 number_thousands_separator: None,
                 multiplication_operator: '*',
                 square_brackets_for_function: false,
                 num_exp_as_superscript: false,
                 latex: false
             },
-            sb_state
         )
     )
     .into()
@@ -960,7 +953,7 @@ pub fn global_parameterize(
                 // r = e_cm * b * x/(1-x)
                 let b = Into::<T>::into(settings.parameterization.b);
                 let radius = e_cm * b * x_r[0] / (T::one() - x_r[0]);
-                jac *= <T as num_traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
+                jac *= <T as num::traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
                 radius
             }
         }
@@ -1060,7 +1053,7 @@ pub fn global_parameterize(
 
 #[allow(unused)]
 pub fn global_inv_parameterize<T: FloatLike>(
-    moms: &Vec<LorentzVector<T>>,
+    moms: &[LorentzVector<T>],
     e_cm_squared: T,
     settings: &Settings,
     force_radius: bool,
@@ -1094,7 +1087,7 @@ pub fn global_inv_parameterize(
             }
             ParameterizationMapping::Linear => {
                 let b = Into::<T>::into(settings.parameterization.b);
-                inv_jac /= <T as num_traits::Float>::powi(e_cm * b + k_r, 2) / e_cm / b;
+                inv_jac /= <T as num::traits::Float>::powi(e_cm * b + k_r, 2) / e_cm / b;
                 xs.push(k_r / (e_cm * b + k_r));
             }
         }
@@ -1217,7 +1210,7 @@ pub fn parameterize3d(
             // r = e_cm * b * x/(1-x)
             let b = Into::<T>::into(settings.parameterization.b);
             let radius = e_cm * b * x_r[0] / (T::one() - x_r[0]);
-            jac *= <T as num_traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
+            jac *= <T as num::traits::Float>::powi(e_cm * b + radius, 2) / e_cm / b;
             radius
         }
     };
@@ -1291,7 +1284,7 @@ pub fn inv_parametrize3d(
         }
         ParameterizationMapping::Linear => {
            let b = Into::<T>::into(settings.parameterization.b);
-            jac /= <T as num_traits::Float>::powi(e_cm * b + k_r, 2) / e_cm / b;
+            jac /= <T as num::traits::Float>::powi(e_cm * b + k_r, 2) / e_cm / b;
            k_r / (e_cm * b + k_r)
        }
    };
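
// Standalone sanity check (sketch, assuming only std) for the linear radius
// map used in `global_parameterize`/`parameterize3d` above: with
// r = e_cm*b*x/(1-x), the derivative dr/dx equals (e_cm*b + r)^2/(e_cm*b),
// which is exactly the factor folded into `jac` by the changed lines.
fn main() {
    let (e_cm, b, x) = (91.188_f64, 1.0_f64, 0.3_f64);
    let r = |x: f64| e_cm * b * x / (1.0 - x);

    // Central finite difference for dr/dx.
    let h = 1e-6;
    let numeric = (r(x + h) - r(x - h)) / (2.0 * h);

    // Closed form used in the diff: (e_cm*b + r)^2 / (e_cm*b).
    let analytic = (e_cm * b + r(x)).powi(2) / (e_cm * b);

    assert!(((numeric - analytic) / analytic).abs() < 1e-6);
}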