From 28cba3e4042d99ce3e232dff0b8c7d7a347b8c06 Mon Sep 17 00:00:00 2001 From: athillard-ledger Date: Mon, 12 Feb 2024 15:11:40 +0100 Subject: [PATCH] Add centered product preprocessing, with dependencies! (#3) Add centered product preprocessing Add Standardization preprocessor --- Cargo.toml | 3 +- examples/rank.rs | 7 +- src/lib.rs | 1 + src/preprocessors.rs | 294 +++++++++++++++++++++++++++++++++++++++++++ src/processors.rs | 2 +- 5 files changed, 300 insertions(+), 7 deletions(-) create mode 100644 src/preprocessors.rs diff --git a/Cargo.toml b/Cargo.toml index 2ae3968..440129f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,4 +16,5 @@ npyz = "0.7.4" ndarray = "0.15.6" rayon = "1.7.0" indicatif = "0.17.3" -ndarray-npy ="0.8.1" \ No newline at end of file +ndarray-npy ="0.8.1" +itertools = "*" \ No newline at end of file diff --git a/examples/rank.rs b/examples/rank.rs index cef968c..967033f 100644 --- a/examples/rank.rs +++ b/examples/rank.rs @@ -3,8 +3,8 @@ use muscat::leakage::{hw, sbox}; use muscat::util::{progress_bar, read_array_2_from_npy_file, save_array}; use ndarray::*; use rayon::prelude::{ParallelBridge, ParallelIterator}; -use simple_bar::ProgressBar; use std::time::Instant; +use indicatif::ProgressIterator; // traces format type FormatTraces = i16; @@ -21,11 +21,9 @@ fn rank() { let target_byte = 1; let folder = String::from("../../data"); let nfiles = 5; - // let mut bar = ProgressBar::default(nfiles as u32, 50, false); - let bar = progress_bar(nfiles); let chunk = 3000; let mut rank = Cpa::new(size, guess_range, target_byte, leakage_model); - for file in 0..nfiles { + for file in (0..nfiles).progress_with(progress_bar(nfiles)) { let dir_l = format!("{folder}/l{file}.npy"); let dir_p = format!("{folder}/p{file}.npy"); let leakages: Array2 = read_array_2_from_npy_file(&dir_l); @@ -57,7 +55,6 @@ fn rank() { rank = rank + x; rank.finalize(); } - bar.inc(file as u64); } // save rank key curves in npy save_array("../results/rank.npy", 
&rank.pass_rank()); diff --git a/src/lib.rs b/src/lib.rs index e99f8b2..47aa3e3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,3 +4,4 @@ pub mod processors; pub mod quicklog; pub mod trace; pub mod util; +pub mod preprocessors; \ No newline at end of file diff --git a/src/preprocessors.rs b/src/preprocessors.rs new file mode 100644 index 0000000..249af8f --- /dev/null +++ b/src/preprocessors.rs @@ -0,0 +1,294 @@ +use ndarray::{Array1, ArrayView1, s}; +use std::ops::Range; +use itertools::Itertools; + +use crate::processors::MeanVar; + +/// Computes the centered product of "order" leakage samples +/// Used particularly when performing high-order SCA +struct CenteredProduct{ + /// Sum of traces + acc: Array1, + /// Number of traces processed + count: usize, + /// Mean of traces + mean: Array1, + /// Indices of samples to combine + intervals: Vec>, + /// Boolean to ensure that finalize function happened before apply + processed: bool, +} + +impl CenteredProduct{ + /// Creates a new CenteredProduct processor. + /// + /// # Arguments + /// + /// * `size` - Number of samples per trace + /// * `intervals` - Intervals to combine + pub fn new(size: usize, intervals: Vec>) -> Self { + Self { + acc: Array1::zeros(size), + count: 0, + intervals: intervals, + processed: false, + mean: Array1::zeros(size), + } + } + + /// Processes an input trace to update internal accumulators. 
+ pub fn process + Copy>(&mut self, trace: &ArrayView1) { + let size = self.acc.len(); + for i in 0..size { + let x = trace[i].into(); + self.acc[i] += x; + } + self.count += 1 + } + + /// Compute the mean + pub fn finalize(&mut self){ + if self.count != 0{ + self.mean = self.acc.map(|&x| x as f64 / self.count as f64) + } + self.processed = true + } + + /// Apply the processing to an input trace + /// The centered product substract the mean of the traces and then perform products between every input time samples + pub fn apply + Copy>(&mut self, trace: &ArrayView1) -> Array1{ + // First we substract the mean trace + let centered_trace:Array1 = trace.mapv(|x| f64::from(x.into())) - &self.mean; + let length_out_trace:usize = self.intervals.iter().map(|x| x.len()).product(); + + let mut centered_product_trace = Array1::ones(length_out_trace); + + // Then we do the products + let mut multi_prod = (0..self.intervals.len()).map(|i| self.intervals[i].clone()).multi_cartesian_product(); //NOTE/TODO: maybe this can go in the struct parameters, which could improve performances + + for (idx,combination) in multi_prod.enumerate(){ + println!("{:?}",combination); + for i in combination{ + centered_product_trace[idx] *= centered_trace[i as usize] as f64; + } + } + println!{"{:?}",centered_product_trace}; + return centered_product_trace; + } +} + +/// Elevates parts of a trace to a certain power +struct Power{ + intervals: Vec>, + power: i32, +} + +impl Power{ + /// Creates a new Power processor. 
+ /// + /// # Arguments + /// + /// * `size` - Number of samples per trace + /// * `intervals` - Intervals to elevate to the power + /// * `power` - Power to elevate + pub fn new(size: usize, intervals: Vec>, power: i32) -> Self { + Self { + intervals: intervals, + power : power + } + } + + /// Processes an input trace + pub fn process + Copy>(&self, trace: &ArrayView1) -> Array1 { + // Concatenate the slices specified by the ranges + let result: Array1<_> = self.intervals + .iter() + .flat_map(|range| trace.slice(s![range.clone()]).to_owned()) + .map(|val| val.into() as f64) + .collect(); + + result.mapv(|result| result.powi(self.power)) + } +} + + +/// Standardization of the traces by removing the mean and scaling to unit variance +struct StandardScaler{ + /// meanVar processor + meanvar: MeanVar, + /// mean + mean: Array1, + /// std + std: Array1, +} + +impl StandardScaler{ + pub fn new(size: usize) -> Self { + Self { + meanvar: MeanVar::new(size), + mean: Array1::zeros(size), + std: Array1::zeros(size), + } + } + + /// Processes an input trace to update internal accumulators. 
+ pub fn process + Copy>(&mut self, trace: &ArrayView1) { + self.meanvar.process(trace); + } + + /// Compute mean and var + pub fn finalize(&mut self){ + self.mean = self.meanvar.mean(); + self.std = self.meanvar.var().mapv(f64::sqrt); + } + + /// Apply the processing to an input trace + pub fn apply + Copy>(&mut self, trace: &ArrayView1) -> Array1{ + (trace.mapv(|x| f64::from(x.into())) - &self.mean) / &self.std + } +} + + +#[cfg(test)] +mod tests { + use crate::preprocessors::StandardScaler; + + use super::CenteredProduct; + use super::Power; + use ndarray::array; + + fn round_to_2_digits(x:f64)->f64{ + return (x * 100 as f64).round() / 100 as f64; + } + + #[test] + fn test_centered_product() { + let mut processor = CenteredProduct::new(5,vec![0..2, 3..5]); + processor.process(&array![0i16, 1i16, 2i16, -3i16, -4i16].view()); + processor.finalize(); + assert_eq!( + processor.apply(&array![0i16, 1i16, 2i16, -3i16, -4i16].view()), + array![0f64,0f64,0f64,0f64] + ); + let traces = [ + array![77, 137, 51, 91], + array![72, 61, 91, 83], + array![39, 49, 52, 23], + array![26, 114, 63, 45], + array![30, 8, 97, 91], + array![13, 68, 7, 45], + array![17, 181, 60, 34], + array![43, 88, 76, 78], + array![0, 36, 35, 0], + array![93, 191, 49, 26], + ]; + + let mut processor2 = CenteredProduct::new(4,vec![0..1,1..2,2..4]); + for t in traces.iter(){ + processor2.process(&t.view()); + } + processor2.finalize(); + + let expected_results =[ + array![-11169.72, 61984.08], + array![-32942.77, -31440.82], + array![-540.46, -2533.96], + array![-1521.45, 2049.30], + array![36499.87, 36969.02], + array![-36199.24, -4675.44], + array![-3999.12, 37044.48], + array![-189.74, -279.84], + array![-54268.83, -121223.88], + array![-46231.64, -130058.24], + ]; + + for (i,t) in traces.iter().enumerate(){ + assert_eq!( + processor2.apply(&t.view()).map(|x| round_to_2_digits(*x)), + expected_results[i] + ); + } + } + + #[test] + fn test_power() { + let t= array![-1, 2, -3, 4, -5, 6]; + let 
processor1 = Power { + intervals: vec![0..2, 4..6], + power:1, + }; + let processor2 = Power { + intervals: vec![0..2, 4..6], + power:2, + }; + let processor3 = Power { + intervals: vec![0..2, 4..6], + power:3, + }; + let expected_results =[ + array![-1.0, 2.0, -5.0, 6.0], + array![1.0, 4.0, 25.0, 36.0], + array![-1.0, 8.0, -125.0, 216.0], + ]; + + assert_eq!( + processor1.process(&t.view()).map(|x| round_to_2_digits(*x)), + expected_results[0] + ); + assert_eq!( + processor2.process(&t.view()).map(|x| round_to_2_digits(*x)), + expected_results[1] + ); + assert_eq!( + processor3.process(&t.view()).map(|x| round_to_2_digits(*x)), + expected_results[2] + ); + + + } + + #[test] + fn test_standard_scaler() { + let traces = [ + array![77, 137, 51, 91], + array![72, 61, 91, 83], + array![39, 49, 52, 23], + array![26, 114, 63, 45], + array![30, 8, 97, 91], + array![13, 68, 7, 45], + array![17, 181, 60, 34], + array![43, 88, 76, 78], + array![0, 36, 35, 0], + array![93, 191, 49, 26], + ]; + + let mut processor = StandardScaler::new(4); + for t in traces.iter(){ + processor.process(&t.view()); + } + processor.finalize(); + + let expected_results =[ + array![ 1.25, 0.75, -0.28, 1.29], + array![ 1.07, -0.56, 1.32, 1.03], + array![-0.07, -0.76, -0.24, -0.94], + array![-0.52, 0.36, 0.20, -0.22], + array![-0.38, -1.47, 1.55, 1.29], + array![-0.97, -0.44, -2.04, -0.22], + array![-0.83, 1.51, 0.08, -0.58], + array![ 0.07, -0.09, 0.72, 0.86], + array![-1.42, -0.99, -0.92, -1.69], + array![ 1.80, 1.68, -0.36, -0.84], + ]; + + for (i,t) in traces.iter().enumerate(){ + assert_eq!( + processor.apply(&t.view()).map(|x| round_to_2_digits(*x)), + expected_results[i] + ); + } + } + + +} + diff --git a/src/processors.rs b/src/processors.rs index a3e725b..f457549 100644 --- a/src/processors.rs +++ b/src/processors.rs @@ -1,5 +1,4 @@ //! Traces processing algorithms, such as T-Test, SNR, etc. 
- use ndarray::{s, Array1, Array2, ArrayView1}; use std::ops::Add; @@ -202,6 +201,7 @@ impl Add for TTest { } } + #[cfg(test)] mod tests { use super::MeanVar;