From da9e63c099cde66925d2bb82a67581b8a7aecec3 Mon Sep 17 00:00:00 2001
From: Ian Williamson
Date: Mon, 20 Jan 2020 13:06:05 -0800
Subject: [PATCH] Update README and benchmarks

---
 README.md               | 41 ++++++++++++++++++++++++++++++++++
 tests/test_benchmark.py | 49 +++++++++++++++++++++++++++++++++--------
 2 files changed, 74 insertions(+), 16 deletions(-)

diff --git a/README.md b/README.md
index c144def..9380bf5 100644
--- a/README.md
+++ b/README.md
@@ -8,21 +8,48 @@ The `vtmm` package supports some of the same functionality as the [tmm](https://
 
 ## Gradients
 
-Currently `vtmm` is implementing using a Tensor Flow backend. This means that gradients of scalar loss / objective functions of the transmission and reflection can be taken for "free." At a later time a numpy backend will be implemented for users that do not need this functionality and do not want the tensorflow requirement.
+Currently `vtmm` uses TensorFlow as its backend. This means that gradients of scalar loss / objective functions of the transmission and reflection can be taken for free. A NumPy backend may be implemented at a later time for users who do not need gradient functionality or do not want the TensorFlow dependency.
 
 ## Example
 
-The primary function of `vtmm` is `tmm_rt(pol, omega, kx, n, d)`. A basic example is provided below.
+The entry point to `vtmm` is the function `tmm_rt(pol, omega, kx, n, d)`. The example below illustrates basic usage of the package.
 
 ```python
+import numpy as np
 import tensorflow as tf
 from vtmm import tmm_rt
 
 pol = 's'
-n = tf.constant([1.0, 3.5, 1.0])
-d = tf.constant([2e-6])
-kx = tf.linspace(0.0, 2*np.pi*220e12/299792458, 1000)
-omega = tf.linspace(150e12, 220e12, 1000) * 2 * np.pi
+n = tf.constant([1.0, 3.5, 1.0])  # Layer refractive indices
+d = tf.constant([2e-6])  # Layer thicknesses
+kx = tf.linspace(0.0, 2*np.pi*220e12/299792458, 1000)  # Parallel wavevectors
+omega = tf.linspace(150e12, 220e12, 1000) * 2 * np.pi  # Angular frequencies
 
-t,r = tmm_rt(pol, omega, kx, n, d)
+# t and r are 2D tensors of shape [num kx, num omega]
+t, r = tmm_rt(pol, omega, kx, n, d)
+```
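+
+Because the backend is TensorFlow, gradients of a scalar objective built from `t` or `r` can be taken directly. The snippet below is a minimal sketch of this, continuing from the example above; it assumes a TensorFlow 2.x (eager) setup, where `tf.GradientTape` is the standard way to record gradients, and the objective shown is arbitrary and only for illustration.
+
+```python
+with tf.GradientTape() as tape:
+    tape.watch(d)  # d is a tf.constant, so tell the tape to track it
+    t, r = tmm_rt(pol, omega, kx, n, d)
+    # Any scalar objective built from t and/or r can be differentiated
+    objective = tf.reduce_mean(tf.abs(t)**2)
+
+# Gradient of the scalar objective with respect to the layer thicknesses
+grad_d = tape.gradient(objective, d)
+```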
+
+## Benchmarks
+
+See `tests/test_benchmark.py` for a comparison between `vtmm` and the non-vectorized `tmm` package. The benchmarks shown below are for `len(omega) == len(kx) == 50` and 75 `timeit` evaluations.
+
+```
+python -W ignore ./tests/test_benchmark.py
+```
+
+```
+Single omega / kx benchmark
+vtmm: 0.2432 s
+tmm: 0.0401 s
+
+Large stack benchmark
+vtmm: 0.7811 s
+tmm: 79.8765 s
+
+Medium stack benchmark
+vtmm: 0.4607 s
+tmm: 52.2255 s
+
+Small stack benchmark
+vtmm: 0.3367 s
+tmm: 41.0926 s
 ```
diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py
index 8c3a73f..d0321d9 100644
--- a/tests/test_benchmark.py
+++ b/tests/test_benchmark.py
@@ -10,21 +10,52 @@ from test_tmm import calc_rt_pytmm
 
 
+NUM = 75
+
 class TestTMM(unittest.TestCase):
 
     def setUp(self):
         self.omega = tf.linspace(150e12, 250e12, 50) * 2 * np.pi
-        self.kx = tf.linspace(0.0, 2 * np.pi * 150e12 / C0, 51)
-
-    def test_benchmark(self):
+        self.kx = tf.linspace(0.0, 2 * np.pi * 150e12 / C0, 50)
+
+    def test_benchmark_single(self):
+        vec_n = tf.constant([1.0, 3.5, 1.0])
+        vec_d = tf.constant([1e-6])
+
+        print("Single omega / kx benchmark")
+        t1 = timeit.timeit( lambda: vtmm.tmm_rt('p', self.omega[0:1], self.kx[0:1], vec_n, vec_d), number=NUM )
+        print("vtmm: %.4f s" % t1)
+        t2 = timeit.timeit( lambda: calc_rt_pytmm('p', self.omega[0:1], self.kx[0:1], vec_n, vec_d), number=NUM )
+        print("tmm: %.4f s" % t2)
+
+    def test_benchmark_small(self):
+        vec_n = tf.constant([1.0, 3.5, 1.0])
+        vec_d = tf.constant([1e-6])
+
+        print("Small stack benchmark")
+        t1 = timeit.timeit( lambda: vtmm.tmm_rt('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("vtmm: %.4f s" % t1)
+        t2 = timeit.timeit( lambda: calc_rt_pytmm('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("tmm: %.4f s" % t2)
+
+    def test_benchmark_medium(self):
+        vec_n = tf.constant([1.0, 1.5, 3.5, 1.5, 1.0])
+        vec_d = tf.constant([1e-6, 1e-6, 1e-6])
+
+        print("Medium stack benchmark")
+        t1 = timeit.timeit( lambda: vtmm.tmm_rt('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("vtmm: %.4f s" % t1)
+        t2 = timeit.timeit( lambda: calc_rt_pytmm('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("tmm: %.4f s" % t2)
+
+    def test_benchmark_large(self):
         vec_n = tf.constant([1.0, 1.5, 3.5, 1.5, 2.5, 3.0, 1.5, 2, 3, 1.0])
         vec_d = tf.constant([1e-6, 1.33e-6, 1e-6, 1e-6, 2e-6, 1e-5, 1.25e-6, 1e-6])
 
-        print("Running benchmark")
-        print("----------------------")
-        t1 = timeit.timeit( lambda: vtmm.tmm_rt('p', self.omega, self.kx, vec_n, vec_d), number=50 )
-        print("Tensorflow (vectorized implementation): %.3f" % t1)
-        t2 = timeit.timeit( lambda: calc_rt_pytmm('p', self.omega, self.kx, vec_n, vec_d), number=50 )
-        print("TMM (unvectorized implementation): %.3f" % t2)
+        print("Large stack benchmark")
+        t1 = timeit.timeit( lambda: vtmm.tmm_rt('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("vtmm: %.4f s" % t1)
+        t2 = timeit.timeit( lambda: calc_rt_pytmm('p', self.omega, self.kx, vec_n, vec_d), number=NUM )
+        print("tmm: %.4f s" % t2)
 
 
 if __name__ == '__main__':