Skip to content

Commit

Permalink
Update README and benchmarks
Browse files Browse the repository at this point in the history
  • Loading branch information
ianwilliamson committed Jan 20, 2020
1 parent c598328 commit da9e63c
Show file tree
Hide file tree
Showing 2 changed files with 74 additions and 16 deletions.
41 changes: 34 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,48 @@ The `vtmm` package supports some of the same functionality as the [tmm](https://

## Gradients

Currently `vtmm` is implementing using a Tensor Flow backend. This means that gradients of scalar loss / objective functions of the transmission and reflection can be taken for "free." At a later time a numpy backend will be implemented for users that do not need this functionality and do not want the tensorflow requirement.
Currently `vtmm` uses TensorFlow as its backend. This means that gradients of scalar loss / objective functions of the transmission and reflection can be taken for free. At a later time a numpy backend may be implemented for users that do not need gradient functionality and/or do not want TensorFlow as a requirement.

## Example

The primary function of `vtmm` is `tmm_rt(pol, omega, kx, n, d)`. A basic example is provided below.
The entry point to `vtmm` is the function `tmm_rt(pol, omega, kx, n, d)`. See the example below for a basic illustration of how to use the package.

```python
import numpy as np
import tensorflow as tf
from vtmm import tmm_rt

pol = 's'
n = tf.constant([1.0, 3.5, 1.0])
d = tf.constant([2e-6])
kx = tf.linspace(0.0, 2*np.pi*220e12/299792458, 1000)
omega = tf.linspace(150e12, 220e12, 1000) * 2 * np.pi
n = tf.constant([1.0, 3.5, 1.0]) # Layer refractive indices
d = tf.constant([2e-6]) # Layer thicknesses
kx = tf.linspace(0.0, 2*np.pi*220e12/299792458, 1000) # Parallel wavevectors
omega = tf.linspace(150e12, 220e12, 1000) * 2 * np.pi # Angular frequencies

t,r = tmm_rt(pol, omega, kx, n, d)
# t and r will be 2D tensors of shape [ num kx, num omega ]
t, r = tmm_rt(pol, omega, kx, n, d)
```

## Benchmarks

See `tests/test_benchmark.py` for a comparison between `vtmm` and the non-vectorized `tmm` package. The benchmarks shown below are for `len(omega) == len(kx) == 50` and 75 timeit evaluations.

```
python -W ignore ./tests/test_benchmark.py
```

```
Single omega / kx benchmark
vtmm: 0.2432 s
tmm: 0.0401 s
Large stack benchmark
vtmm: 0.7811 s
tmm: 79.8765 s
Medium stack benchmark
vtmm: 0.4607 s
tmm: 52.2255 s
Small stack benchmark
vtmm: 0.3367 s
tmm: 41.0926 s
```
49 changes: 40 additions & 9 deletions tests/test_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,21 +10,52 @@

from test_tmm import calc_rt_pytmm

NUM = 75

class TestTMM(unittest.TestCase):
def setUp(self):
    """Build the spectral grid shared by all benchmark cases.

    50 angular frequencies and 50 parallel wavevectors, matching the
    "len(omega) == len(kx) == 50" configuration described in the README.
    """
    # Angular frequencies: 150-250 THz, converted to rad/s.
    self.omega = tf.linspace(150e12, 250e12, 50) * 2 * np.pi
    # Parallel wavevectors from 0 up to the free-space wavevector at 150 THz
    # (C0 is the vacuum speed of light, imported from the package under test).
    self.kx = tf.linspace(0.0, 2 * np.pi * 150e12 / C0, 50)

def test_benchmark_single(self):
    """Time both implementations on a single (omega, kx) point."""
    indices = tf.constant([1.0, 3.5, 1.0])
    thicknesses = tf.constant([1e-6])

    # Slice out one frequency and one wavevector so both packages do
    # the minimum amount of work per call.
    one_omega = self.omega[0:1]
    one_kx = self.kx[0:1]

    print("Single omega / kx benchmark")
    elapsed_vtmm = timeit.timeit(
        lambda: vtmm.tmm_rt('p', one_omega, one_kx, indices, thicknesses),
        number=NUM,
    )
    print("vtmm: %.4f s" % elapsed_vtmm)
    elapsed_tmm = timeit.timeit(
        lambda: calc_rt_pytmm('p', one_omega, one_kx, indices, thicknesses),
        number=NUM,
    )
    print("tmm: %.4f s" % elapsed_tmm)

def test_benchmark_small(self):
    """Time both implementations on a single-layer (3-index) stack."""
    indices = tf.constant([1.0, 3.5, 1.0])
    thicknesses = tf.constant([1e-6])

    print("Small stack benchmark")
    elapsed_vtmm = timeit.timeit(
        lambda: vtmm.tmm_rt('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("vtmm: %.4f s" % elapsed_vtmm)
    elapsed_tmm = timeit.timeit(
        lambda: calc_rt_pytmm('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("tmm: %.4f s" % elapsed_tmm)

def test_benchmark_medium(self):
    """Time both implementations on a three-layer (5-index) stack."""
    indices = tf.constant([1.0, 1.5, 3.5, 1.5, 1.0])
    thicknesses = tf.constant([1e-6, 1e-6, 1e-6])

    print("Medium stack benchmark")
    elapsed_vtmm = timeit.timeit(
        lambda: vtmm.tmm_rt('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("vtmm: %.4f s" % elapsed_vtmm)
    elapsed_tmm = timeit.timeit(
        lambda: calc_rt_pytmm('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("tmm: %.4f s" % elapsed_tmm)

def test_benchmark_large(self):
    """Time both implementations on an eight-layer (10-index) stack.

    This is the heaviest case; the non-vectorized tmm package loops over
    every (omega, kx) pair while vtmm evaluates them in one batch.
    """
    indices = tf.constant([1.0, 1.5, 3.5, 1.5, 2.5, 3.0, 1.5, 2, 3, 1.0])
    thicknesses = tf.constant([1e-6, 1.33e-6, 1e-6, 1e-6, 2e-6, 1e-5, 1.25e-6, 1e-6])

    print("Large stack benchmark")
    t1 = timeit.timeit(
        lambda: vtmm.tmm_rt('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("vtmm: %.4f s" % t1)
    t2 = timeit.timeit(
        lambda: calc_rt_pytmm('p', self.omega, self.kx, indices, thicknesses),
        number=NUM,
    )
    print("tmm: %.4f s" % t2)


if __name__ == '__main__':
Expand Down

0 comments on commit da9e63c

Please sign in to comment.