Python implementation for medium evaluations #862

Merged: 8 commits, May 14, 2019
8 changes: 8 additions & 0 deletions doc/docs/Python_User_Interface.md
@@ -300,6 +300,14 @@ List of dispersive susceptibilities (see below) added to the permeability μ in
Transforms `epsilon`, `mu`, and `sigma` of any [susceptibilities](#susceptibility) by the 3×3 matrix `M`. If `M` is a [rotation matrix](https://en.wikipedia.org/wiki/Rotation_matrix), then the principal axes of the susceptibilities are rotated by `M`. More generally, the susceptibilities χ are transformed to MχMᵀ/|det M|, which corresponds to [transformation optics](http://math.mit.edu/~stevenj/18.369/coordinate-transform.pdf) for an arbitrary curvilinear coordinate transformation with Jacobian matrix M. The absolute value of the determinant is to prevent inadvertent construction of left-handed materials, which are [problematic in nondispersive media](FAQ.md#why-does-my-simulation-diverge-if-0).
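As an illustration of `transform`, here is a minimal sketch (the 90° rotation and the uniaxial permittivity below are arbitrary examples, and it assumes the classes are exposed as `mp.Matrix`/`mp.Medium`):

```python
import math
import meep as mp

# 90-degree rotation about z, given as columns c1, c2, c3
theta = math.pi / 2
rot = mp.Matrix(mp.Vector3(math.cos(theta), math.sin(theta), 0),
                mp.Vector3(-math.sin(theta), math.cos(theta), 0),
                mp.Vector3(0, 0, 1))

# uniaxial medium with its extraordinary axis along x
med = mp.Medium(epsilon_diag=mp.Vector3(4.0, 2.25, 2.25))
med.transform(rot)
# the principal axes are rotated: epsilon_diag should now be approximately (2.25, 4.0, 2.25)
```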

**`epsilon(freq` [ scalar, list, or `numpy` array, ]`)`**
-
Calculates the medium's permittivity tensor as a 3×3 `numpy` array at the specified frequency `freq`. A scalar, list, or `numpy` array of frequencies may be supplied; for `N` frequency points, a `numpy` array of size N×3×3 is returned.

**`mu(freq` [ scalar, list, or `numpy` array, ]`)`**
-
Calculates the medium's permeability tensor as a 3×3 `numpy` array at the specified frequency `freq`. A scalar, list, or `numpy` array of frequencies may be supplied; for `N` frequency points, a `numpy` array of size N×3×3 is returned.
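
A minimal usage sketch of these two methods (the gold medium and the wavelengths below are arbitrary examples; `mu` behaves identically):

```python
from meep.materials import Au

eps = Au.epsilon(1/0.62)                        # one frequency: 3x3 numpy array
eps_multi = Au.epsilon([1/0.5, 1/0.62, 1/0.8])  # N frequencies: Nx3x3 numpy array
n_xx = eps_multi[:, 0, 0]**0.5                  # complex index along xx at each frequency
```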

**material functions**

Any function that accepts a `Medium` instance can also accept a user defined Python function. This allows you to specify the material as an arbitrary function of position. The function must have one argument, the position `Vector3`, and return the material at that point, which should be a Python `Medium` instance. This is accomplished by passing a function to the `material_function` keyword argument in the `Simulation` constructor, or the `material` keyword argument in any `GeometricObject` constructor.
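For example, a brief sketch of a position-dependent material (the half-space split, cell size, and resolution below are arbitrary):

```python
import meep as mp

def my_material(pos):
    # higher-index medium for x > 0, vacuum elsewhere
    return mp.Medium(index=3.5) if pos.x > 0 else mp.Medium(index=1.0)

sim = mp.Simulation(cell_size=mp.Vector3(4, 4),
                    resolution=20,
                    material_function=my_material)
```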
1 change: 1 addition & 0 deletions python/Makefile.am
@@ -53,6 +53,7 @@ TESTS = \
$(KDOM_TEST) \
$(TEST_DIR)/ldos.py \
$(MDPYTEST) \
$(TEST_DIR)/medium_evaluations.py \
$(MPBPYTEST) \
$(MODE_COEFFS_TEST) \
$(MODE_DECOMPOSITION_TEST) \
51 changes: 47 additions & 4 deletions python/geom.py 100644 → 100755
@@ -163,7 +163,9 @@ def __init__(self, epsilon_diag=Vector3(1, 1, 1),
H_chi2_diag=Vector3(),
H_chi3_diag=Vector3(),
D_conductivity_diag=Vector3(),
D_conductivity_offdiag=Vector3(),
B_conductivity_diag=Vector3(),
B_conductivity_offdiag=Vector3(),
epsilon=None,
index=None,
mu=None,
@@ -211,7 +213,9 @@ def __init__(self, epsilon_diag=Vector3(1, 1, 1),
self.H_chi2_diag = H_chi2_diag
self.H_chi3_diag = H_chi3_diag
self.D_conductivity_diag = D_conductivity_diag
self.D_conductivity_offdiag = D_conductivity_offdiag
self.B_conductivity_diag = B_conductivity_diag
self.B_conductivity_offdiag = B_conductivity_offdiag
self.valid_freq_range = valid_freq_range

def transform(self, m):
@@ -235,6 +239,35 @@ def transform(self, m):
for s in self.H_susceptibilities:
s.transform(m)

def epsilon(self,freq):
return self._get_epsmu(self.epsilon_diag, self.epsilon_offdiag, self.E_susceptibilities, self.D_conductivity_diag, self.D_conductivity_offdiag, freq)

def mu(self,freq):
return self._get_epsmu(self.mu_diag, self.mu_offdiag, self.H_susceptibilities, self.B_conductivity_diag, self.B_conductivity_offdiag, freq)

def _get_epsmu(self, diag, offdiag, susceptibilities, conductivity_diag, conductivity_offdiag, freq):
# Clean the input: accept a scalar, a list, or a numpy array of frequencies
freqs = np.atleast_1d(np.squeeze(np.array(freq)))
freqs = freqs[:, np.newaxis, np.newaxis]

# Initialize with instantaneous dielectric tensor
epsmu = np.expand_dims(Matrix(diag=diag,offdiag=offdiag),axis=0)

# Add the contribution of each susceptibility at the requested frequencies
for sus in susceptibilities:
epsmu = epsmu + sus.eval_susceptibility(freqs)

# Account for conductivity term
conductivity = np.expand_dims(Matrix(diag=conductivity_diag,offdiag=conductivity_offdiag),axis=0)
epsmu = (1 + 1j/freqs * conductivity) * epsmu

# Convert to a numpy array of size [num_freqs, 3, 3] (a single frequency squeezes to 3x3)
return np.squeeze(epsmu)
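As a sketch of what `_get_epsmu` assembles (not part of the diff; the pole parameters below are arbitrary), a single-Lorentzian medium with a scalar D-conductivity should reduce to ε(f) = (1 + iσ_D/f)·(ε_inst + σω₀²/(ω₀² − f² − iγf)):

```python
import numpy as np
import meep as mp

w0, gam, sig, eps_inst, cond = 1.1, 1e-2, 0.5, 2.25, 1e-3  # arbitrary values
med = mp.Medium(epsilon=eps_inst,
                E_susceptibilities=[mp.LorentzianSusceptibility(frequency=w0, gamma=gam, sigma=sig)],
                D_conductivity=cond)

f = 0.8
expected = (1 + 1j*cond/f) * (eps_inst + sig*w0**2/(w0**2 - f**2 - 1j*gam*f))
print(np.allclose(med.epsilon(f)[0, 0], expected))  # should print True
```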


class Susceptibility(object):

@@ -243,9 +276,7 @@ def __init__(self, sigma_diag=Vector3(), sigma_offdiag=Vector3(), sigma=None):
self.sigma_offdiag = sigma_offdiag

def transform(self, m):
sigma = Matrix(mp.Vector3(self.sigma_diag.x, self.sigma_offdiag.x, self.sigma_offdiag.y),
mp.Vector3(self.sigma_offdiag.x, self.sigma_diag.y, self.sigma_offdiag.z),
mp.Vector3(self.sigma_offdiag.y, self.sigma_offdiag.z, self.sigma_diag.z))
sigma = Matrix(diag=self.sigma_diag,offdiag=self.sigma_offdiag)
new_sigma = (m * sigma * m.transpose()) / abs(m.determinant())
self.sigma_diag = mp.Vector3(new_sigma.c1.x, new_sigma.c2.y, new_sigma.c3.z)
self.sigma_offdiag = mp.Vector3(new_sigma.c2.x, new_sigma.c3.x, new_sigma.c3.y)
@@ -257,6 +288,10 @@ def __init__(self, frequency=0.0, gamma=0.0, **kwargs):
super(LorentzianSusceptibility, self).__init__(**kwargs)
self.frequency = frequency
self.gamma = gamma

def eval_susceptibility(self,freq):
sigma = np.expand_dims(Matrix(diag=self.sigma_diag,offdiag=self.sigma_offdiag),axis=0)
return self.frequency*self.frequency / (self.frequency*self.frequency - freq*freq - 1j*self.gamma*freq) * sigma


class DrudeSusceptibility(Susceptibility):
@@ -265,6 +300,10 @@ def __init__(self, frequency=0.0, gamma=0.0, **kwargs):
super(DrudeSusceptibility, self).__init__(**kwargs)
self.frequency = frequency
self.gamma = gamma

def eval_susceptibility(self,freq):
sigma = np.expand_dims(Matrix(diag=self.sigma_diag,offdiag=self.sigma_offdiag),axis=0)
return -self.frequency*self.frequency / (freq*(freq + 1j*self.gamma)) * sigma
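For reference, a standalone sketch of the two single-pole forms evaluated above, written for a scalar σ (the helper names and parameter values are placeholders for illustration):

```python
import numpy as np

def lorentzian_chi(f, w0, gamma, sigma):
    # sigma * w0^2 / (w0^2 - f^2 - i*gamma*f)
    return sigma * w0**2 / (w0**2 - f**2 - 1j*gamma*f)

def drude_chi(f, w0, gamma, sigma):
    # -sigma * w0^2 / (f * (f + i*gamma))
    return -sigma * w0**2 / (f * (f + 1j*gamma))

f = np.linspace(0.5, 2.0, 4)
print(lorentzian_chi(f, w0=1.0, gamma=0.01, sigma=0.5))
print(drude_chi(f, w0=1.0, gamma=0.01, sigma=0.5))
```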


class NoisyLorentzianSusceptibility(LorentzianSusceptibility):
@@ -436,10 +475,14 @@ def __init__(self, vertices, height, axis=Vector3(z=1), center=None, **kwargs):

class Matrix(object):

def __init__(self, c1=Vector3(), c2=Vector3(), c3=Vector3()):
def __init__(self, c1=Vector3(), c2=Vector3(), c3=Vector3(), diag=Vector3(), offdiag=Vector3()):
self.c1 = c1
self.c2 = c2
self.c3 = c3
if c1 == c2 == c3 == Vector3():
self.c1 = Vector3(diag.x,offdiag.x,offdiag.y)
self.c2 = Vector3(np.conj(offdiag.x),diag.y,offdiag.z)
self.c3 = Vector3(np.conj(offdiag.y),np.conj(offdiag.z),diag.z)
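A quick sketch of the new keyword path (assuming the class is available as `mp.Matrix` after this change; real-valued entries shown for simplicity):

```python
import meep as mp

# diag = (xx, yy, zz), offdiag = (xy, xz, yz); conjugates fill the mirrored entries
m = mp.Matrix(diag=mp.Vector3(1, 2, 3), offdiag=mp.Vector3(0.1, 0.2, 0.3))
print(m[0], m[1], m[2])  # rows: (1, 0.1, 0.2), (0.1, 2, 0.3), (0.2, 0.3, 3)
```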

def __getitem__(self, i):
return self.row(i)
14 changes: 12 additions & 2 deletions python/materials.py
@@ -258,12 +258,17 @@
Ag_frq4 = 9.083*eV_um_scale # 0.137 um
Ag_gam4 = 0.916*eV_um_scale
Ag_sig4 = Ag_f4*Ag_plasma_frq**2/Ag_frq4**2
Ag_f5 = 5.646
Ag_frq5 = 20.29*eV_um_scale
Collaborator:
This additional Lorentzian term corresponds to a resonance at a wavelength of 0.061 μm (or 61 nm) that is so far outside the optical range that it should not affect results but it will increase the computational expense due to the additional storage and time stepping of auxiliary fields. (This is why it was originally left out.) Is it really necessary?

Collaborator Author:
Right, but it seems to be a pretty broad resonance because even though it's so far away, it does alter the shape of the resonances between 200 nm and 300 nm. After that, however, it's negligible.

Here's a comparison:

[attached image: copareResonance — comparison plot]

The 5-resonance implementation matches refractiveindex.info's raw data points.

Ag_gam5 = 2.419*eV_um_scale
Ag_sig5 = Ag_f5*Ag_plasma_frq**2/Ag_frq5**2

Ag_susc = [mp.DrudeSusceptibility(frequency=Ag_frq0, gamma=Ag_gam0, sigma=Ag_sig0),
mp.LorentzianSusceptibility(frequency=Ag_frq1, gamma=Ag_gam1, sigma=Ag_sig1),
mp.LorentzianSusceptibility(frequency=Ag_frq2, gamma=Ag_gam2, sigma=Ag_sig2),
mp.LorentzianSusceptibility(frequency=Ag_frq3, gamma=Ag_gam3, sigma=Ag_sig3),
mp.LorentzianSusceptibility(frequency=Ag_frq4, gamma=Ag_gam4, sigma=Ag_sig4)]
mp.LorentzianSusceptibility(frequency=Ag_frq4, gamma=Ag_gam4, sigma=Ag_sig4),
mp.LorentzianSusceptibility(frequency=Ag_frq5, gamma=Ag_gam5, sigma=Ag_sig5)]

Ag = mp.Medium(epsilon=1.0, E_susceptibilities=Ag_susc, valid_freq_range=metal_range)

@@ -291,12 +296,17 @@
Au_frq4 = 4.304*eV_um_scale # 0.288 um
Au_gam4 = 2.494*eV_um_scale
Au_sig4 = Au_f4*Au_plasma_frq**2/Au_frq4**2
Au_f5 = 4.384
Au_frq5 = 13.32*eV_um_scale
Collaborator (@oskooi, May 10, 2019):
This Lorentzian resonance is at 0.093 μm (or 93 nm), similar to the additional Ag term. Is it really necessary, or should we just reduce the tolerance of the test slightly (i.e., match the results to 4 decimal places rather than 6)?

Collaborator Author:
Same as before. Let me know if you want me to remove it.

Au_gam5 = 2.214*eV_um_scale
Au_sig5 = Au_f5*Au_plasma_frq**2/Au_frq5**2

Au_susc = [mp.DrudeSusceptibility(frequency=Au_frq0, gamma=Au_gam0, sigma=Au_sig0),
mp.LorentzianSusceptibility(frequency=Au_frq1, gamma=Au_gam1, sigma=Au_sig1),
mp.LorentzianSusceptibility(frequency=Au_frq2, gamma=Au_gam2, sigma=Au_sig2),
mp.LorentzianSusceptibility(frequency=Au_frq3, gamma=Au_gam3, sigma=Au_sig3),
mp.LorentzianSusceptibility(frequency=Au_frq4, gamma=Au_gam4, sigma=Au_sig4)]
mp.LorentzianSusceptibility(frequency=Au_frq4, gamma=Au_gam4, sigma=Au_sig4),
mp.LorentzianSusceptibility(frequency=Au_frq5, gamma=Au_gam5, sigma=Au_sig5)]

Au = mp.Medium(epsilon=1.0, E_susceptibilities=Au_susc, valid_freq_range=metal_range)

46 changes: 46 additions & 0 deletions python/tests/medium_evaluations.py
@@ -0,0 +1,46 @@

# medium_evaluations.py - Tests the evaluation of material permittivity profiles.
# Checks materials with Lorentzian, Drude, and non-uniform diagonal components.
# The extracted values are compared against actual datapoints pulled from
# refractiveindex.info.
# TODO:
# * check materials with off diagonal components
# * check magnetic profiles

from __future__ import division

import unittest
import meep as mp
import numpy as np


class TestMediumEvaluations(unittest.TestCase):

def test_medium_evaluations(self):
from meep.materials import Si, Au, LiNbO3
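# Frequencies below are in meep units of 1/λ, with λ the vacuum wavelength in μm
# (the materials library uses a length scale of 1 μm).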

# Check Silicon
w0 = 1/1.357
w1 = 1/11.04
eps = Si.epsilon([w0,w1])
self.assertAlmostEqual(np.real(np.sqrt(eps[0,0,0])), 3.4975134228247, places=6)
self.assertAlmostEqual(np.real(np.sqrt(eps[1,0,0])), 3.4175012372164, places=6)

# Check Gold
w0 = 1/0.24797
w1 = 1/6.1992
eps = Au.epsilon([w0,w1])
self.assertAlmostEqual(np.real(np.sqrt(eps[0,0,0])), 1.0941, places=4)
self.assertAlmostEqual(np.real(np.sqrt(eps[1,0,0])), 5.0974, places=4)

# Check Lithium Niobate
w0 = 1/0.4
w1 = 1/5.0
eps = LiNbO3.epsilon([w0,w1])
self.assertAlmostEqual(np.real(np.sqrt(eps[0,0,0])), 2.4392710888511, places=6)
self.assertAlmostEqual(np.real(np.sqrt(eps[1,0,0])), 2.0508048794821, places=6)
self.assertAlmostEqual(np.real(np.sqrt(eps[0,2,2])), 2.332119801394, places=6)
self.assertAlmostEqual(np.real(np.sqrt(eps[1,2,2])), 2.0025312699385, places=6)

if __name__ == '__main__':
unittest.main()