added Morello 2017 power-2 model

haykirstin committed Nov 21, 2017
1 parent db6a177 commit fde63ce
Showing 4 changed files with 66 additions and 42 deletions.
15 changes: 8 additions & 7 deletions README.md
@@ -13,19 +13,19 @@
from ldtk import LDPSetCreator, BoxcarFilter

filters = [BoxcarFilter('a', 450, 550), # Define your passbands
BoxcarFilter('b', 650, 750), # - Boxcar filters useful in
BoxcarFilter('c', 850, 950)] # transmission spectroscopy

sc = LDPSetCreator(teff=(6400, 50), # Define your star, and the code
logg=(4.50, 0.20), # downloads the uncached stellar
z=(0.25, 0.05), # spectra from the Husser et al.
filters=filters) # FTP server automatically.

ps = sc.create_profiles() # Create the limb darkening profiles
cq,eq = ps.coeffs_qd(do_mc=True) # Estimate quadratic law coefficients

lnlike = ps.lnlike_qd([[0.45,0.15], # Calculate the quadratic law log
[0.35,0.10], # likelihood for a set of coefficients
[0.25,0.05]]) # (returns the joint likelihood)

lnlike = ps.lnlike_qd([0.25,0.05],flt=0) # Quad. law log L for the first filter
@@ -88,9 +88,10 @@ The ``LDPSet`` class offers methods to calculate log likelihoods for a set of limb darkening models:
- ``lnlike_qd`` : Quadratic model
- ``lnlike_nl`` : Nonlinear model
- ``lnlike_gn`` : General model
- ``lnlike_p2`` : Power-2 model
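
The new ``lnlike_p2`` (together with the ``coeffs_p2`` helper added in ``ldtk/ldtk.py``) follows the same calling convention as the quadratic helpers shown in the quick-start example above; a minimal sketch, with purely illustrative coefficient values:

cp,ep = ps.coeffs_p2(do_mc=True) # Estimate power-2 law coefficients (c, alpha)

lnlike = ps.lnlike_p2([[0.60,0.50], # Power-2 law log likelihood for a set of
[0.55,0.45], # (c, alpha) pairs, one per filter
[0.50,0.40]]) # (returns the joint likelihood)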

## Resampling
The limb darkening profiles can be resampled to a desired sampling in ``mu`` using the resampling methods in the ``LDPSet``.

- ``resample_linear_z(nz=100)``: Resample the profiles to be linear in z
- ``resample_linear_mu(nmu=100)``: Resample the profiles to be linear in mu
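
For example, the profiles might be resampled to a denser, uniform ``mu`` grid before estimating coefficients; a usage sketch in which the sample count and grid size are arbitrary:

ps = sc.create_profiles(nsamples=500) # Sample 500 limb darkening profiles
ps.resample_linear_mu(nmu=200) # Resample them to 200 points, linear in mu
cp,ep = ps.coeffs_p2(do_mc=True) # Fit the power-2 law on the resampled grid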
@@ -107,8 +108,8 @@ The limb darkening profiles can be resampled to a desired sampling in ``mu`` using the resampling methods in the ``LDPSet``.
If you use PyLDTk in your research, please cite the PyLDTk paper

Parviainen, H. & Aigrain, S. MNRAS 453, 3821–3826 (2015) (DOI:10.1093/mnras/stv1857).

and the paper describing the spectrum library without which PyLDTk would be rather useless

Husser, T.-O. et al. A&A 553, A6 (2013) (DOI:10.1051/0004-6361/201219058).

31 changes: 22 additions & 9 deletions ldtk/ldmodel.py
@@ -24,7 +24,7 @@ class LDModel(object):
npar = None
name = None
abbr = None

def __init__(self):
raise NotImplementedError

@@ -41,7 +41,7 @@ class LinearModel(LDModel):
npar = 1
name = 'linear'
abbr = 'ln'

@classmethod
def evaluate(cl, mu, pv):
assert len(pv) == cl.npar
@@ -54,7 +54,7 @@ class QuadraticModel(LDModel):
npar = 2
name = 'quadratic'
abbr = 'qd'

@classmethod
def evaluate(cl, mu, pv):
assert len(pv) == cl.npar
@@ -80,7 +80,7 @@ class NonlinearModel(LDModel):
npar = 4
name = 'nonlinear'
abbr = 'nl'

@classmethod
def evaluate(cl, mu, pv):
assert len(pv) == cl.npar
@@ -94,15 +94,28 @@ class GeneralModel(LDModel):
npar = None
name = 'general'
abbr = 'ge'

@classmethod
def evaluate(cl, mu, pv):
mu = asarray(mu)
return 1. - np.sum([c*(1.-mu**(i+1)) for i,c in enumerate(pv)], 0)

class Power2Model(LDModel):
"""Power-2 limb darkening model (Morello et al, 2017)."""
npar = 2
name = 'power2'
abbr = 'p2'

@classmethod
def evaluate(cl, mu, pv):
assert len(pv) == cl.npar
mu = asarray(mu)
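# Power-2 law: I(mu)/I(1) = 1 - c*(1 - mu**alpha), with pv = (c, alpha)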
return 1. - pv[0]*(1.-(mu**pv[1]))


models = {'linear': LinearModel,
'quadratic': QuadraticModel,
'sqrt': SquareRootModel,
'nonlinear': NonlinearModel,
'general': GeneralModel,
'power2': Power2Model}
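
With the ``'power2'`` entry registered, the new model can be looked up by name and evaluated directly; a quick sketch, assuming the package is importable as ``ldtk.ldmodel`` and using arbitrary coefficient values:

from ldtk.ldmodel import models

p2 = models['power2']
print(p2.evaluate([0.1, 0.5, 1.0], [0.6, 0.5])) # approx. [0.59, 0.82, 1.0] for c=0.6, alpha=0.5
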
47 changes: 25 additions & 22 deletions ldtk/ldtk.py
@@ -34,7 +34,7 @@ def load_ldpset(filename):
# ============
class LDPSet(object):
def __init__(self, filters, mu, ldp_samples):
self._filters = filters
self._nfilters = len(filters)
self._mu = mu
self._z = sqrt(1-mu**2)
@@ -63,26 +63,30 @@ def __init__(self, filters, mu, ldp_samples):
self.lnlike_sq = partial(self._lnlike, ldmodel=SquareRootModel)
self.lnlike_nl = partial(self._lnlike, ldmodel=NonlinearModel)
self.lnlike_ge = partial(self._lnlike, ldmodel=GeneralModel)
self.lnlike_p2 = partial(self._lnlike, ldmodel=Power2Model)

self.coeffs_ln = partial(self._coeffs, ldmodel=LinearModel)
self.coeffs_qd = partial(self._coeffs, ldmodel=QuadraticModel)
self.coeffs_sq = partial(self._coeffs, ldmodel=SquareRootModel)
self.coeffs_nl = partial(self._coeffs, ldmodel=NonlinearModel)
self.coeffs_ge = partial(self._coeffs, ldmodel=GeneralModel)
self.coeffs_p2 = partial(self._coeffs, ldmodel=Power2Model)

self.lnlike_ln.__doc__ = "Linear limb darkening model\n(coeffs, join=True, flt=None)"
self.lnlike_qd.__doc__ = "Quadratic limb darkening model\n(coeffs, join=True, flt=None)"
self.lnlike_sq.__doc__ = "Square root limb darkening model\n(coeffs, join=True, flt=None)"
self.lnlike_nl.__doc__ = "Nonlinear limb darkening model\n(coeffs, join=True, flt=None)"
self.lnlike_ge.__doc__ = "General limb darkening model\n(coeffs, join=True, flt=None)"
self.lnlike_p2.__doc__ = "Power-2 limb darkening model\n(coeffs, join=True, flt=None)"

self.coeffs_ln.__doc__ = "Estimate the linear limb darkening model coefficients, see LPDSet._coeffs for details."
self.coeffs_qd.__doc__ = "Estimate the quadratic limb darkening model coefficients, see LPDSet._coeffs for details."
self.coeffs_sq.__doc__ = "Estimate the square root limb darkening model coefficients, see LPDSet._coeffs for details."
self.coeffs_nl.__doc__ = "Estimate the nonlinear limb darkening model coefficients, see LPDSet._coeffs for details."
self.coeffs_ge.__doc__ = "Estimate the general limb darkening model coefficients, see LPDSet._coeffs for details."
self.coeffs_p2.__doc__ = "Estimate the power-2 limb darkening model coefficients, see LPDSet._coeffs for details."



def save(self, filename):
with open(filename, 'wb') as f:
dump(self._filters, f)
@@ -107,13 +111,13 @@ def set_limb_z(self, z):
def set_limb_mu(self, mu):
self._limb_mu = mu
self._limb_i = argmin(abs(self._mu_orig-mu))
self._limb_z = sqrt(1.-mu**2)
self.reset_sampling()


def redefine_limb(self):
self._z = self._z_orig[self._limb_i:] / self._limb_z
self._mu = sqrt(1.-self._z**2)
self._ldps = self._ldps_orig[:,:,self._limb_i:].copy()
self._mean = self._mean_orig[:,self._limb_i:].copy()
self._std = self._std_orig[:,self._limb_i:].copy()
@@ -122,7 +126,7 @@ def redefine_limb(self):
def set_uncertainty_multiplier(self, em):
self._em = em
self._update()


def reset_sampling(self):
self.redefine_limb()
@@ -131,20 +135,20 @@ def reset_sampling(self):

def resample_linear_z(self, nz=100):
self.resample(z=linspace(0,1,nz))


def resample_linear_mu(self, nmu=100):
self.resample(mu=linspace(0,1,nmu))


def resample(self, mu=None, z=None):
muc = self._mu.copy()
if z is not None:
self._z = z
self._mu = sqrt(1-self._z**2)
elif mu is not None:
self._mu = mu
self._z = sqrt(1-self._mu**2)

self._mean = array([interp1d(muc, f, kind='cubic')(self._mu) for f in self._mean])
self._std = array([interp1d(muc, f, kind='cubic')(self._mu) for f in self._std])
@@ -184,7 +188,7 @@ def _coeffs(self, return_cm=False, do_mc=False, n_mc_samples=20000, mc_thin=25,
if do_mc:
logl = zeros(n_mc_samples)
chain = zeros([n_mc_samples,npar])

chain[0,:] = qc
logl[0] = self._lnlike(chain[0], flt=iflt, ldmodel=ldmodel)

@@ -213,7 +217,7 @@ def _coeffs(self, return_cm=False, do_mc=False, n_mc_samples=20000, mc_thin=25,

return array(qcs), array(covs)


def _lnlike(self, ldcs, joint=True, flt=None, ldmodel=QuadraticModel):
if flt is None:
for fid, ldc in enumerate(asarray(ldcs).reshape([self._nfilters,-1])):
@@ -239,7 +243,7 @@ def profile_uncertainties(self):
class LDPSetCreator(object):
def __init__(self, teff, logg, z, filters,
qe=None, limits=None, offline_mode=False,
force_download=False, verbose=False, cache=None):
"""Creates a limb darkening profile set (LDPSet).
Parameters
@@ -281,7 +285,7 @@ def set_lims(ms_or_samples, pts, plims=[0.135,100-0.135] ):
return a_lims_hilo(pts, *percentile(ms_or_samples, plims))
else:
return a_lims(pts, *ms_or_samples)

if not limits:
teff_lims = set_lims(teff, TEFF_POINTS)
logg_lims = set_lims(logg, LOGG_POINTS)
@@ -305,7 +309,7 @@ def set_lims(ms_or_samples, pts, plims=[0.135,100-0.135] ):
self.client.download_uncached_files(force=force_download)
if with_mpi:
comm.Barrier()

## Initialize the basic arrays
## ---------------------------
with pf.open(self.files[0]) as hdul:
@@ -316,7 +320,7 @@ def set_lims(ms_or_samples, pts, plims=[0.135,100-0.135] ):
self.mu = hdul[1].data
self.z = sqrt(1-self.mu**2)
self.nmu = self.mu.size

## Read in the fluxes
## ------------------
self.fluxes = zeros([self.nfilters, self.nfiles, self.nmu])
@@ -330,8 +334,8 @@ def set_lims(ms_or_samples, pts, plims=[0.135,100-0.135] ):
## -----------------------------
points = array([[f.teff,f.logg,f.z] for f in self.client.files])
self.itps = [NDI(points, self.fluxes[i,:,:]) for i in range(self.nfilters)]


def create_profiles(self, nsamples=100, teff=None, logg=None, metal=None):
"""Creates a set of limb darkening profiles
@@ -341,7 +345,7 @@ def create_profiles(self, nsamples=100, teff=None, logg=None, metal=None):
teff : array_like [optional]
logg : array_like [optional]
metal : array_like [optional]
Notes
-----
Teff, logg, and z are by default read in from the previously-created
@@ -355,13 +359,13 @@ def sample(a,b):
teff = sample(teff, self.teff)
logg = sample(logg, self.logg)
metal = sample(metal, self.metal)

minsize = min(nsamples, min(map(len, [teff, logg, metal])))
samples = ones([minsize,3])
samples[:,0] = clip(teff, *self.client.teffl)[:minsize]
samples[:,1] = clip(logg, *self.client.loggl)[:minsize]
samples[:,2] = clip(metal, *self.client.zl)[:minsize]

self.ldp_samples = zeros([self.nfilters, minsize, self.nmu])
for iflt in range(self.nfilters):
self.ldp_samples[iflt,:,:] = self.itps[iflt](samples)
@@ -371,4 +375,3 @@ def sample(a,b):
@property
def filter_names(self):
return [f.name for f in self.filters]
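
Because ``create_profiles`` accepts explicit ``teff``, ``logg``, and ``metal`` arrays, posterior samples from an external stellar characterisation can be passed in instead of letting the creator draw them internally; a usage sketch in which the sample arrays are hypothetical placeholders:

import numpy as np

teff_samples = np.random.normal(6400, 50, size=2000) # hypothetical stellar posterior samples
logg_samples = np.random.normal(4.50, 0.20, size=2000)
metal_samples = np.random.normal(0.25, 0.05, size=2000)

ps = sc.create_profiles(nsamples=2000, teff=teff_samples,
                        logg=logg_samples, metal=metal_samples)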

15 changes: 11 additions & 4 deletions tests/test_ld_models.py
@@ -10,28 +10,35 @@ class TestModels(unittest.TestCase):
def setUp(self):
self.mu = np.array([0, 0.1, 0.5, 1.])


def test_linear(self):
npt.assert_array_almost_equal(LinearModel.evaluate([0.0, 0.5, 1.0], [0]), [1.0, 1.0, 1.0])
npt.assert_array_almost_equal(LinearModel.evaluate([0.0, 0.5, 1.0], [1]), [0.0, 0.5, 1.0])


def test_quadratic(self):
npt.assert_array_almost_equal(QuadraticModel.evaluate(0., [0,0]), 1.)
npt.assert_array_almost_equal(QuadraticModel.evaluate(0., [1,0]), 0.)
npt.assert_array_almost_equal(QuadraticModel.evaluate(1., [0,0]), 1.)
npt.assert_array_almost_equal(QuadraticModel.evaluate(1., [1,0]), 1.)


def test_nonlinear(self):
npt.assert_array_almost_equal(NonlinearModel.evaluate(0., [0,0,0,0]), 1.)
npt.assert_array_almost_equal(NonlinearModel.evaluate(0., [1,0,0,0]), 0.)
npt.assert_array_almost_equal(NonlinearModel.evaluate(1., [0,0,0,0]), 1.)
npt.assert_array_almost_equal(NonlinearModel.evaluate(1., [1,0,0,0]), 1.)


def test_general(self):
npt.assert_array_almost_equal(GeneralModel.evaluate(0., [0]), 1.)
npt.assert_array_almost_equal(GeneralModel.evaluate(0., [1]), 0.)
npt.assert_array_almost_equal(GeneralModel.evaluate(1., [0]), 1.)
npt.assert_array_almost_equal(GeneralModel.evaluate(1., [1]), 1.)


def test_power2(self):
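# Boundary checks: c=0 leaves the intensity flat (I=1 for all mu); any coefficients
# give I=1 at disc centre (mu=1); c=1, alpha=1 removes all intensity at the limb (mu=0).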
npt.assert_array_almost_equal(Power2Model.evaluate(0., [0,0]), 1.)
npt.assert_array_almost_equal(Power2Model.evaluate(0., [1,1]), 0.)
npt.assert_array_almost_equal(Power2Model.evaluate(1., [0,0]), 1.)
npt.assert_array_almost_equal(Power2Model.evaluate(1., [1,0]), 1.)
